Dataset columns and per-column statistics:

| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40-40 |
| directory_id | string | lengths 40-40 |
| path | string | lengths 3-281 |
| content_id | string | lengths 40-40 |
| detected_licenses | list | lengths 0-57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 6-116 |
| snapshot_id | string | lengths 40-40 |
| revision_id | string | lengths 40-40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable (⌀) |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | lengths 2-6.02M |
| authors | list | lengths 1-1 |
| author | string | lengths 0-175 |
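Each block of pipe-separated fields below is one row with the columns above. As a minimal sketch of how rows with this schema could be iterated and filtered, assuming the Hugging Face `datasets` library and a placeholder dataset identifier "org/python-code-dump" (the real identifier is not given in this dump):

from datasets import load_dataset

# Stream rows so the multi-megabyte "content" fields are not all loaded at once.
rows = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in rows:
    # Keep permissively licensed, non-vendored, non-generated files.
    if row["license_type"] == "permissive" and not row["is_vendor"] and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"], row["extension"])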
aa98278bf982510809e97f209972b9d3ffecdc03
|
4b191334ac835f99236d51ab6a7857f968250df2
|
/utils/add_ipv6.py
|
bb66dc7c153fcc6c478e5b53b9b056124f043392
|
[] |
no_license
|
soutzis/Janus-IPv6
|
bfdd1e89260a9d5faf9796e9da836d96fbfc607b
|
a1079a1f3283bc193597b40f90e998a149ae2781
|
refs/heads/master
| 2021-07-12T17:55:05.936232
| 2020-06-24T14:30:53
| 2020-06-24T14:30:53
| 168,965,023
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
#!/usr/bin/python3.6
from subprocess import call
import argparse
'''
For simplicity, this file is also copied to "/usr/local/bin" so that it can be run from any directory
by simply calling: add_ipv6 <hostname> <ipv6>.
The copy in "/usr/local/bin" is renamed to 'add_ipv6' (no ".py" file extension).
'''
add_ipv6_cmd = "ifconfig h{hostname}-eth0 inet6 add 2000::{ip}/64"
add_ipv6_dist_global = "ifconfig h{hostname}-eth0 inet6 add 200{ip}::{ip}/64"
add_ipv6_custom_cmd = "ifconfig h{hostname}-eth0 inet6 add {ip}/64"
parser = argparse.ArgumentParser(description="Add an IPv6 GUA, to the eth0 interface")
parser.add_argument("hostname",
help="Add the number of the host. e.g: if host is 'h4', enter: 4",
type=int)
mutex = parser.add_mutually_exclusive_group()
# mutex.add_argument("-d","--distinct",
# help="add a different IPv6 GUA for this node",
# action=store_true)
mutex.add_argument("-c", "--custom", help="Add a custom IPv6 GUA.", type=str)
args = parser.parse_args()
# if args.distinct:
# command = add_ipv6_dist_global.format(hostname=args.hostname,
# ip=args.hostname)
if args.custom:
    command = add_ipv6_custom_cmd.format(hostname=args.hostname, ip=args.custom)
else:
    command = add_ipv6_cmd.format(hostname=args.hostname, ip=args.hostname)
print("Executing command: "+command)
call(command.split(" "))
print("IPv6 address added successfully.")
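# Example invocations (hypothetical session; assumes the script is installed as
# /usr/local/bin/add_ipv6 as described in the docstring above):
#   add_ipv6 4                 -> runs: ifconfig h4-eth0 inet6 add 2000::4/64
#   add_ipv6 4 -c 2001:db8::4  -> runs: ifconfig h4-eth0 inet6 add 2001:db8::4/64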
|
[
"noreply@github.com"
] |
noreply@github.com
|
562d159153258105237dee275a61136e7c194853
|
e6dab5aa1754ff13755a1f74a28a201681ab7e1c
|
/.parts/lib/django-1.3/django/contrib/localflavor/generic/forms.py
|
b8a111a6b5f57fa81698f292b86258925d561b4a
|
[] |
no_license
|
ronkagan/Euler_1
|
67679203a9510147320f7c6513eefd391630703e
|
022633cc298475c4f3fd0c6e2bde4f4728713995
|
refs/heads/master
| 2021-01-06T20:45:52.901025
| 2014-09-06T22:34:16
| 2014-09-06T22:34:16
| 23,744,842
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/django/contrib/localflavor/generic/forms.py
|
[
"ron.y.kagan@gmail.com"
] |
ron.y.kagan@gmail.com
|
f2e46158afebdf251132475b4a4284e808cdffbb
|
7205218c520b405f01d5fa1ae5728c9f3071f04d
|
/Exercises - Module III/EX108 - Formatando moeda().py
|
49c953dc8e09077c0b5a0a9b8b4069bbd79bf6f3
|
[] |
no_license
|
talesritz/Learning-Python---Guanabara-classes
|
164b22ca27158b41e851152257750ac5fcd0cecc
|
273a06037e3b283a4e78a3f105c0828ae70bfab0
|
refs/heads/master
| 2020-05-19T23:43:12.007950
| 2019-05-06T21:44:44
| 2019-05-06T21:44:44
| 185,273,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# Adapt the code from challenge 107, creating an additional function called moeda() that can display the values formatted as currency.
from uteis import formata, moedab, validacao
# Version required for ex108
formata.cabecalho('EX108 - Formatando Moeda()')
tmp = validacao.leiaInt('Digite o preço: ')
print(f'A metade de {moedab.moeda(tmp)} é {moedab.moeda(moedab.metade(tmp))}')
print(f'O dobro de {moedab.moeda(tmp)} é {moedab.moeda(moedab.dobro(tmp))}')
print(f'Aumentando 10%, temos {moedab.moeda(moedab.aumentar(tmp))}')
print(f'Diminuindo 15%, temos {moedab.moeda(moedab.diminuir(tmp))}')
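# The imported helper package `uteis` is not included in this row. A minimal sketch of what its
# `moedab` module might contain, inferred only from the calls above (hypothetical, commented out):
#
#   def metade(n):
#       return n / 2
#   def dobro(n):
#       return n * 2
#   def aumentar(n, taxa=10):
#       return n + n * taxa / 100
#   def diminuir(n, taxa=15):
#       return n - n * taxa / 100
#   def moeda(n, simbolo='R$'):
#       return f'{simbolo}{n:.2f}'.replace('.', ',')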
|
[
"noreply@github.com"
] |
noreply@github.com
|
d3238509ecaea8d3e0a51a8943890b4578e5a8da
|
e3d447a81c5462d2d14201f2bc6b82cdcbbca51a
|
/chapter10/c10_6_addition.py
|
af50d5e3378247cb7a726c51df05b727370cecc4
|
[] |
no_license
|
barcern/python-crash-course
|
f6026f13f75ecddc7806711d65bc53cb88e24496
|
8b55775c9f0ed49444becb35b8d529620537fa54
|
refs/heads/master
| 2023-04-19T17:28:44.342022
| 2021-02-07T23:51:06
| 2021-02-07T23:51:06
| 257,201,280
| 2
| 3
| null | 2021-05-12T17:35:56
| 2020-04-20T07:14:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 12:27:25 2020
@author: barbora
One common problem when prompting for numerical input occurs when people
provide text instead of numbers. When you try to convert the input to an int,
you'll get a ValueError. Write a program that prompts for two numbers.
Add them together and print the result. Catch the ValueError if either input
value is not a number, and print a friendly error message. Test your program
by entering two numbers and then by entering some text instead of a number.
"""
# Option 1 - while loop
# Create a while loop to allow for users to input the two values
flag = True
while flag:
    message1 = "Please input the first value to add. To quit, type 'q': "
    message2 = "Please input the second value to add. To quit, type 'q': "
    value1 = input(message1)
    # Exit conditions
    if (value1 == 'q'):
        print("Ending program")
        break
    value2 = input(message2)
    if (value2 == 'q'):
        print("Ending program")
        break
    # Convert to integer and check for a ValueError
    try:
        int1 = int(value1)
        int2 = int(value2)
    except ValueError:
        print("Please input two integer values")
    else:
        result = int1 + int2
        print(f"Final result: {result}")

# Option 2 - while loop and function
# Create a function to add two values
def addition(value1, value2):
    """Function to add two integer values, with a ValueError check."""
    try:
        int1 = int(value1)
        int2 = int(value2)
    except ValueError:
        return("Please input two integer values")
    else:
        result = int1 + int2
        return(f"Final result: {result}")

print(addition(2,3))

# While loop to obtain user input
flag = True
while flag:
    message1 = "Please input the first value to add. To quit, type 'q': "
    message2 = "Please input the second value to add. To quit, type 'q': "
    value1 = input(message1)
    # Exit conditions
    if (value1 == 'q'):
        print("Ending program")
        break
    value2 = input(message2)
    if (value2 == 'q'):
        print("Ending program")
        break
    # Call function
    print(addition(value1, value2))
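# A hypothetical interaction with the Option 2 loop (illustrative only):
#   Please input the first value to add. To quit, type 'q': 4
#   Please input the second value to add. To quit, type 'q': seven
#   Please input two integer values
#   Please input the first value to add. To quit, type 'q': q
#   Ending program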
|
[
"bcernakova01@gmail.com"
] |
bcernakova01@gmail.com
|
62eaf9858c418fef633ac4d4dff91466518cb03b
|
c47e4c82a68563dbb5828dae8e9b1a3598297b7c
|
/NajaParser.py
|
8f71d0beb5236a2d8f756c33fae40069a7b2d5b8
|
[] |
no_license
|
MarceloCFSF/Naja
|
b0f28afc1a1feae7339d916a2b11189e6be0290a
|
edc38d5bd02afe840ea2ad006491e0d950191818
|
refs/heads/master
| 2023-07-11T15:06:06.850798
| 2021-08-14T05:17:09
| 2021-08-14T05:17:09
| 395,882,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,637
|
py
|
# Generated from Naja.g4 by ANTLR 4.9
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\31")
buf.write("\u008d\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\3\3\3\7\3+\n\3\f\3\16\3.\13\3\3\4\3\4\3\5\6\5")
buf.write("\63\n\5\r\5\16\5\64\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6>\n")
buf.write("\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\n")
buf.write("\3\n\3\n\3\n\3\13\3\13\3\13\3\13\6\13T\n\13\r\13\16\13")
buf.write("U\3\13\3\13\5\13Z\n\13\3\f\3\f\3\f\6\f_\n\f\r\f\16\f`")
buf.write("\3\f\3\f\3\r\3\r\3\r\3\r\6\ri\n\r\r\r\16\rj\3\r\3\r\3")
buf.write("\16\3\16\3\16\6\16r\n\16\r\16\16\16s\3\16\3\16\3\16\3")
buf.write("\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20")
buf.write("\7\20\u0084\n\20\f\20\16\20\u0087\13\20\5\20\u0089\n\20")
buf.write("\3\21\3\21\3\21\2\2\22\2\4\6\b\n\f\16\20\22\24\26\30\32")
buf.write("\34\36 \2\5\3\2\5\6\3\2\26\27\4\2\26\26\30\30\2\u008b")
buf.write("\2\"\3\2\2\2\4&\3\2\2\2\6/\3\2\2\2\b\62\3\2\2\2\n=\3\2")
buf.write("\2\2\f?\3\2\2\2\16D\3\2\2\2\20I\3\2\2\2\22K\3\2\2\2\24")
buf.write("O\3\2\2\2\26[\3\2\2\2\30d\3\2\2\2\32n\3\2\2\2\34y\3\2")
buf.write("\2\2\36\u0088\3\2\2\2 \u008a\3\2\2\2\"#\7\3\2\2#$\5\b")
buf.write("\5\2$%\7\4\2\2%\3\3\2\2\2&\'\5\6\4\2\',\7\26\2\2()\7\22")
buf.write("\2\2)+\7\26\2\2*(\3\2\2\2+.\3\2\2\2,*\3\2\2\2,-\3\2\2")
buf.write("\2-\5\3\2\2\2.,\3\2\2\2/\60\t\2\2\2\60\7\3\2\2\2\61\63")
buf.write("\5\n\6\2\62\61\3\2\2\2\63\64\3\2\2\2\64\62\3\2\2\2\64")
buf.write("\65\3\2\2\2\65\t\3\2\2\2\66>\5\f\7\2\67>\5\4\3\28>\5\16")
buf.write("\b\29>\5\22\n\2:>\5\24\13\2;>\5\30\r\2<>\5\32\16\2=\66")
buf.write("\3\2\2\2=\67\3\2\2\2=8\3\2\2\2=9\3\2\2\2=:\3\2\2\2=;\3")
buf.write("\2\2\2=<\3\2\2\2>\13\3\2\2\2?@\7\7\2\2@A\7\r\2\2AB\7\26")
buf.write("\2\2BC\7\16\2\2C\r\3\2\2\2DE\7\b\2\2EF\7\r\2\2FG\5\20")
buf.write("\t\2GH\7\16\2\2H\17\3\2\2\2IJ\t\3\2\2J\21\3\2\2\2KL\7")
buf.write("\26\2\2LM\7\21\2\2MN\5\36\20\2N\23\3\2\2\2OP\7\t\2\2P")
buf.write("Q\5\34\17\2QS\7\23\2\2RT\5\n\6\2SR\3\2\2\2TU\3\2\2\2U")
buf.write("S\3\2\2\2UV\3\2\2\2VW\3\2\2\2WY\7\24\2\2XZ\5\26\f\2YX")
buf.write("\3\2\2\2YZ\3\2\2\2Z\25\3\2\2\2[\\\7\n\2\2\\^\7\23\2\2")
buf.write("]_\5\n\6\2^]\3\2\2\2_`\3\2\2\2`^\3\2\2\2`a\3\2\2\2ab\3")
buf.write("\2\2\2bc\7\24\2\2c\27\3\2\2\2de\7\13\2\2ef\5\34\17\2f")
buf.write("h\7\23\2\2gi\5\n\6\2hg\3\2\2\2ij\3\2\2\2jh\3\2\2\2jk\3")
buf.write("\2\2\2kl\3\2\2\2lm\7\24\2\2m\31\3\2\2\2no\7\f\2\2oq\7")
buf.write("\23\2\2pr\5\n\6\2qp\3\2\2\2rs\3\2\2\2sq\3\2\2\2st\3\2")
buf.write("\2\2tu\3\2\2\2uv\7\24\2\2vw\7\13\2\2wx\5\34\17\2x\33\3")
buf.write("\2\2\2yz\7\r\2\2z{\t\4\2\2{|\7\25\2\2|}\t\4\2\2}~\7\16")
buf.write("\2\2~\35\3\2\2\2\177\u0089\7\27\2\2\u0080\u0085\5 \21")
buf.write("\2\u0081\u0082\7\20\2\2\u0082\u0084\5 \21\2\u0083\u0081")
buf.write("\3\2\2\2\u0084\u0087\3\2\2\2\u0085\u0083\3\2\2\2\u0085")
buf.write("\u0086\3\2\2\2\u0086\u0089\3\2\2\2\u0087\u0085\3\2\2\2")
buf.write("\u0088\177\3\2\2\2\u0088\u0080\3\2\2\2\u0089\37\3\2\2")
buf.write("\2\u008a\u008b\t\4\2\2\u008b!\3\2\2\2\f,\64=UY`js\u0085")
buf.write("\u0088")
return buf.getvalue()
class NajaParser ( Parser ):
grammarFileName = "Naja.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'head'", "'tail'", "'numero'", "'texto'",
"'leia'", "'escreva'", "'se'", "'senao'", "'enquanto'",
"'execute'", "'('", "')'", "';'", "<INVALID>", "':'",
"','", "'{'", "'}'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "AP", "FP",
"SC", "OP", "ATTR", "VIR", "ACH", "FCH", "OPREL",
"ID", "TEXTO", "NUMBER", "WS" ]
RULE_prog = 0
RULE_declaravar = 1
RULE_tipo = 2
RULE_bloco = 3
RULE_cmd = 4
RULE_cmdleitura = 5
RULE_cmdescrita = 6
RULE_escrita = 7
RULE_cmdattrib = 8
RULE_cmdselecao = 9
RULE_cmdelse = 10
RULE_cmdenquanto = 11
RULE_cmdexecute = 12
RULE_cmdcondicao = 13
RULE_expr = 14
RULE_termo = 15
ruleNames = [ "prog", "declaravar", "tipo", "bloco", "cmd", "cmdleitura",
"cmdescrita", "escrita", "cmdattrib", "cmdselecao", "cmdelse",
"cmdenquanto", "cmdexecute", "cmdcondicao", "expr", "termo" ]
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
AP=11
FP=12
SC=13
OP=14
ATTR=15
VIR=16
ACH=17
FCH=18
OPREL=19
ID=20
TEXTO=21
NUMBER=22
WS=23
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def bloco(self):
return self.getTypedRuleContext(NajaParser.BlocoContext,0)
def getRuleIndex(self):
return NajaParser.RULE_prog
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProg" ):
listener.enterProg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProg" ):
listener.exitProg(self)
def prog(self):
localctx = NajaParser.ProgContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_prog)
try:
self.enterOuterAlt(localctx, 1)
self.state = 32
self.match(NajaParser.T__0)
self.state = 33
self.bloco()
self.state = 34
self.match(NajaParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DeclaravarContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def tipo(self):
return self.getTypedRuleContext(NajaParser.TipoContext,0)
def ID(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.ID)
else:
return self.getToken(NajaParser.ID, i)
def VIR(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.VIR)
else:
return self.getToken(NajaParser.VIR, i)
def getRuleIndex(self):
return NajaParser.RULE_declaravar
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterDeclaravar" ):
listener.enterDeclaravar(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitDeclaravar" ):
listener.exitDeclaravar(self)
def declaravar(self):
localctx = NajaParser.DeclaravarContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_declaravar)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 36
self.tipo()
self.state = 37
self.match(NajaParser.ID)
self.state = 42
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==NajaParser.VIR:
self.state = 38
self.match(NajaParser.VIR)
self.state = 39
self.match(NajaParser.ID)
self.state = 44
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TipoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return NajaParser.RULE_tipo
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTipo" ):
listener.enterTipo(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTipo" ):
listener.exitTipo(self)
def tipo(self):
localctx = NajaParser.TipoContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_tipo)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 45
_la = self._input.LA(1)
if not(_la==NajaParser.T__2 or _la==NajaParser.T__3):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlocoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_bloco
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBloco" ):
listener.enterBloco(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBloco" ):
listener.exitBloco(self)
def bloco(self):
localctx = NajaParser.BlocoContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_bloco)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 48
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 47
self.cmd()
self.state = 50
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdleitura(self):
return self.getTypedRuleContext(NajaParser.CmdleituraContext,0)
def declaravar(self):
return self.getTypedRuleContext(NajaParser.DeclaravarContext,0)
def cmdescrita(self):
return self.getTypedRuleContext(NajaParser.CmdescritaContext,0)
def cmdattrib(self):
return self.getTypedRuleContext(NajaParser.CmdattribContext,0)
def cmdselecao(self):
return self.getTypedRuleContext(NajaParser.CmdselecaoContext,0)
def cmdenquanto(self):
return self.getTypedRuleContext(NajaParser.CmdenquantoContext,0)
def cmdexecute(self):
return self.getTypedRuleContext(NajaParser.CmdexecuteContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmd
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmd" ):
listener.enterCmd(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmd" ):
listener.exitCmd(self)
def cmd(self):
localctx = NajaParser.CmdContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_cmd)
try:
self.state = 59
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [NajaParser.T__4]:
self.enterOuterAlt(localctx, 1)
self.state = 52
self.cmdleitura()
pass
elif token in [NajaParser.T__2, NajaParser.T__3]:
self.enterOuterAlt(localctx, 2)
self.state = 53
self.declaravar()
pass
elif token in [NajaParser.T__5]:
self.enterOuterAlt(localctx, 3)
self.state = 54
self.cmdescrita()
pass
elif token in [NajaParser.ID]:
self.enterOuterAlt(localctx, 4)
self.state = 55
self.cmdattrib()
pass
elif token in [NajaParser.T__6]:
self.enterOuterAlt(localctx, 5)
self.state = 56
self.cmdselecao()
pass
elif token in [NajaParser.T__8]:
self.enterOuterAlt(localctx, 6)
self.state = 57
self.cmdenquanto()
pass
elif token in [NajaParser.T__9]:
self.enterOuterAlt(localctx, 7)
self.state = 58
self.cmdexecute()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdleituraContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def ID(self):
return self.getToken(NajaParser.ID, 0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def getRuleIndex(self):
return NajaParser.RULE_cmdleitura
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdleitura" ):
listener.enterCmdleitura(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdleitura" ):
listener.exitCmdleitura(self)
def cmdleitura(self):
localctx = NajaParser.CmdleituraContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_cmdleitura)
try:
self.enterOuterAlt(localctx, 1)
self.state = 61
self.match(NajaParser.T__4)
self.state = 62
self.match(NajaParser.AP)
self.state = 63
self.match(NajaParser.ID)
self.state = 64
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdescritaContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def escrita(self):
return self.getTypedRuleContext(NajaParser.EscritaContext,0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def getRuleIndex(self):
return NajaParser.RULE_cmdescrita
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdescrita" ):
listener.enterCmdescrita(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdescrita" ):
listener.exitCmdescrita(self)
def cmdescrita(self):
localctx = NajaParser.CmdescritaContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_cmdescrita)
try:
self.enterOuterAlt(localctx, 1)
self.state = 66
self.match(NajaParser.T__5)
self.state = 67
self.match(NajaParser.AP)
self.state = 68
self.escrita()
self.state = 69
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class EscritaContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def TEXTO(self):
return self.getToken(NajaParser.TEXTO, 0)
def getRuleIndex(self):
return NajaParser.RULE_escrita
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEscrita" ):
listener.enterEscrita(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEscrita" ):
listener.exitEscrita(self)
def escrita(self):
localctx = NajaParser.EscritaContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_escrita)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 71
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.TEXTO):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdattribContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def ATTR(self):
return self.getToken(NajaParser.ATTR, 0)
def expr(self):
return self.getTypedRuleContext(NajaParser.ExprContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmdattrib
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdattrib" ):
listener.enterCmdattrib(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdattrib" ):
listener.exitCmdattrib(self)
def cmdattrib(self):
localctx = NajaParser.CmdattribContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_cmdattrib)
try:
self.enterOuterAlt(localctx, 1)
self.state = 73
self.match(NajaParser.ID)
self.state = 74
self.match(NajaParser.ATTR)
self.state = 75
self.expr()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdselecaoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def cmdelse(self):
return self.getTypedRuleContext(NajaParser.CmdelseContext,0)
def getRuleIndex(self):
return NajaParser.RULE_cmdselecao
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdselecao" ):
listener.enterCmdselecao(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdselecao" ):
listener.exitCmdselecao(self)
def cmdselecao(self):
localctx = NajaParser.CmdselecaoContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_cmdselecao)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 77
self.match(NajaParser.T__6)
self.state = 78
self.cmdcondicao()
self.state = 79
self.match(NajaParser.ACH)
self.state = 81
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 80
self.cmd()
self.state = 83
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 85
self.match(NajaParser.FCH)
self.state = 87
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==NajaParser.T__7:
self.state = 86
self.cmdelse()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdelseContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdelse
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdelse" ):
listener.enterCmdelse(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdelse" ):
listener.exitCmdelse(self)
def cmdelse(self):
localctx = NajaParser.CmdelseContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_cmdelse)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 89
self.match(NajaParser.T__7)
self.state = 90
self.match(NajaParser.ACH)
self.state = 92
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 91
self.cmd()
self.state = 94
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 96
self.match(NajaParser.FCH)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdenquantoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdenquanto
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdenquanto" ):
listener.enterCmdenquanto(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdenquanto" ):
listener.exitCmdenquanto(self)
def cmdenquanto(self):
localctx = NajaParser.CmdenquantoContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_cmdenquanto)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 98
self.match(NajaParser.T__8)
self.state = 99
self.cmdcondicao()
self.state = 100
self.match(NajaParser.ACH)
self.state = 102
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 101
self.cmd()
self.state = 104
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 106
self.match(NajaParser.FCH)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdexecuteContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ACH(self):
return self.getToken(NajaParser.ACH, 0)
def FCH(self):
return self.getToken(NajaParser.FCH, 0)
def cmdcondicao(self):
return self.getTypedRuleContext(NajaParser.CmdcondicaoContext,0)
def cmd(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.CmdContext)
else:
return self.getTypedRuleContext(NajaParser.CmdContext,i)
def getRuleIndex(self):
return NajaParser.RULE_cmdexecute
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdexecute" ):
listener.enterCmdexecute(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdexecute" ):
listener.exitCmdexecute(self)
def cmdexecute(self):
localctx = NajaParser.CmdexecuteContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_cmdexecute)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 108
self.match(NajaParser.T__9)
self.state = 109
self.match(NajaParser.ACH)
self.state = 111
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 110
self.cmd()
self.state = 113
self._errHandler.sync(self)
_la = self._input.LA(1)
if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << NajaParser.T__2) | (1 << NajaParser.T__3) | (1 << NajaParser.T__4) | (1 << NajaParser.T__5) | (1 << NajaParser.T__6) | (1 << NajaParser.T__8) | (1 << NajaParser.T__9) | (1 << NajaParser.ID))) != 0)):
break
self.state = 115
self.match(NajaParser.FCH)
self.state = 116
self.match(NajaParser.T__8)
self.state = 117
self.cmdcondicao()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CmdcondicaoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AP(self):
return self.getToken(NajaParser.AP, 0)
def OPREL(self):
return self.getToken(NajaParser.OPREL, 0)
def FP(self):
return self.getToken(NajaParser.FP, 0)
def ID(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.ID)
else:
return self.getToken(NajaParser.ID, i)
def NUMBER(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.NUMBER)
else:
return self.getToken(NajaParser.NUMBER, i)
def getRuleIndex(self):
return NajaParser.RULE_cmdcondicao
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCmdcondicao" ):
listener.enterCmdcondicao(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCmdcondicao" ):
listener.exitCmdcondicao(self)
def cmdcondicao(self):
localctx = NajaParser.CmdcondicaoContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_cmdcondicao)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 119
self.match(NajaParser.AP)
self.state = 120
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 121
self.match(NajaParser.OPREL)
self.state = 122
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 123
self.match(NajaParser.FP)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def TEXTO(self):
return self.getToken(NajaParser.TEXTO, 0)
def termo(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(NajaParser.TermoContext)
else:
return self.getTypedRuleContext(NajaParser.TermoContext,i)
def OP(self, i:int=None):
if i is None:
return self.getTokens(NajaParser.OP)
else:
return self.getToken(NajaParser.OP, i)
def getRuleIndex(self):
return NajaParser.RULE_expr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr" ):
listener.enterExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr" ):
listener.exitExpr(self)
def expr(self):
localctx = NajaParser.ExprContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_expr)
self._la = 0 # Token type
try:
self.state = 134
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [NajaParser.TEXTO]:
self.enterOuterAlt(localctx, 1)
self.state = 125
self.match(NajaParser.TEXTO)
pass
elif token in [NajaParser.ID, NajaParser.NUMBER]:
self.enterOuterAlt(localctx, 2)
self.state = 126
self.termo()
self.state = 131
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==NajaParser.OP:
self.state = 127
self.match(NajaParser.OP)
self.state = 128
self.termo()
self.state = 133
self._errHandler.sync(self)
_la = self._input.LA(1)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TermoContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(NajaParser.ID, 0)
def NUMBER(self):
return self.getToken(NajaParser.NUMBER, 0)
def getRuleIndex(self):
return NajaParser.RULE_termo
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTermo" ):
listener.enterTermo(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTermo" ):
listener.exitTermo(self)
def termo(self):
localctx = NajaParser.TermoContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_termo)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 136
_la = self._input.LA(1)
if not(_la==NajaParser.ID or _la==NajaParser.NUMBER):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
|
[
"marcelo.cfsf@gmail.com"
] |
marcelo.cfsf@gmail.com
|
2cd97cb8b0b9c6e273657a730b6c9cceac772bfc
|
694760b87cbf4b72eb2cfc554fe3818f666c81a0
|
/source code/hello-world.py
|
327a22d8d96d42ef4d2e35959436a7d662703bee
|
[] |
no_license
|
vipermax/dummy-project
|
a5aac2860849abf46efdbcddd9a35c361aeb9481
|
dd0047fac7ad53e428b1ff7b208d319d79efa853
|
refs/heads/master
| 2021-01-10T01:30:36.137587
| 2016-03-01T08:58:58
| 2016-03-01T08:58:58
| 52,859,628
| 0
| 0
| null | 2016-03-01T08:57:32
| 2016-03-01T08:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
print "Hello World!"
print "Hello Again and again"
print "I like typing this."
print "This is a lot of fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
print "Hey, here is something new!"
print "some update from master branch"
|
[
"vipermax@gmail.com"
] |
vipermax@gmail.com
|
acea4cdf9cddd739a1daddc42cb820e70fe0e59c
|
3a18085d011b2dfc2c15ca6eb10838c604ef8a2c
|
/transform_web_traffic.py
|
60b01996ed53bf8eaf3b2b944629e994c5dd01a9
|
[] |
no_license
|
ericness/when_i_work_code_challenge
|
049f986df9cc9c1de29f502f006e138e119bac70
|
7e67505ebc451138327effd51ec967f200ee9d0a
|
refs/heads/master
| 2021-07-24T06:42:34.133140
| 2017-11-02T20:25:05
| 2017-11-02T20:25:05
| 109,053,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,285
|
py
|
import argparse
import io
import re
import pandas as pd
import boto3
import botocore
# parse command args to set run configuration
parser = argparse.ArgumentParser(description=('Transform raw web traffic data '
'into a pivoted table of aggregated time per '
'path by user.'))
parser.add_argument('bucket', type=str,
help='Name of S3 bucket that contains web traffic data')
parser.add_argument('--prefix', type=str,
help='Prefix to filter S3 keys by')
parser.add_argument('--output', type=str,
help='Name of output CSV file')
args = parser.parse_args()
# set configuration variables from command line args
S3_BUCKET_NAME = args.bucket
if args.prefix:
S3_PREFIX = args.prefix
else:
S3_PREFIX = ''
if args.output:
OUTPUT_CSV_NAME = args.output
else:
OUTPUT_CSV_NAME = 'web_traffic.csv'
def clean_web_traffic_data(web_traffic_df, s3_object_name):
"""Return a dataframe with any corrupt data removed and data types
corrected.
web_traffic_df - dataframe with fields path, user_id and length
s3_object_name - name of source file to use in status messages
"""
frame_size = len(web_traffic_df.index)
# check that format of path is valid. remove any invalid rows.
web_traffic_df = web_traffic_df[web_traffic_df['path'].str.
match('^(/[\w-]*)+\s*$') == True]
filter_count = frame_size - len(web_traffic_df.index)
if filter_count != 0:
print(f'{filter_count} rows filtered out of {s3_object_name} because '
f'of invalid path formats.')
frame_size = len(web_traffic_df.index)
# check that all length values are integers
if web_traffic_df['length'].dtype != 'int64':
web_traffic_df = web_traffic_df[web_traffic_df['length'].astype('str').
str.isdigit() == True]
filter_count = frame_size - len(web_traffic_df.index)
if filter_count != 0:
print(f'{filter_count} rows filtered out of {s3_object_name} '
f'because field length is non-integer.')
web_traffic_df['length'] = web_traffic_df['length'].astype(int)
return web_traffic_df
# use the UNSIGNED signature version for anonymous access
s3 = boto3.resource('s3', config=botocore.client.
Config(signature_version=botocore.UNSIGNED))
# set up objects to iterate through list of S3 objects
s3_bucket = s3.Bucket(S3_BUCKET_NAME)
if S3_PREFIX != '':
s3_bucket_objects = s3_bucket.objects.filter(Prefix=S3_PREFIX)
else:
s3_bucket_objects = s3_bucket.objects.all()
# list of dataframes created from the CSV files
web_traffic_list = []
print(f'Getting list of CSV files to process.')
# iterate through CSV files and parse them into dataframes
try:
for s3_obj in s3_bucket_objects:
# only process CSV files
if re.match('.*\.csv$', s3_obj.key):
obj = s3.Object(s3_obj.bucket_name, s3_obj.key)
web_traffic_subset = pd.read_csv(io.BytesIO(obj.get()['Body'].
read()), encoding='utf8')
print(f'Processing file {s3_obj.key}.')
# check structure and contents of dataframe
if set(['user_id', 'path', 'length']).issubset(web_traffic_subset.
columns):
web_traffic_subset = clean_web_traffic_data(web_traffic_subset,
s3_obj.key)
web_traffic_list.append(web_traffic_subset[['user_id', 'path',
'length']])
else:
print(f'Data in file {s3_obj.key} was skipped because it does '
f'not contain fields user_id, path and length.')
except botocore.exceptions.ClientError as e:
print(e.response['Error']['Message'])
exit()
# make sure that at least one file was processed
if len(web_traffic_list) == 0:
print(f'There are no CSV files with the proper structure to process.')
exit()
print(f'All files have been ingested. Beginning data transformation.')
# combine the dataframes from all the files into one large dataframe
web_traffic = pd.concat(web_traffic_list, ignore_index=True)
# aggregate the length of time that each user spent on each path
web_traffic_user_path = web_traffic.groupby(['user_id','path'])['length'].sum()
# pivot the table so that the path names are in columns
web_traffic_user = web_traffic_user_path.reset_index()
web_traffic_user = web_traffic_user.pivot(index='user_id', columns='path',
values='length')
# fill in any missing data with zeros
web_traffic_user = web_traffic_user.fillna(0)
# dtype converts to float when pivoting because of the presence of NaNs.
# convert the data type back to int.
web_traffic_user = web_traffic_user.astype(dtype='int')
print(f'Writing transformed data to file {OUTPUT_CSV_NAME}.')
# output data to specified location
web_traffic_user.to_csv(OUTPUT_CSV_NAME)
print(f'Done!')
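# A toy illustration of the aggregate-and-pivot step above (hypothetical numbers, not from real data):
#   input rows:  user_id=1 path=/a length=10 ; user_id=1 path=/a length=5 ; user_id=1 path=/b length=3
#   after groupby(['user_id', 'path'])['length'].sum() and pivot(index='user_id', columns='path'):
#       path     /a  /b
#       user_id
#       1        15   3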
|
[
"ericness@UdemySparkCourse.fpcodpc5vfjurkxi5gs5htsn0d.gx.internal.cloudapp.net"
] |
ericness@UdemySparkCourse.fpcodpc5vfjurkxi5gs5htsn0d.gx.internal.cloudapp.net
|
59429ee5ee8ca7e0edd5fe634b9e3e46f97d9c73
|
9bf9ba2ff40e63e87efc67d0d90416b3e839ca3f
|
/TwoPlayerTicTacToe2.py
|
1976094f651cf9f5f5bca70443c4c2911928cc3e
|
[] |
no_license
|
Muhammad-Ahmed-Mansoor/Python-tic-tac-toe
|
243353dda52cad2256633cd979fe989c8fdc5f13
|
79d3619aea057bafab015498de3ae85418cf7889
|
refs/heads/master
| 2022-11-13T21:57:52.504479
| 2020-07-07T15:48:47
| 2020-07-07T15:48:47
| 277,858,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,952
|
py
|
import os;
def boardPrint(): #displays the board
global board
print('\n') #two blank lines for clarity's sake
for i in [7,8,9,4,5,6,1,2,3]:#to match keyboard numpad
if i % 3 !=0:#to avoid '|' at the end of a row
print(board[i],'|',end=' ')
else: #to insert new line at end of row
print (board[i])
if i !=3:
print('---------')#no '--------' after last row
continue
print('\n') #two blank lines for clarity's sake
return
#note that the variable move is used twice, each time locally, never globally
def boardManager(move): #makes changes to the board according to given move
global board,currentPlayer
board[move]=currentPlayer
return
def moveInput():
global currentPlayer
move='whatever' #initializing move before while loop
while move not in board :
move=input(currentPlayer+' moves:')
continue
move=int(move)#move is not made int at input time to account for mismatched string inputs
return move
def judge():#returns the state of the match
global board,moveCount
#checking for a win in the rows
for a in range(1,10,3): #a takes values 1,4,7
if board[a]==board[a+1]==board[a+2]:#checks an entire row for equality
return 'win'
continue
#checking for a win in the columns
for b in range(1,4):
if board[b]==board[b+3]==board[b+6]:#checks an entire column for equality
return 'win'
continue
#checking for a win in diagonals
if board[1]==board[5]==board[9] or board[3]==board[5]==board[7]:
return 'win'
#check for draw
if moveCount==9:
return 'draw'
#if no win or draw, match continues
return 'continue'
#main program starts here
while True:#so game plays until user closes
board=[str(i) for i in range(10)]#board[0] to be ignored for simplicity & readability. board[1] to board[9] represent
#the 9 squares of a tic tac toe board.
moveCount=0
#starting the game loop:
while judge()=='continue':
if moveCount %2==0:
currentPlayer='X' #as X goes first so gets even numbered moves
else :
currentPlayer='O'
boardPrint()
boardManager(moveInput())
os.system("cls")
moveCount+=1
continue
boardPrint()
if judge()=='win':
print(currentPlayer+' wins.')
elif judge()=='draw':
print ('Match Drawn')
print()
restart=input('Press enter to restart or type exit to exit.')
if restart=='exit':
break;
os.system("cls")
continue
|
[
"noreply@github.com"
] |
noreply@github.com
|
88850f9c8b1aef4142ac6d51fb5ce192a8482057
|
be1e8444482e40df5d02d57964f61cfbd9249f13
|
/Django-0.90/django/core/db/backends/postgresql.py
|
b1b2d9cb52d964e5d1fd6012266dabed23eedd4c
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
tungvx/deploy
|
9946d4350f5fbc5da25d45505b75384fd40e6088
|
9e1917c6c645b4ce0efe115b0da76027d4bc634c
|
refs/heads/master
| 2021-01-02T09:08:45.691746
| 2011-11-12T19:44:48
| 2011-11-12T19:44:48
| 2,763,145
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,276
|
py
|
"""
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
from django.core.db import base, typecasts
import psycopg as Database
DatabaseError = Database.DatabaseError
class DatabaseWrapper:
def __init__(self):
self.connection = None
self.queries = []
def cursor(self):
from django.conf.settings import DATABASE_USER, DATABASE_NAME, DATABASE_HOST, DATABASE_PORT, DATABASE_PASSWORD, DEBUG, TIME_ZONE
if self.connection is None:
if DATABASE_NAME == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "You need to specify DATABASE_NAME in your Django settings file."
conn_string = "dbname=%s" % DATABASE_NAME
if DATABASE_USER:
conn_string = "user=%s %s" % (DATABASE_USER, conn_string)
if DATABASE_PASSWORD:
conn_string += " password='%s'" % DATABASE_PASSWORD
if DATABASE_HOST:
conn_string += " host=%s" % DATABASE_HOST
if DATABASE_PORT:
conn_string += " port=%s" % DATABASE_PORT
self.connection = Database.connect(conn_string)
self.connection.set_isolation_level(1) # make transactions transparent to all cursors
cursor = self.connection.cursor()
cursor.execute("SET TIME ZONE %s", [TIME_ZONE])
if DEBUG:
return base.CursorDebugWrapper(cursor, self)
return cursor
def commit(self):
return self.connection.commit()
def rollback(self):
if self.connection:
return self.connection.rollback()
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def dictfetchone(cursor):
"Returns a row from the cursor as a dict"
return cursor.dictfetchone()
def dictfetchmany(cursor, number):
"Returns a certain number of rows from a cursor as a dict"
return cursor.dictfetchmany(number)
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
return cursor.dictfetchall()
def get_last_insert_id(cursor, table_name, pk_name):
cursor.execute("SELECT CURRVAL('%s_%s_seq')" % (table_name, pk_name))
return cursor.fetchone()[0]
def get_date_extract_sql(lookup_type, table_name):
# lookup_type is 'year', 'month', 'day'
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
return "EXTRACT('%s' FROM %s)" % (lookup_type, table_name)
def get_date_trunc_sql(lookup_type, field_name):
# lookup_type is 'year', 'month', 'day'
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def get_limit_offset_sql(limit, offset=None):
sql = "LIMIT %s" % limit
if offset and offset != 0:
sql += " OFFSET %s" % offset
return sql
def get_random_function_sql():
return "RANDOM()"
def get_table_list(cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_relations(cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
try:
# row[0] and row[1] are like "{2}", so strip the curly braces.
relations[int(row[0][1:-1]) - 1] = (int(row[1][1:-1]) - 1, row[2])
except ValueError:
continue
return relations
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# uses mx.DateTime by default.
try:
Database.register_type(Database.new_type((1082,), "DATE", typecasts.typecast_date))
except AttributeError:
raise Exception, "You appear to be using psycopg version 2, which isn't supported yet, because it's still in beta. Use psycopg version 1 instead: http://initd.org/projects/psycopg1"
Database.register_type(Database.new_type((1083,1266), "TIME", typecasts.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", typecasts.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", typecasts.typecast_boolean))
OPERATOR_MAPPING = {
'exact': '=',
'iexact': 'ILIKE',
'contains': 'LIKE',
'icontains': 'ILIKE',
'ne': '!=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
'startswith': 'LIKE',
'endswith': 'LIKE',
'istartswith': 'ILIKE',
'iendswith': 'ILIKE',
}
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
DATA_TYPES = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(maxlength)s)',
'CommaSeparatedIntegerField': 'varchar(%(maxlength)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'EmailField': 'varchar(75)',
'FileField': 'varchar(100)',
'FilePathField': 'varchar(100)',
'FloatField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'ImageField': 'varchar(100)',
'IntegerField': 'integer',
'IPAddressField': 'inet',
'ManyToManyField': None,
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PhoneNumberField': 'varchar(20)',
'PositiveIntegerField': 'integer CHECK (%(column)s >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK (%(column)s >= 0)',
'SlugField': 'varchar(50)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'URLField': 'varchar(200)',
'USStateField': 'varchar(2)',
}
# Maps type codes to Django Field types.
DATA_TYPES_REVERSE = {
16: 'BooleanField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
869: 'IPAddressField',
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'FloatField',
}
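# For illustration (not part of the original module), given the helpers and mappings above:
#   get_limit_offset_sql(10, 20)                  -> "LIMIT 10 OFFSET 20"
#   DatabaseWrapper().quote_name('auth_user')     -> '"auth_user"'
#   DATA_TYPES['CharField'] % {'maxlength': 100}  -> 'varchar(100)'
#   get_relations(cursor, 'some_table') might return {0: (0, 'auth_user')}, i.e. column 0 of
#   'some_table' references column 0 of 'auth_user' (table names here are hypothetical).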
|
[
"toilatung90@gmail.com"
] |
toilatung90@gmail.com
|
bcd00e175fe8619264aa97bc0a61bbf04c8d0fc0
|
61a887eaf972bda8839728292147bf923103e8a1
|
/representations/explicit.py
|
4cf27c5cfa3ff12a6320ead1c90aa30c691999e8
|
[] |
no_license
|
soltustik/RHG
|
45f05fb215f0e2fbcd1b51b8a44b78ae09454b5b
|
c94de165285cf06f3d101c316173175328874848
|
refs/heads/master
| 2023-01-01T02:17:36.041309
| 2020-10-07T10:00:24
| 2020-10-07T10:00:24
| 300,230,313
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
import heapq
from scipy.sparse import dok_matrix, csr_matrix
from scipy.stats import logistic
import numpy as np
from representations.matrix_serializer import load_vocabulary, load_matrix
class Explicit:
"""
Base class for explicit representations. Assumes that the serialized input is e^PMI.
"""
def __init__(self, path, normalize=True):
self.wi, self.iw = load_vocabulary(path + '.words.vocab')
self.ci, self.ic = load_vocabulary(path + '.contexts.vocab')
self.m = load_matrix(path)
self.m.data = np.log(self.m.data)
self.normal = normalize
if normalize:
self.normalize()
def normalize(self):
m2 = self.m.copy()
m2.data **= 2
norm = np.reciprocal(np.sqrt(np.array(m2.sum(axis=1))[:, 0]))
normalizer = dok_matrix((len(norm), len(norm)))
normalizer.setdiag(norm)
self.m = normalizer.tocsr().dot(self.m)
def represent(self, w):
if w in self.wi:
return self.m[self.wi[w], :]
else:
return csr_matrix((1, len(self.ic)))
def similarity_first_order(self, w, c):
return self.m[self.wi[w], self.ci[c]]
def similarity(self, w1, w2):
"""
Assumes the vectors have been normalized.
"""
return self.represent(w1).dot(self.represent(w2).T)[0, 0]
def closest_contexts(self, w, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.represent(w)
return heapq.nlargest(n, zip(scores.data, [self.ic[i] for i in scores.indices]))
def closest(self, w, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.m.dot(self.represent(w).T).T.tocsr()
return heapq.nlargest(n, zip(scores.data, [self.iw[i] for i in scores.indices]))
class PositiveExplicit(Explicit):
"""
Positive PMI (PPMI) with negative sampling (neg).
Negative samples shift the PMI matrix before truncation.
"""
def __init__(self, path, normalize=True, neg=1):
Explicit.__init__(self, path, normalize)
self.m.data -= np.log(neg)
self.m.data[self.m.data < 0] = 0
self.m.eliminate_zeros()
if normalize:
self.normalize()
class BPMI(Explicit):
"""
Binarized PMI
"""
def __init__(self, path, normalize=True, neg=1):
Explicit.__init__(self, path, normalize)
self.m.data -= np.log(neg)
self.m.data[self.m.data < 0] = 0
self.m.data[self.m.data > 0] = 1
self.m.eliminate_zeros()
if normalize:
self.normalize()
class Squashed(Explicit):
"""
Squashed SPMI
"""
def __init__(self, path, normalize=True, neg=1):
Explicit.__init__(self, path, normalize)
self.m.data -= np.log(neg)
self.m.data = logistic.cdf(self.m.data)
self.m.eliminate_zeros()
if normalize:
self.normalize()
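# Note (not in the original source): all three subclasses apply the same shift,
# PMI - log(neg), and differ only in how the shifted values are mapped afterwards:
#   PositiveExplicit: x -> max(x, 0)            (classic PPMI truncation)
#   BPMI:             x -> 1 if x > 0 else 0    (binarized)
#   Squashed:         x -> logistic.cdf(x)      (sigmoid-squashed)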
|
[
"noreply@github.com"
] |
noreply@github.com
|
02185c94d3eb3432880096e3dc2c60d9712cb52f
|
b78849c6afe4e2a13e464ee21c3e31758d5d17de
|
/imagedownloader Web_scraping with gui.py
|
8065b39c29080438704e3582843739cb9ff955a5
|
[] |
no_license
|
kevalmahajan/Python-Projects
|
7c9184d91f1506f87ceb9157d88214b3547f5c17
|
91379c4c159ee30019c6e46164374994855d30b1
|
refs/heads/master
| 2023-02-14T15:46:52.142843
| 2020-12-23T05:53:12
| 2020-12-23T05:53:12
| 280,148,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,122
|
py
|
import os
import requests # to sent GET requests
from bs4 import BeautifulSoup # to parse HTML
from tkinter import *
import tkinter as tk
import traceback
from tkinter import messagebox as m_box
yahoo_img = \
'https://in.images.search.yahoo.com/search/images;_ylt=AwrwJSJD2Q1fTlkATCK8HAx.;_ylc=X1MDMjExNDcyMzAwNARfcgMyBGZyAwRncHJpZAN6VDFjeUl0WlFfLnRqMGU1YlNTTGVBBG5fc3VnZwMxMARvcmlnaW4DaW4uaW1hZ2VzLnNlYXJjaC55YWhvby5jb20EcG9zAzAEcHFzdHIDBHBxc3RybAMEcXN0cmwDNARxdWVyeQNkb2dzBHRfc3RtcAMxNTk0NzQzMTEw?fr2=sb-top-in.images.search&'
user_agent = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
}
save_folder = 'images'
#---------------------------------------------------------------------------------
def download_n():
root1 = Tk()
z = Canvas(root1, width=400,height=250)
root1.title("Download n Images")
Label(root1, text="What are you looking for?", fg = "Black",
font = "Verdana 10").place(x=90,y=20)
e1=Entry(root1)
e1.place(x=90,y=50)
Label(root1, text="How many images do you want? ", fg = "Black",
font = "Verdana 10").place(x=90,y=90)
e2=Entry(root1)
e2.place(x=90,y=120)
button4 = tk.Button(root1, text='Download', width=17,
bg="#D3D3D3",fg='black',
command=lambda:download_images(e1,e2))
button4.place(x=90,y=160)
button5 = tk.Button(root1, text='Back', width=10,
bg="#D3D3D3",fg='black',
command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
z.pack()
def download_images(e1,e2):
try:
root1 = Tk()
root1.title("Done")
data=e1.get()
n_images=e2.get()
if data=='' or n_images=='':
root1.withdraw()
m_box.showerror('Error','Please fill both entries ')
else:
data=str(data)
n_images=int(n_images)
# print(data,n_images)
z = Canvas(root1, width=260,height=110)
print('Start searching...')
# get url query string
searchurl = yahoo_img + 'p=' + data
#print(searchurl)
# request url, without user_agent the permission gets denied
response = requests.get(searchurl, headers=user_agent)
html = response.text
soup = BeautifulSoup(html, 'html.parser')
results = soup.find_all('img',class_= 'process',limit=n_images)
# extract the link from the img tag
imagelinks= []
            for res in results:
                url1 = res.attrs.get('data-src')
                imagelinks.append(url1)
print(f'found {len(imagelinks)} images')
Label(root1, text=f'found {len(imagelinks)} images', fg = "Black",
font = "Verdana 10").place(x=70,y=20)
print('Start downloading...')
# Label(root1, text="Start downloading...", fg = "Black",
# font = "Verdana 10").pack()
for i, imagelink in enumerate(imagelinks):
# open image link and save as file
response = requests.get(imagelink)
imagename = save_folder + '/' + data + str(i+1) + '.jpg'
with open(imagename, 'wb') as file:
file.write(response.content)
print('Done')
Label(root1, text="DOWNLOADING COMPLETE", fg = "Black",
font = "Verdana 10").place(x=40,y=40)
button5 = tk.Button(root1, text='OK', width=10,
bg="#D3D3D3",fg='black',
command=root1.destroy).place(x=90,y=70)
z.pack()
except ValueError:
root1.withdraw()
m_box.showwarning('Error','Enter a Valid Number')
# print("enter valid number")
# root2 = Tk()
# z = Canvas(root2, width=260,height=110)
# Label(root2, text="Enter a valid Number", fg = "Black",
# font = "Verdana 10").place(x=60,y=30)
# button5 = tk.Button(root2, text='OK', width=10,
# bg="#D3D3D3",fg='black',
# command=root2.destroy).place(x=90,y=70)
#
# z.pack()
#------------------------------------------------------------------------------------
def url_n():
root1 = Tk()
root1.title("Download Image using url")
z = Canvas(root1, width=400,height=250)
Label(root1, text="Enter Url : ", fg = "Black",
font = "Verdana 10").place(x=90,y=20)
e1=Entry(root1,width=35)
e1.place(x=90,y=50)
Label(root1, text="Name of the image to be saved :", fg = "Black",
font = "Verdana 10").place(x=90,y=90)
e2=Entry(root1)
e2.place(x=90,y=120)
button4 = tk.Button(root1, text='Download', width=17,
bg="#D3D3D3",fg='black',
command=lambda:url_images(e1,e2)).place(x=90,y=160)
button5 = tk.Button(root1, text='Back', width=10,
bg="#D3D3D3",fg='black',
command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
z.pack()
def url_images(e1,e2):
try:
root1 = Tk()
root1.title("Done")
z = Canvas(root1, width=260,height=110)
imagelink=e1.get()
data=e2.get()
if imagelink=='' or data=='':
root1.withdraw()
m_box.showerror('Error','Please fill both entries ')
else:
response = requests.get(imagelink)
imagename = save_folder + '/' + data + '.jpg'
with open(imagename, 'wb') as file:
file.write(response.content)
print('Done')
Label(root1, text="IMAGE DOWNLOADED", fg = "Black",
font = "Verdana 10").place(x=60,y=30)
button5 = tk.Button(root1, text='OK', width=10,
bg="#D3D3D3",fg='black',
command=root1.destroy).place(x=90,y=70)
z.pack()
except :
root1.withdraw()
m_box.showwarning('Invalid Url','Enter a Valid URL')
#------------------------------------------------------------------------------------------
def insta_n():
root1 = Tk()
root1.title("Download Instagram Image ")
z = Canvas(root1, width=400,height=250)
Label(root1, text="Enter Instagram Image link : ", fg = "Black",
font = "Verdana 10").place(x=90,y=20)
e1=Entry(root1,width=35)
e1.place(x=90,y=50)
Label(root1, text="Name of the image to be saved :", fg = "Black",
font = "Verdana 10").place(x=90,y=90)
e2=Entry(root1)
e2.place(x=90,y=120)
button4 = tk.Button(root1, text='Download', width=17,
bg="#D3D3D3",fg='black',
command=lambda:insta_images(e1,e2)).place(x=90,y=160)
button5 = tk.Button(root1, text='Back', width=10,
bg="#D3D3D3",fg='black',
command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
z.pack()
def insta_images(e1,e2):
try:
root1 = Tk()
root1.title("Done")
z = Canvas(root1, width=260,height=110)
url=e1.get()
data=e2.get()
        if url == '' or data == '':
root1.withdraw()
m_box.showerror('Error','Please fill both entries ')
else:
usr_agent = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
}
response = requests.get(url, headers=usr_agent)
html = response.text
#soup = BeautifulSoup(html, 'html.parser')
soup = BeautifulSoup(html,'html.parser')
metaTag = soup.find_all('meta', {'property':'og:image'})
imagelink = metaTag[0]['content']
response = requests.get(imagelink)
imagename = save_folder + '/' + data + '.jpg'
with open(imagename, 'wb') as file:
file.write(response.content)
print('Done')
Label(root1, text="IMAGE DOWNLOADED", fg = "Black",
font = "Verdana 10").place(x=60,y=30)
button5 = tk.Button(root1, text='OK', width=10,
bg="#D3D3D3",fg='black',
command=root1.destroy).place(x=90,y=70)
z.pack()
except :
root1.withdraw()
m_box.showwarning('Invalid Instagram Link','Enter a Valid URL')
# print("Invalid Image Url")
# root2 = Tk()
# z = Canvas(root2, width=260,height=110)
# Label(root2, text="Invalid Image Url", fg = "Black",
# font = "Verdana 10").place(x=60,y=30)
# button5 = tk.Button(root2, text='OK', width=10,
# bg="#D3D3D3",fg='black',
# command=root2.destroy).place(x=90,y=70)
#
# z.pack()
def main():
if not os.path.exists(save_folder):
os.mkdir(save_folder)
root = Tk()
root.title("Image Downloader")
w = Canvas(root, width=400,height=250)
Label(root, text="Image Downloader", fg = "Black",
font = "Verdana 14",pady=10,padx=10,bg = "LightGrey").place(x=100,y=20)
button1 = tk.Button(root, text='Download n required images', width=35,
command=lambda: [download_n(),root.destroy()]).place(x=75,y=100)
button2 = tk.Button(root, text='Download via url', width=35,
command=lambda: [url_n(),root.destroy()]).place(x=75,y=140)
button3 = tk.Button(root, text='Download instagram images', width=35,
command=lambda: [insta_n(),root.destroy()]).place(x=75,y=180)
w.pack()
mainloop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d107a85fb3ea25bf12a822113f007101ca0d82e5
|
be82971799d625de703ad2c58d49d8bbb5b06fab
|
/TI-DSS-Python-ClientExample.py
|
bf9598bc77a30357a6e2880f0f13c8aa7ac66f07
|
[
"Unlicense"
] |
permissive
|
PML-Trey/TI-DSS-Python
|
2f4c9fe9c01979c2d11b371b907d180a0aa3c422
|
1862ff434d2eb0d9ad04f3df03ffe5109218a300
|
refs/heads/main
| 2023-03-07T12:07:27.787467
| 2021-02-23T13:37:45
| 2021-02-23T13:37:45
| 341,315,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
import Pyro4
dss = Pyro4.Proxy("PYRONAME:dss") # use name server object lookup uri shortcut
# Setup target configuration
# Populated with a path/file of a target configuraiton made using CCS
dss.setConfig("./targetConfig.ccxml")
# Connect to debugger and configure this debug session
# Populate with the name of the connection and the target cpu
# For instance "Texas Instruments XDS110 USB Debug Probe_0/C28xx_CPU1"
# This returns the session name that is used in all subsequent calls
# that interact with the target device
sessionName = dss.startDebugSession("Connection Name")
print('Connected to debugger')
# Connect to the target CPU using the debugger
dss.connectTarget(sessionName)
print('Connected to targets')
# Program the target device
# Change application.out to be the path to your executable
# This can take a while depending on the device
print('Programming target')
dss.loadProgram(sessionName, "./application.out")
print("Done programming")
# End the debug session and stop the debug server
dss.endDebugSession(sessionName)
dss.stopDebugServer()
|
[
"trey@polymorphiclabs.com"
] |
trey@polymorphiclabs.com
|
2b63fb46758a1f007ae3ed5ce851d0c3a99bb6e0
|
f5788e1e1d8522c0d4ae3b4668faa5537680cb07
|
/mutual_sale_discount_total/__openerp__.py
|
55acff682f3f211f074ab7660a836cc839f366de
|
[] |
no_license
|
mutualSecurity/mutual-erp-residential
|
8549e179af6df1ffceadf42369d69d4dd44f07ac
|
88debefc662dd1510a1d52a877ede4673c319532
|
refs/heads/master
| 2021-11-11T13:33:37.878051
| 2021-11-02T10:14:49
| 2021-11-02T10:14:49
| 71,433,705
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
{
'name': 'Sale Discount on Total Amount',
'version': '1.0',
'category': 'sale',
'sequence': 6,
'summary': "Discount on total in Sale and invoice with Discount limit and approval",
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': 'http://www.cybrosys.com',
'description': """
Sale Discount for Total Amount
=======================
Module to manage discount on the total amount in Sale,
as a specific amount or percentage.
""",
'depends': ['sale','mutual_sales', 'base', 'stock','mutual_inventory','mutual_reports','mutual_followups','mutual_project','mutual_mass_editing'],
'data': [
'views/sale_view.xml',
'views/account_invoice_view.xml',
'views/invoice_report.xml',
'views/sale_order_report.xml',
'views/sale_discount_approval_view.xml',
'views/sale_discount_approval_workflow.xml'
],
'demo': [
],
'installable': True,
'auto_install': False,
}
|
[
"pk_bscs@yahoo.com"
] |
pk_bscs@yahoo.com
|
e47f21b19c216ae807692a673b8184880a5aa25d
|
51761bbf3e42543687664291dd3a7d3ae9a90fd2
|
/utils.py
|
68c6c6eba79327b7c718fd4655159cd4dda8850b
|
[] |
no_license
|
MarcelaBarella/luizalabs_challenge
|
03612291e8f89875c1572eb301235bc5b6f7948d
|
12b977b6836222bcd7a8d8464a7b840425d2afe2
|
refs/heads/master
| 2020-03-20T11:43:29.243943
| 2019-11-22T14:59:50
| 2019-11-22T14:59:50
| 137,410,494
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from datetime import datetime
def str_to_datetime(date):
if type(date) != str:
return date
return datetime.strptime(date, '%d/%m/%Y %H:%M')
|
[
"marcela.barella@hotmail.com"
] |
marcela.barella@hotmail.com
|
b59262788ee519c9e2a4555e7cb75382fba2da3d
|
ca920a476e43b68d6d041fb5af098cecf2dbbbd0
|
/py-list-vulnerabilities/smartcheck.py
|
6c73fdeb20c2d2f5c4cbf17a130a430a41b7c19e
|
[
"Apache-2.0"
] |
permissive
|
felipecosta09/smartcheck-samples
|
8b74a8773bfb21e82b03eccd9f9090bdd6bdfca3
|
bdaade3b2c057abbdc1d437132ba043b14a00d14
|
refs/heads/master
| 2021-01-02T18:59:15.320466
| 2019-07-24T12:58:09
| 2019-07-24T13:09:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,520
|
py
|
#
# Copyright 2019 Trend Micro and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import requests
from docker_image import reference
class _SlightlyImprovedSession(requests.Session):
"""
A _SlightlyImprovedSession keeps track of the base URL and any kwargs that
should be passed to requests.
When you make a `get` or `post` request, the URL you provide will be
`urljoin`'d with the base URL, so relative URLs will work pretty well.
Technically, this is totally broken, because relative URLs should be
evaluated relative to the resource that provided the URL, but for our
purposes this works perfectly and really simplifies life, so we're
going to ignore the pedants.
"""
def __init__(self, base, **kwargs):
super(_SlightlyImprovedSession, self).__init__()
self.base = base
self.kwargs = kwargs
def post(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).post(
requests.compat.urljoin(self.base, url),
**kwargs
)
def get(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).get(
requests.compat.urljoin(self.base, url),
**kwargs
)
def delete(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).delete(
requests.compat.urljoin(self.base, url),
**kwargs
)
class Smartcheck(_SlightlyImprovedSession):
"""
A Smartcheck object provides some convenience methods for performing actions
using the Deep Security Smart Check API.
"""
def __init__(self, base, user, password, verify=True, trace=False, **kwargs):
"""Authenticate with the service and return a session."""
if not base.startswith('http'):
base = 'https://' + base
if not verify:
import urllib3
urllib3.disable_warnings()
# Turn on trace logging if requested
if trace:
import logging
try:
import http.client as http_client
except ImportError:
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger('requests.packages.urllib3')
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
super(Smartcheck, self).__init__(base, verify=verify, **kwargs)
self.headers.update({'X-Api-Version': '2018-05-01'})
self.credentials = {
'user': {
'userID': user,
'password': password
}
}
def __enter__(self):
"""
Context manager method that's called when someone creates a
with Smartcheck(...) as session:
block. We'll start the session when the block is entered.
"""
# Create the session with the credentials that were provided in
# the constructor.
response = self.post('/api/sessions', json=self.credentials)
if not response.ok:
raise CreateSessionException(response)
# Parse the created session
session = response.json()
# Save the session href (needed for later refreshes (TODO)
# or to terminate the session when we're done).
self.session_href = session['href']
# Put the session token into the `Authorization` header so all
# requests in this session get authenticated and authorized.
self.headers.update({
'Authorization': f'Bearer {session["token"]}'
})
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
"""
Context manager method that's called when someone exits a
with Smartcheck(...) as session:
block. We'll use this trigger to terminate the session.
"""
# Send `DELETE {href}` to terminate the session
self.delete(self.session_href)
# Don't eat any exception that might be coming...
return False
def _list(self, url, exception_kind, key, **kwargs):
"""
Generic "list anything in Deep Security Smart Check" method. Is a generator that
will yield the individual items being listed and retrieve additional pages of data
as needed until there are no more.
The way listing resources works in the Deep Security Smart Check API is as follows:
1. Perform `GET /api/things` to get the first page of things.
2. The response will have the structure `{ things: [...] }`
and if there is more data there will be a header `Link: <...>;rel="next"`
that will take you to the next page. If there is no more data,
the `Link rel=next` header won't be there.
This method is the generic implementation that all of the `list*` methods will call.
"""
# Get the first page of results
response = self.get(url, **kwargs)
while True:
# If the request failed, bail out -- we've got a specific exception type
# for each kind of thing we list, so raise the appropriate exception
if not response.ok:
raise exception_kind(response)
# All of the `list*` responses have the same structure:
# { [key]: [items], next: "cursor?" }
# Use the key to extract the items and yield them one at a time.
for item in response.json()[key]:
yield item
# We use the link in the `Link: rel='next'` header as it's easier
# than building a URL based on the cursor in the body. If there is
# no header then there's no more data.
if not 'next' in response.links:
break
# Extract the URL from the `Link: rel='next'` header.
url = response.links['next']['url']
# Get the next page of results, we'll see you again at the top of the loop
response = self.get(url)
def list_scans(self, image_ref=None, limit=None, **kwargs):
"""List scans that match an image reference."""
# If the caller provided any parameters (like `limit`), then extract them here
# as we've got more to add...
params = kwargs.get('params', {})
# Delete `params` from `kwargs` as we'll be passing them in explicitly
if 'params' in kwargs:
del(kwargs['params'])
if image_ref is not None:
# Parse the image reference into its component parts
image_ref = reference.Reference.parse(image_ref)
# The "hostname" part still needs to get split into the registry and repository
registry, repository = image_ref.split_hostname()
# Add query parameters to search on the image reference bits
params.update({
'registry': registry,
'repository': repository,
'tag': image_ref['tag'],
'digest': image_ref['digest'],
'exact': True,
})
if limit is not None:
params['limit'] = limit
# Yield the resulting scans
for scan in self._list('/api/scans', ListScansException, 'scans', params=params, **kwargs):
yield scan
def create_scan(self, image_ref, image_pull_auth=None, insecure_skip_registry_tls_verify=False):
"""Start a scan."""
# Parse the image reference into its component parts
parsed_ref = reference.Reference.parse(image_ref)
# The "hostname" part still needs to get split into the registry and repository
registry, repository = parsed_ref.split_hostname()
# Parse the provided image_pull_auth into an object if it's a string (assuming JSON).
# It will get serialized back into JSON in the request momentarily.
if isinstance(image_pull_auth, str):
image_pull_auth = json.loads(image_pull_auth)
# Send the request
response = self.post('/api/scans',
json={
'name': image_ref,
'source': {
'type': 'docker',
'registry': registry,
'repository': repository,
'tag': parsed_ref['tag'],
'digest': parsed_ref['digest'],
'credentials': image_pull_auth,
'insecureSkipVerify': insecure_skip_registry_tls_verify,
}
})
if not response.ok:
raise CreateScanException(response)
# Return the parsed scan object
return response.json()
def list_malware(self, scan):
"""List the malware found during a scan."""
# Scan results have malware identified per-layer to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any malware findings.
for layer in scan['details']['results']:
if 'malware' in layer:
for package in self._list(layer['malware'], ListMalwareException, 'malware'):
yield package
def list_content_findings(self, scan):
"""List the content findings found during a scan."""
# Scan results have content findings identified per-layer to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any content findings.
for layer in scan['details']['results']:
if 'contents' in layer:
for finding in self._list(layer['contents'], ListContentFindingsException, 'contents'):
yield finding
# Scan results have vulnerabilities identified per-layer (mostly) to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any vulnerability findings.
def list_vulnerable_packages(self, scan):
"""List the vulnerable packages found during a scan."""
for layer in scan['details']['results']:
if 'vulnerabilities' in layer:
for package in self._list(layer['vulnerabilities'], ListVulnerabilitiesException, 'vulnerabilities'):
yield package
# Scan results have checklist findings identified per-checklist and per-profile within
# each checklist. This means we need to go through each checklist and profile to find
# all the results.
def list_checklist_findings(self, scan):
"""List the checklist findings found during a scan."""
if 'checklists' in scan['details']:
for checklist in self._list(scan['details']['checklists'], ListChecklistsException, 'checklists'):
# Save details about the checklist so we can report it with the result
# without creating a new object for each result. This will help if the
# consumer wants to re-create the tree.
checklist_info = {
'id': checklist['id'],
'href': checklist['href'],
'title': checklist.get('title', None),
'version': checklist.get('version', None),
}
for profile in checklist['profiles']:
# Save details about the profile so we can report it with the result
# without creating a new object for each result. This will help if the
# consumer wants to re-create the tree.
profile_info = {
'id': profile['id'],
'title': profile.get('title', None),
}
for rule in self._list(profile['rules'], ListChecklistProfileRuleResultsException, 'rules'):
result = rule['result']
# "pass" and "not-applicable" aren't really findings... we may want a separate
# method to get all checklist results
if result == 'pass' or result == 'not-applicable':
continue
yield {
'checklist': checklist_info,
'profile': profile_info,
'result': rule
}
class CreateException(Exception):
def __init__(self, kind, response):
super(CreateException, self).__init__(
f'could not create {kind}: {response}'
)
self.response = response
class ListException(Exception):
def __init__(self, kind, response):
super(ListException, self).__init__(
f'*** WARNING: could not retrieve {kind}: {response}'
)
class CreateSessionException(CreateException):
def __init__(self, response):
super(CreateSessionException, self).__init__('session', response)
class CreateScanException(CreateException):
def __init__(self, response):
super(CreateScanException, self).__init__('scan', response)
class ListScansException(ListException):
def __init__(self, response):
super(ListScansException, self).__init__('scans', response)
class ListMalwareException(ListException):
def __init__(self, response):
super(ListMalwareException, self).__init__(
'malware', response
)
class ListVulnerabilitiesException(ListException):
def __init__(self, response):
super(ListVulnerabilitiesException, self).__init__(
'vulnerabilities', response
)
class ListContentFindingsException(ListException):
def __init__(self, response):
super(ListContentFindingsException, self).__init__(
'content findings', response
)
class ListChecklistsException(ListException):
def __init__(self, response):
super(ListChecklistsException, self).__init__(
'checklists', response
)
class ListChecklistProfileRuleResultsException(ListException):
def __init__(self, response):
super(ListChecklistProfileRuleResultsException, self).__init__(
'checklist profile rule results', response
)
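
# Illustrative usage sketch (not part of the original module): the host and
# credentials below are hypothetical placeholders, and 'id' is assumed to be one
# of the fields of the parsed scan objects returned by list_scans().
if __name__ == '__main__':
    with Smartcheck('https://smartcheck.example.com', 'administrator', 'password', verify=False) as session:
        for scan in session.list_scans(limit=5):
            print(scan.get('id'))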
|
[
"Geoff_Baskwill@trendmicro.com"
] |
Geoff_Baskwill@trendmicro.com
|
be45bcb1e674793f5bb4889a3cdcada07a013a45
|
5b71e2952f34dd3bb20148874d952fee06d31857
|
/app/mf/crud/migrations/0100_auto_20210206_1820.py
|
41f0df779972b165576ab9f2962e9261c1ec7a13
|
[] |
no_license
|
isela1998/facebook
|
a937917cddb9ef043dd6014efc44d59d034102b1
|
a0f2f146eb602b45c951995a5cb44409426250c5
|
refs/heads/master
| 2023-07-18T02:14:50.293774
| 2021-08-28T03:26:06
| 2021-08-28T03:26:06
| 400,613,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Generated by Django 3.1.1 on 2021-02-06 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0099_debts_rate'),
]
operations = [
migrations.AlterField(
model_name='debts',
name='rate',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Tasa(Bs.)'),
),
]
|
[
"infantefernandezisela@gmail.com"
] |
infantefernandezisela@gmail.com
|
ea81a3f2769fe2186891c4edce86d5f3c483d4e5
|
940622a48cc8711a39dd7f36122bae1e25ee2fcc
|
/QuestionTime/QuestionTime/urls.py
|
a68ebcf6bfba297eff05f5c23e941b75964ca7f5
|
[] |
no_license
|
merveealpay/django-vue-question-app
|
144d1f9b49cd1f0cbd91820c2c11cc42ff95a09d
|
f12c88bdbfcac685b7098145370e13be935c8d8f
|
refs/heads/main
| 2023-02-05T12:58:28.651036
| 2020-12-27T18:05:35
| 2020-12-27T18:05:35
| 319,586,207
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
"""QuestionTime URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path, re_path
from django_registration.backends.one_step.views import RegistrationView
#look at django-registration documentation!!!
from core.views import IndexTemplateView
from users.forms import CustomUserForm
urlpatterns = [
path('admin/', admin.site.urls),
path("accounts/register/",
RegistrationView.as_view(
form_class=CustomUserForm,
success_url="/",
), name="django_registration_register"),
path("accounts/",
include("django_registration.backends.one_step.urls")),
path("accounts/",
include("django.contrib.auth.urls")),
path("api/",
include("users.api.urls")),
path("api/",
include("questions.api.urls")),
path("api-auth/",
include("rest_framework.urls")),
path("api/rest-auth/",
include("rest_auth.urls")),
path("api/rest-auth/registration/",
include("rest_auth.registration.urls")),
re_path(r"^.*$", IndexTemplateView.as_view(), name="entry-point")
]
|
[
"merveealpay@gmail.com"
] |
merveealpay@gmail.com
|
2e54441258e9589bbbcf8cfd910724f80e61d746
|
966d68245763f12d950efbc39928cbb14655b9d1
|
/backend/textManipulation/summarization.py
|
05f35fdc3ae8da0ba8c1c697fe038840a8d6ad96
|
[] |
no_license
|
wcooper90/ivyhacks2020
|
ebd1352465eb364d802f7673af06ffa407758f1f
|
684d964a5a352cd78faf11df91c3b1bc08355ee8
|
refs/heads/main
| 2022-12-25T00:14:34.213824
| 2020-10-04T17:57:53
| 2020-10-04T17:57:53
| 301,185,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from sumy.summarizers.luhn import LuhnSummarizer
from sumy.summarizers.edmundson import EdmundsonSummarizer
from sumy.summarizers.text_rank import TextRankSummarizer
from sumy.summarizers.lex_rank import LexRankSummarizer
import nltk
nltk.download('punkt')
from UserInputs import UserInputs
from inputs.article_scraper import url_text_conversion, collect_text
# default summarization for text
def summarize_text(text, num_sentences):
LANGUAGE = "english"
# parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
parser = PlaintextParser.from_string(text, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
# use text rank as default
summarizer = TextRankSummarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
result = ''
counter = 0
for sentence in summarizer(parser.document, num_sentences):
counter += 1
# if counter > num_sentences / 2:
# break
# print(sentence)
result += ' ' + str(sentence)
title = 'to be implemented'
return result, title
# adjust to summarize speech better
def summarize_speech_transcription(text, num_sentences):
    LANGUAGE = "english"
    # parse the raw transcription text and summarize it with the LSA summarizer
    parser = PlaintextParser.from_string(text, Tokenizer(LANGUAGE))
    summarizer = Summarizer(Stemmer(LANGUAGE))
    summarizer.stop_words = get_stop_words(LANGUAGE)
    result = ' '.join(str(sentence) for sentence in summarizer(parser.document, num_sentences))
    title = 'to be implemented'
    return result, title
# summarization for an article
def summarize_article_text(url, num_sentences):
LANGUAGE = "english"
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
# or for plain text files
# parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
# parser = PlaintextParser.from_string("Check this out.", Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
result = ''
for sentence in summarizer(parser.document, num_sentences):
result += str(sentence)
title = 'to be implemented'
return result, title
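
# Illustrative usage sketch (not part of the original module): summarize a short
# in-memory passage with the default TextRank path defined above.
if __name__ == '__main__':
    sample = ("Text summarization reduces a document to its most informative "
              "sentences. Extractive methods such as TextRank score sentences "
              "against each other and keep the highest-ranked ones. "
              "This sketch keeps two of them.")
    summary, _ = summarize_text(sample, 2)
    print(summary)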
|
[
"wcooperhockey@gmail.com"
] |
wcooperhockey@gmail.com
|
fb535040a409105773f5e30f68bd636c8b3931a2
|
307d6435a8da159eede9c233dc14bce29d8af11f
|
/src/fewer_than_15_siblings.py
|
0450dcc4a5c0f7321f333cc894452d9854905de2
|
[] |
no_license
|
Ahsan45/SSW-CS555
|
7d82d0f039bfb31cbb775718debfde02615a8ce1
|
fc4808884a99d48ff29f122d67c197061102c57c
|
refs/heads/master
| 2021-01-25T06:30:25.675681
| 2017-08-03T00:56:58
| 2017-08-03T00:56:58
| 93,584,540
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
#Erik Lim
#SSW 555 Agile Methods for Software Development
'''Module for checking if there are fewer than 15 siblings in a family'''
from utils import date_first
def fewer_than_15_siblings(fam):
if 'CHIL' in fam:
return len(fam['CHIL']) < 15
return True
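
# Illustrative behaviour (hypothetical GEDCOM-style family records):
#   fewer_than_15_siblings({'CHIL': ['@I1@', '@I2@']})                 -> True
#   fewer_than_15_siblings({'CHIL': ['@I%d@' % i for i in range(15)]}) -> False
#   fewer_than_15_siblings({})                                         -> True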
|
[
"noreply@github.com"
] |
noreply@github.com
|
a13fe0f96eed0b4b55663eed124b9bba9ead6cec
|
51ea0825d013e4205a74e288a95cec86b379e6ef
|
/augmentations.py
|
01b80e72b84a38d252970a0cf0355501a1d6c22c
|
[] |
no_license
|
lucaslu1987/faceboxes
|
79d6e1f4d34087825cf81d76a4401e0bc40e77e1
|
7d3a459ad7e98c791ce9ad7b9058329f0663f4e4
|
refs/heads/master
| 2020-03-29T22:06:24.612806
| 2018-08-24T01:25:31
| 2018-08-24T01:25:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,384
|
py
|
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, boxes=None, labels=None):
for t in self.transforms:
img, boxes, labels = t(img, boxes, labels)
return img, boxes, labels
class ConvertFromInts(object):
def __call__(self, image, boxes=None, labels=None):
return image.astype(np.float32), boxes, labels
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
        assert self.upper >= self.lower, "saturation upper must be >= lower."
        assert self.lower >= 0, "saturation lower must be non-negative."
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 1] *= random.uniform(self.lower, self.upper)
return image, boxes, labels
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 0] += random.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
return image, boxes, labels
class RandomLightingNoise(object):
def __init__(self):
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
swap = self.perms[random.randint(len(self.perms))]
shuffle = SwapChannels(swap) # shuffle channels
image = shuffle(image)
return image, boxes, labels
class ConvertColor(object):
def __init__(self, current='BGR', transform='HSV'):
self.transform = transform
self.current = current
def __call__(self, image, boxes=None, labels=None):
if self.current == 'BGR' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif self.current == 'HSV' and self.transform == 'BGR':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
else:
raise NotImplementedError
return image, boxes, labels
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return image, boxes, labels
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return image, boxes, labels
class SwapChannels(object):
"""Transforms a tensorized image by swapping the channels in the order
specified in the swap tuple.
Args:
swaps (int triple): final order of channels
eg: (2, 1, 0)
"""
def __init__(self, swaps):
self.swaps = swaps
def __call__(self, image):
"""
Args:
image (Tensor): image tensor to be transformed
Return:
a tensor with channels swapped according to swap
"""
# if torch.is_tensor(image):
# image = image.data.cpu().numpy()
# else:
# image = np.array(image)
image = image[:, :, self.swaps]
return image
class PhotometricDistort(object):
def __init__(self):
self.pd = [
RandomContrast(),
ConvertColor(transform='HSV'),
RandomSaturation(),
RandomHue(),
ConvertColor(current='HSV', transform='BGR'),
RandomContrast()
]
self.rand_brightness = RandomBrightness()
self.rand_light_noise = RandomLightingNoise()
def __call__(self, image, boxes, labels):
im = image.copy()
im, boxes, labels = self.rand_brightness(im, boxes, labels)
if random.randint(2):
distort = Compose(self.pd[:-1])
else:
distort = Compose(self.pd[1:])
im, boxes, labels = distort(im, boxes, labels)
return self.rand_light_noise(im, boxes, labels)
class SSDAugmentation(object):
def __init__(self):
self.augment = Compose([
ConvertFromInts(),
PhotometricDistort(),
])
def __call__(self, img, boxes, labels):
return self.augment(img, boxes, labels)
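
# Illustrative usage sketch (not part of the original module): run the
# photometric pipeline above on a random uint8 image; boxes and labels are
# simply passed through untouched here.
if __name__ == '__main__':
    dummy_img = np.random.randint(0, 255, (300, 300, 3)).astype(np.uint8)
    augmented_img, _, _ = SSDAugmentation()(dummy_img, None, None)
    print(augmented_img.shape, augmented_img.dtype)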
|
[
"609632889@qq.com"
] |
609632889@qq.com
|
2a6df882b37651ba09c0543b3c1661bad7bf365e
|
fe9f4a9c75ec60cd4245b15164e27161567b43ff
|
/week3/2-Resolve-with-Functions/prime_digit.py
|
8506bc8d70591dc4442261e9465aecd2d51d7144
|
[] |
no_license
|
manuelavelinova/Programming0
|
5dc8273edc5c4302de37d48226e1ee7b9a062959
|
56132232ea4321f517af3dd6f0139ee35f00ef15
|
refs/heads/master
| 2016-09-05T13:40:11.416849
| 2015-03-22T10:02:11
| 2015-03-22T10:02:11
| 31,215,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
def is_prime(n):
    # numbers below 2 (including 1) are not prime
    if n < 2:
        return False
    start = 2
    while start < n:
        if n % start == 0:
            return False
        start += 1
    return True
def to_digits(n):
digits = []
while n != 0:
digit = n % 10
digits += [digit]
n = n // 10
return digits
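
# Illustrative behaviour of the helpers above:
#   to_digits(407) -> [7, 0, 4]   (least-significant digit first)
#   is_prime(7)    -> True
#   is_prime(1)    -> False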
n = input("Enter n: ")
n = int(n)
result = to_digits(n)
primeDigit = False
for res in result:
if is_prime(res):
primeDigit = True
break
if primeDigit:
print("There are prime digits")
else:
print("There are no prime digits")
|
[
"manuelavelinova@gmail.com"
] |
manuelavelinova@gmail.com
|
f22af6b6113dc3091f9553766e30977fce309d38
|
db5264994305e8c926f89cb456f33bd3a4d64f76
|
/Sklep zielarski/account/urls.py
|
8f5e4dae0dd33bbbd640d540c02340a153233e68
|
[] |
no_license
|
marcinpelszyk/Django
|
7842e20d5e8b213c4cd42c421c1db9ab7d5f01d5
|
aff2b9bd20e978a22a4a98994bf8424892d3c82f
|
refs/heads/main
| 2023-05-01T19:20:37.267010
| 2021-05-18T17:51:53
| 2021-05-18T17:51:53
| 356,532,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
from django.contrib.auth import views as auth_views
from django.urls import path
from django.views.generic import TemplateView
from . import views
from .forms import PwdResetConfirmForm, PwdResetForm, UserLoginForm
app_name = 'account'
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name='account/login.html',
form_class=UserLoginForm), name='login'),
path('logout/', auth_views.LogoutView.as_view(next_page='/account/login/'), name='logout'),
path('register/', views.account_register, name='register'),
path('activate/<slug:uidb64>/<slug:token>)/', views.account_activate, name='activate'),
# Reset password
path('password_reset/', auth_views.PasswordResetView.as_view(template_name="account/password_reset/password_reset_form.html",
success_url='password_reset_email_confirm',
email_template_name='account/password_reset/password_reset_email.html',
form_class=PwdResetForm), name='pwdreset'),
path('password_reset_confirm/<uidb64>/<token>', auth_views.PasswordResetConfirmView.as_view(template_name='account/password_reset/password_reset_confirm.html',
success_url='password_reset_complete/',
form_class=PwdResetConfirmForm), name="password_reset_confirm"),
path('password_reset/password_reset_email_confirm/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_done'),
path('password_reset_confirm/MTU/password_reset_complete/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_complete'),
# User dashboard
path('dashboard/', views.dashboard, name='dashboard'),
path('profile/edit/', views.edit_details, name='edit_details'),
path('profile/delete_user/', views.delete_user, name='delete_user'),
path('profile/delete_confirm/', TemplateView.as_view(template_name="account/dashboard/delete_confirm.html"), name='delete_confirmation'),
# Addresses
path('addresses/', views.view_address, name='addresses'),
path("add_address/", views.add_address, name="add_address"),
path("addresses/edit/<slug:id>/", views.edit_address, name="edit_address"),
path("addresses/delete/<slug:id>/", views.delete_address, name="delete_address"),
path("addresses/set_default/<slug:id>/", views.set_default, name="set_default"),
path("user_orders/", views.user_orders, name="user_orders"),
#Favorite list
path('favoritelist/', views.favoritelist, name='favoritelist'),
path('favoritelist/add_to_favoritelist/<int:id>', views.add_to_favoritelist, name='user_favorite'),
]
|
[
"marcin.pelszyk90@gmail.com"
] |
marcin.pelszyk90@gmail.com
|
c1a174860f449f624c4ea77b9f9327c3ae268a44
|
3f90cf7ddbc7afb9c7b8cf26ffee7f26f75d995d
|
/setup.py
|
e1037f1deeb0d503439c3bbc94f48aca0a855761
|
[
"MIT"
] |
permissive
|
Znigneering/CSCI-3154
|
0c0f9383dc9f0a42c6f653c3fb450410a4b1a642
|
bc318efc73d2a80025b98f5b3e4f7e4819e952e4
|
refs/heads/master
| 2022-12-24T17:49:17.711622
| 2018-11-27T18:18:28
| 2018-11-27T18:18:28
| 158,028,171
| 0
| 0
|
MIT
| 2022-12-10T08:41:36
| 2018-11-17T21:54:45
|
C
|
UTF-8
|
Python
| false
| false
| 345
|
py
|
from setuptools import setup
setup(
name='PyTPG',
version='0.8',
packages=['tpg'],
license='MIT',
description='Python implementation of Tangled Program Graphs.',
long_description=open('README.md').read(),
author='Ryan Amaral',
author_email='ryan_amaral@live.com',
url='https://github.com/Ryan-Amaral/PyTPG')
|
[
"zh676054@dal.ca"
] |
zh676054@dal.ca
|
a55f59281307acfcc9afc41d05c3550c1e1f0745
|
f77e219f6ab6794c8c52bcb06a936da02b381398
|
/libs/rl_salk/agents/sarsa_learner.py
|
25cd9e3adbd157433c7fa1fd7d9d318a67fa587f
|
[
"MIT"
] |
permissive
|
rl-salk/rl-salk
|
96a5608e66134f8d5d305d30769d15f0ea372aad
|
2e63020fc76c81f863052ccce749353644e2fc9e
|
refs/heads/master
| 2020-06-24T05:21:02.937984
| 2019-08-08T02:14:59
| 2019-08-08T02:14:59
| 198,861,132
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from rl_salk.agents.td_learner import TDLearner
class SarsaLearner(TDLearner):
def learn(self, prev_state, action, state, reward, done):
super().learn(prev_state, action, state, reward, done,
target_policy='behavior')
|
[
"daniel.jaffe.butler@gmail.com"
] |
daniel.jaffe.butler@gmail.com
|
3a872299b9b73a04afddd47ddc4dda9480c8f34e
|
f9c12c1c04b51ec62d7d671c02eb471a0afaccda
|
/tugas1/server3.py
|
ae2f0a360bf436a78f36b210dbfe282764f61578
|
[] |
no_license
|
bastianf19/progjar-b-its-2020
|
dc92dbeb980f2c2391232626e4a65941978530c2
|
95405279b8de26c5d89cc39f0b360c7c0a78fb2a
|
refs/heads/master
| 2020-12-27T23:03:50.153132
| 2020-04-24T09:53:08
| 2020-04-24T09:53:08
| 238,096,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
import sys
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 31002)
print('starting up on %s port %s' % server_address, file=sys.stderr)
sock.bind(server_address)
sock.listen(1)
while True:
    print('waiting for a connection', file=sys.stderr)
    connection, client_address = sock.accept()
    print('connection from', client_address, file=sys.stderr)
    while True:
        data = connection.recv(64).decode()
        print('received "%s"' % data, file=sys.stderr)
        if data:
            print('sending data back to the client', file=sys.stderr)
            connection.sendall(data.encode())
        else:
            print('no more data from', client_address, file=sys.stderr)
break
connection.close()
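
# Illustrative client for the echo server above (run from a separate process;
# the host/port mirror the server_address defined at the top of this file):
#
#   import socket
#   client = socket.create_connection(('localhost', 31002))
#   client.sendall('hello'.encode())
#   print(client.recv(64).decode())
#   client.close()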
|
[
"bastian.farandy@gmail.com"
] |
bastian.farandy@gmail.com
|
c3a204b93156cbcd8e27787d9c7665ae8196a3c3
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/instance/DistributedTeleportMgr.py
|
b9c0736e2218759b8f8a6859c8f75acb8541aa1b
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,217
|
py
|
from pandac.PandaModules import *
from direct.task import Task
from direct.distributed import DistributedObject
from pirates.piratesbase import PiratesGlobals
from pirates.world import ZoneLOD
from direct.showbase.PythonUtil import report
from otp.otpbase import OTPLocalizer
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PDialog
from otp.otpgui import OTPDialog
from pirates.quest import QuestDB, QuestLadderDB
'''
Congratulations, Disney! You've managed to write this very gay code.
DistributedTeleportMgr is the gayest thing ever existed.
Do not try to understand this shit, I've already done it for you.
By the way it gave me cancer and aids.
'''
class DistributedTeleportMgr(DistributedObject.DistributedObject):
notify = directNotify.newCategory('DistributedTeleportMgr')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.instanceType = None
self.fromInstanceType = None
self.lastLocalTeleportLoc = None
self.teleportQueryId = None
self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
self.instanceName = 'mainWorld'
self.doneCallback = None
self.startedCallback = None
self.oldWorld = None
self.requestData = None
self.localTeleportId = None
self.localTeleportingObj = None
self.localTeleportCallback = None
self.localTeleportDestPos = None
self.popupDialog = None
self.doEffect = False
self.stowawayEffect = False
self.miniLog = None
self.teleportQueue = []
self.teleportQueueProcess = None
def generate(self):
DistributedObject.DistributedObject.generate(self)
base.cr.teleportMgr = self
self.localTeleportingObj = localAvatar
self.__pendingGoNow = [True]
localAvatar.readyToTeleport(self)
self.accept('localAvTeleportFinishedRequest', self.localAvTeleportFinishedRequest)
def requestLocalTeleport(self, locationName = None):
self.requestData = ((), {
'locationName': locationName })
localAvatar.confirmTeleport(self.localTeleportConfirmation, feedback = True)
def localTeleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
self.localTeleport(*requestData[0], **requestData[1])
            locationUid = requestData[1].get('locationUid')
            if locationUid:
                base.cr.loadingScreen.showTarget(locationUid)
                base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
def localTeleportEffect(self, teleportPosHpr, parent=None, smooth=False, goNow=False):
if localAvatar.testTeleportFlag(PiratesGlobals.TFInWater) or goNow:
self.localTeleportPos(teleportPosHpr, parent, smooth)
else:
localAvatar.b_setGameState('TeleportOut')
taskMgr.doMethodLater(5, self.localTeleportPos, self.uniqueName('localTeleportPos'), extraArgs = [
teleportPosHpr,
parent,
smooth])
def localTeleportPos(self, teleportPosHpr, parent = None, smooth = False):
localAvatar.b_setGameState('TeleportOut', [
None,
False])
currParent = localAvatar.getParentObj()
if isinstance(currParent, ZoneLOD.ZoneLOD):
localAvatar.leaveZoneLOD(currParent)
if parent == None:
parent = self.cr.activeWorld.worldGrid
messenger.send('islandPlayerBarrier', [
0])
teleportZone = parent.getZoneFromXYZ(teleportPosHpr[:3])
localAvatar.reparentTo(parent)
localAvatar.setPosHpr(*teleportPosHpr)
localAvatar.spawnWiggle()
localAvatar.b_setLocation(parent.getDoId(), teleportZone)
parent.addObjectToGrid(localAvatar)
parent.setPlayerBarrier(1)
currParent = localAvatar.getParentObj()
if isinstance(currParent, ZoneLOD.ZoneLOD):
localAvatar.enterZoneLOD(currParent)
parent.processVisibility(None)
if base.cr._completeEventCount.num > 0:
self.acceptOnce(base.cr.getAllInterestsCompleteEvent(), localAvatar.b_setGameState, extraArgs = [
'TeleportIn'])
else:
localAvatar.b_setGameState('TeleportIn')
def localTeleport(self, locationName=None, goNow=False, locationUid=None):
if locationName and locationUid:
locationName = None
for currIsle in base.cr.doId2do.values():
if not (hasattr(currIsle, 'getName') and hasattr(currIsle, 'getUniqueId')):
continue
if currIsle.getName() == locationName:
break
elif currIsle.getUniqueId() == locationUid:
break
else:
self.notify.error('not found: (%s, %s)' % (locationName, locationUid))
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
questStateSpawnIdx = QuestLadderDB.getPreferredAreaSpawnNode(currIsle.getUniqueId(), localAvatar)
teleportPos = base.cr.activeWorld.getPlayerSpawnPt(currIsle.getDoId(), index = questStateSpawnIdx)
if teleportPos == None:
teleportPos = (0, 0, 0, 0, 0, 0)
self.localTeleportEffect(teleportPos, currIsle, goNow=goNow)
self.lastLocalTeleportLoc = currIsle.getDoId()
def requestTeleportToFishingShip(self):
print 'requestTeleportToFishingShip'
self.cr.teleportMgr.sendUpdate('requestTeleportToFishingShip')
def teleportToFishingShipResponse(self, shipId):
print 'teleportToFishingShipResponse'
print 'shipId=', shipId
self.cr.teleportMgr.localTeleportToId(shipId, localAvatar, showLoadingScreen = False)
def localTeleportToId(self, locationId, teleportingObj = None, destPos = None, callback = None, objectLocation = None, showLoadingScreen = True):
if showLoadingScreen:
self.cr.loadingScreen.show(waitForLocation = True)
if locationId in base.cr.doId2do and base.cr.doId2do[locationId].dclass.getName() == 'DistributedOceanGrid':
logBlock(1, 'localTeleportToId(%s,%s,%s,%s,%s,%s) to ocean grid\n\n' % (locationId, teleportingObj, destPos, callback, objectLocation, showLoadingScreen) + str(StackTrace()))
self.localTeleportId = locationId
self.localTeleportingObj = teleportingObj
self.localTeleportCallback = callback
self.localTeleportDestPos = destPos
destObj = self.cr.doId2do.get(locationId)
if destObj:
self._localTeleportToIdInterestComplete()
self.notify.debug('destination object %s found, teleporting to there now' % locationId)
elif objectLocation:
self._localTeleportToIdResponse(objectLocation[0], objectLocation[1])
self.notify.debug('destination object %s not found, but location %s given' % (locationId, objectLocation))
else:
self.sendUpdate('requestTargetsLocation', [
int(locationId)])
self.notify.debug('destination object %s not found, querying AI for its location' % locationId)
def _localTeleportToIdResponse(self, objectId, parentId, zoneId):
self.localTeleportId = objectId
if parentId != 0 and zoneId != 0:
if self.cr.doId2do.get(parentId):
localAvatar.setInterest(parentId, zoneId, [
'localTeleportToId'], 'localTeleportToIdInterestAddComplete')
self.acceptOnce('localTeleportToIdInterestAddComplete', self._localTeleportToIdInterestComplete)
self.notify.debug('parent %s of destination object found, setting up interest' % parentId)
else:
self.notify.warning('parent %s of destination object not found, teleport failure' % parentId)
else:
self.failTeleport(parentId, zoneId)
def failTeleport(self, parentId = None, zoneId = None, message = PLocalizer.TeleportToPlayerFailMessage):
self.sendUpdate('requestClearPreventDamage')
fallbackAreaId = localAvatar.getReturnLocation()
if fallbackAreaId != '':
areaDoId = base.cr.uidMgr.getDoId(fallbackAreaId)
self.clearAmInTeleport()
if areaDoId:
destPos = base.cr.activeWorld.getPlayerSpawnPt(areaDoId)
if destPos and self.localTeleportingObj:
self.localTeleportToId(areaDoId, self.localTeleportingObj, destPos)
else:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
else:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
self._DistributedTeleportMgr__createDialog(message)
else:
self.notify.warning(" teleport to object (%s %s) AND 'return location' %s failed" % (parentId, zoneId, fallbackAreaId))
def _DistributedTeleportMgr__cleanupDialog(self, value = None):
if self.popupDialog:
self.popupDialog.destroy()
del self.popupDialog
self.popupDialog = None
def _DistributedTeleportMgr__createDialog(self, message):
if message:
popupDialogText = message
if self.popupDialog:
self._DistributedTeleportMgr__cleanupDialog()
self.popupDialog = PDialog.PDialog(text = popupDialogText, style = OTPDialog.Acknowledge, command = self._DistributedTeleportMgr__cleanupDialog)
def _localTeleportToIdInterestComplete(self):
teleportToObj = self.cr.doId2do.get(self.localTeleportId)
if not teleportToObj:
self.sendUpdate('requestTargetsLocation', [
self.localTeleportId])
return None
curParent = localAvatar.getParentObj()
parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
if parentIsZoneLOD:
localAvatar.leaveZoneLOD(curParent)
try:
isAShip = teleportToObj._isShip()
except AttributeError:
isAShip = False
if isAShip:
if not teleportToObj.isSailable():
self.failTeleport(0, 0, PLocalizer.TeleportToGoneShipFailMessage)
return None
elif teleportToObj.gameFSM.getCurrentOrNextState() in ('InBoardingPosition', 'OtherShipBoarded'):
self.failTeleport(0, 0, PLocalizer.TeleportToBoardingShipFailMessage)
return None
teleportToObj.setZoneLevel(3)
teleportToObj.registerMainBuiltFunction(localAvatar.placeOnShip, [
teleportToObj])
teleportToObj.registerBuildCompleteFunction(teleportToObj.enableOnDeckInteractions)
teleportToObj.registerBuildCompleteFunction(self._localTeleportToIdDone)
base.setLocationCode('Ship')
else:
self.__pendingGoNow.append(False)
goNow = self.__pendingGoNow.pop(0)
self.localTeleport(locationUid=teleportToObj.getUniqueId(), goNow=goNow)
def _localTeleportToIdDone(self):
self.cr.loadingScreen.scheduleHide(base.cr.getAllInterestsCompleteEvent())
curParent = localAvatar.getParentObj()
if isinstance(curParent, ZoneLOD.ZoneLOD):
localAvatar.enterZoneLOD(curParent)
if self.localTeleportCallback:
self.localTeleportCallback()
self.localTeleportId = None
self.localTeleportingObj = None
self.localTeleportCallback = None
self.localTeleportDestPos = None
localAvatar.guiMgr.socialPanel.updateAll()
def disable(self):
DistributedObject.DistributedObject.disable(self)
messenger.send('destroyCrewMatchInvite')
taskMgr.removeTasksMatching('teleportRemoveInterest')
taskMgr.removeTasksMatching('teleportAddInterest')
taskMgr.removeTasksMatching(self.uniqueName('localTeleportPos'))
taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
self.requestData = None
self.ignoreAll()
if base.cr.teleportMgr == self:
base.cr.teleportMgr = None
requestData = self.requestData
self.requestData = None
if self.teleportQueueProcess:
taskMgr.remove(self.teleportQueueProcess)
def requestTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True):
self.requestData = ((instanceType, instanceName), {
'shardId': shardId,
'locationUid': locationUid,
'instanceDoId': instanceDoId,
'doneCallback': doneCallback,
'startedCallback': startedCallback,
'gameType': gameType,
'friendDoId': friendDoId,
'friendAreaDoId': friendAreaDoId,
'doEffect': doEffect })
localAvatar.confirmTeleport(self.teleportConfirmation, feedback = True)
def teleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
            self.initiateTeleport(*requestData[0], **requestData[1])
locationUid = requestData[1]['locationUid']
base.cr.loadingScreen.showTarget(locationUid)
base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
def requestTeleportToAvatar(self, shardId, instanceDoId, avatarId, avatarParentId):
self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = avatarId, friendAreaDoId = avatarParentId)
def teleportToObjectResp(self, shardId, instanceId, objId, parentId):
self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceId, friendDoId = objId, friendAreaDoId = parentId)
def requestTeleportToShip(self, shardId, instanceDoId, shipId):
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = 0, friendAreaDoId = shipId)
def requestTeleportToIsland(self, islandUid):
def teleportConfirmation(confirmed, islandUid = islandUid):
self.islandTeleportConfirmation(confirmed, islandUid)
localAvatar.setTeleportFlag(PiratesGlobals.TFNoIslandToken, localAvatar.confirmIslandTokenTeleport, [
islandUid])
localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleport, [
islandUid])
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
localAvatar.clearTeleportFlag(PiratesGlobals.TFNoIslandToken)
localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)
def islandTeleportConfirmation(self, confirmed, islandUid):
if confirmed:
islandDoId = self.cr.uidMgr.getDoId(islandUid)
island = self.cr.getDo(islandDoId)
if island and island.getParentObj() is self.cr.activeWorld:
self.localTeleport(locationName = island.getName())
else:
self.sendUpdate('requestTeleportToIsland', [
islandUid])
base.cr.loadingScreen.showTarget(islandUid)
base.cr.loadingScreen.showHint(islandUid)
def teleportToIslandResponse(self, instanceDoId, islandDoId):
if instanceDoId and islandDoId:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId)
def stowawayTeleportResponse(self, instanceDoId, islandDoId):
if instanceDoId and islandDoId:
self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId, doEffect = False, stowawayEffect = True)
base.cr.loadingScreen.showTarget(base.cr.doId2do[islandDoId].getUniqueId())
def queryAvatarForTeleport(self, avId):
self.setTeleportQueryId(avId)
def teleportConfirmation(confirmed, avId = avId):
if confirmed:
handle = self.cr.identifyAvatar(avId)
if handle:
shardId = self.cr.distributedDistrict.doId
                    bandMgr, bandId = localAvatar.getBandId() or (0, 0)
guildId = localAvatar.getGuildId()
handle.sendTeleportQuery(avId, bandMgr, bandId, guildId, shardId)
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
def handleAvatarTeleportQuery(self, requesterId, requesterBandMgrId, requesterBandId, requesterGuildId, requesterShardId):
handle = self.cr.identifyAvatar(requesterId)
if not handle:
return None
if self.cr.identifyFriend(requesterId):
if requesterId in localAvatar.ignoreList or self.cr.avatarFriendsManager.checkIgnored(requesterId):
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFIgnore), 0, 0, 0, sendToId = requesterId)
return None
avName = handle.getName()
def confirmed(canTeleportTo, avId, failedFlag, avName = avName):
if canTeleportTo:
if self.cr.getActiveWorld() and self.cr.distributedDistrict and localAvatar.getParentObj():
handle.sendTeleportResponse(PiratesGlobals.TAAvailable, self.cr.distributedDistrict.doId, self.cr.getActiveWorld().doId, localAvatar.getParentObj().doId, sendToId = requesterId)
else:
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFUnavailable), 0, 0, 0, sendToId = requesterId)
elif localAvatar.failedTeleportMessageOk(requesterId):
localAvatar.setSystemMessage(requesterId, OTPLocalizer.WhisperFailedVisit % avName)
handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(failedFlag), 0, 0, 0, sendToId = requesterId)
localAvatar.confirmTeleportTo(confirmed, requesterId, avName, requesterBandMgrId, requesterBandId, requesterGuildId)
def handleAvatarTeleportResponse(self, avId, available, shardId, instanceDoId, areaDoId):
if not avId == self.teleportQueryId:
self.clearTeleportQueryId()
return None
self.clearTeleportQueryId()
handle = self.cr.identifyAvatar(avId)
if handle:
avName = handle.getName()
else:
return None
if available == PiratesGlobals.TAAvailable:
def teleportConfirmation(confirmed, shardId = shardId, instanceDoID = instanceDoId, avId = avId, avatarParentId = areaDoId):
if confirmed:
self.requestTeleportToAvatar(shardId, instanceDoId, avatarId = avId, avatarParentId = areaDoId)
localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleportToPlayer, [
areaDoId])
localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)
else:
flag = PiratesGlobals.decodeTeleportFlag(available)
if flag == PiratesGlobals.TAIgnore:
pass
1
if flag in PiratesGlobals.TFNoTeleportToReasons:
localAvatar.guiMgr.createWarning(PiratesGlobals.TFNoTeleportToReasons[flag] % avName, duration = 10)
def initiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, queue = False, stowawayEffect = False):
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
if self.cr.activeWorld:
fromInstanceType = self.cr.activeWorld.getType()
else:
fromInstanceType = PiratesGlobals.INSTANCE_NONE
if instanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_WELCOME] and fromInstanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_GENERIC,
PiratesGlobals.INSTANCE_NONE]:
if not base.config.GetBool('can-break-teleport-rules', 0):
import pdb as pdb
pdb.set_trace()
return None
if self.amInTeleport():
if queue:
self.queueInitiateTeleport(instanceType, instanceName, shardId, locationUid, instanceDoId, doneCallback, startedCallback, gameType, friendDoId, friendAreaDoId, doEffect, stowawayEffect)
return None
return None
self.setAmInTeleport()
if instanceType == PiratesGlobals.INSTANCE_MAIN and not locationUid:
locationUid = localAvatar.returnLocation
localAvatar.teleportFriendDoId = friendDoId
self.doEffect = doEffect
self.stowawayEffect = stowawayEffect
self.sendUpdate('initiateTeleport', [
instanceType,
fromInstanceType,
shardId,
locationUid,
instanceDoId,
instanceName,
gameType,
friendDoId,
friendAreaDoId])
self.doneCallback = doneCallback
self.startedCallback = startedCallback
self.teleportInit(instanceType, fromInstanceType, instanceName)
def queueInitiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, stowawayEffect = False):
teleInfo = [
instanceType,
instanceName,
shardId,
locationUid,
instanceDoId,
doneCallback,
startedCallback,
gameType,
friendDoId,
friendAreaDoId,
doEffect,
stowawayEffect]
self.teleportQueue.append(teleInfo)
def processTeleportQueue(task = None):
if self.amInTeleport():
return Task.again
if not self.teleportQueue:
return Task.done
teleportInfo = self.teleportQueue.pop(0)
self.initiateTeleport(*teleportInfo)
if self.teleportQueue:
return Task.again
return Task.done
self.teleportQueueProcess = taskMgr.doMethodLater(1, processTeleportQueue, 'processTeleportQueue')
def amInTeleport(self):
return localAvatar.testTeleportFlag(PiratesGlobals.TFInTeleport)
def setAmInTeleport(self):
localAvatar.b_setTeleportFlag(PiratesGlobals.TFInTeleport)
localAvatar.b_clearTeleportFlag(PiratesGlobals.TFLookoutJoined)
def clearAmInTeleport(self):
localAvatar.clearTeleportFlag(PiratesGlobals.TFInInitTeleport)
localAvatar.b_clearTeleportFlag(PiratesGlobals.TFInTeleport)
def setTeleportQueryId(self, avId):
self.teleportQueryId = avId
def clearTeleportQueryId(self):
self.teleportQueryId = 0
def initiateTeleportAI(self, instanceType, instanceName):
self.teleportInit(instanceType, instanceName)
def teleportInit(self, instanceType, fromInstanceType, instanceName, gameType = None):
self.clearTeleportQueryId()
self.oldWorld = base.cr.activeWorld
self.instanceType = instanceType
self.fromInstanceType = fromInstanceType
self.instanceName = instanceName
self.gameType = gameType
self.miniLog = MiniLog('TeleportLog')
MiniLogSentry(self.miniLog, 'teleportInit', instanceType, fromInstanceType, instanceName, gameType)
def teleportHasBegun(self, instanceType, fromInstanceType, instanceName, gameType):
if not self.miniLog:
self.miniLog = MiniLog('TeleportLog')
s = MiniLogSentry(self.miniLog, 'teleportHasBegun', instanceType, fromInstanceType, instanceName, gameType)
if self.startedCallback:
self.startedCallback()
self.startedCallback = None
if self.oldWorld == None or self.oldWorld.isEmpty():
self.teleportInit(instanceType, fromInstanceType, instanceName, gameType)
def getRemoveInterestEventName(self):
return self.uniqueName('teleportRemoveInterest')
def getAddInterestEventName(self):
return self.uniqueName('teleportAddInterest')
def forceTeleportStart(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
s = MiniLogSentry(self.miniLog, 'forceTeleportStart', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
self.setAmInTeleport()
localAvatar.guiMgr.request('Cutscene')
if not base.transitions.fadeOutActive():
base.transitions.fadeOut()
if self.fromInstanceType == PiratesGlobals.INSTANCE_MAIN:
self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
else:
self.inInstanceType = self.instanceType
if self.fromInstanceType == PiratesGlobals.INSTANCE_PVP:
localAvatar.clearTeleportFlag(PiratesGlobals.TFInPVP)
elif self.fromInstanceType == PiratesGlobals.INSTANCE_TUTORIAL:
localAvatar.clearTeleportFlag(PiratesGlobals.TFInTutorial)
def fadeDone():
base.cr.loadingScreen.show()
s = MiniLogSentry(self.miniLog, 'fadeDone')
curParent = localAvatar.getParentObj()
parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
if parentIsZoneLOD:
localAvatar.leaveZoneLOD(curParent)
curParent.turnOff()
if self.cr.doId2do.get(tzParent) == None:
self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
else:
self.teleportAddInterestTZ(instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
localAvatar.guiMgr.request('Interactive')
taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
taskMgr.doMethodLater(1, fadeDone, self.uniqueName('fadeDone'), extraArgs = [])
def teleportAddInterestTZ(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
s = MiniLogSentry(self.miniLog, 'teleportAddInterestTZ', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
addEvent = self.getAddInterestEventName()
self.accept(addEvent, self.teleportAddInterestCompleteTZ, extraArgs = [
tzDoId,
thDoId,
worldGridDoId])
localAvatar.setInterest(tzParent, tzZone, [
'TZInterest'], addEvent)
self.instanceName = instanceName
def teleportAddInterestCompleteTZ(self, tzDoId, thDoId, worldGridDoId):
s = MiniLogSentry(self.miniLog, 'teleportAddInterestCompleteTZ', tzDoId, thDoId, worldGridDoId)
base.cr.relatedObjectMgr.requestObjects([
tzDoId], eachCallback = lambda param1, param2 = thDoId: self.teleportZoneExists(param1, param2))
def teleportZoneExists(self, teleportZone, thDoId):
s = MiniLogSentry(self.miniLog, 'teleportZoneExists', teleportZone, thDoId)
base.cr.relatedObjectMgr.requestObjects([
thDoId], eachCallback = lambda param1, param2 = teleportZone: self.teleportHandlerExists(param1, param2))
def teleportHandlerExists(self, teleportHandler, teleportZone):
s = MiniLogSentry(self.miniLog, 'teleportHandlerExists', teleportHandler, teleportZone)
teleportHandler.instanceName = self.instanceName
teleportHandler.instanceType = self.instanceType
teleportHandler.doneCallback = self.doneCallback
self.doneCallback = None
teleportHandler.oldWorld = self.oldWorld
self.oldWorld = None
teleportHandler.miniLog = self.miniLog
self.miniLog = None
teleportHandler.startTeleport()
def localAvTeleportFinishedRequest(self, task = None):
if not self.amInTeleport():
messenger.send('localAvTeleportFinished')
def createSpawnInterests(self, parents, callback, destGrid, teleportingObj):
s = MiniLogSentry(self.miniLog, 'createSpawnInterests', parents, callback.__name__, destGrid, teleportingObj)
parentsLen = len(parents)
if self.miniLog:
self.miniLog.appendLine('parents - %s' % (parents,))
self.miniLog.appendLine('destGrid - %s' % (destGrid,))
if parentsLen == 0:
logBlock(2, self.miniLog)
callback(destGrid, teleportingObj)
else:
parentObj = base.cr.doId2do.get(parents[0])
if parentObj:
callback(parentObj, teleportingObj)
elif parentsLen > 2 and parents[2] in base.cr.doId2do:
base.cr.relatedObjectMgr.requestObjects([
parents[0]], eachCallback = lambda param1 = None, param2 = teleportingObj: callback(param1, param2))
localAvatar.setInterest(parents[2], parents[1], [
'instanceInterest'])
elif parentsLen > 2:
parentParentId = parents[2]
parentParentZone = parents[1]
else:
parentParentId = '<None Given>'
parentParentZone = '<None Given>'
parentId = parents[0]
self.notify.warning(('createSpawnInterests: parent %s of parent %s in zone %s ' + 'does not exist locally, aborting teleport') % (parentParentId, parentId, parentParentZone))
self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
def initiateCrossShardDeploy(self, shardId = 0, islandUid = '', shipId = 0, doneCallback = None, startedCallback = None, doEffect = True):
if not islandUid or not shipId:
return None
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
if self.cr.activeWorld:
fromInstanceType = self.cr.activeWorld.getType()
else:
fromInstanceType = PiratesGlobals.INSTANCE_NONE
if self.amInTeleport():
return None
self.setAmInTeleport()
self.doEffect = doEffect
self.sendUpdate('requestCrossShardDeploy', [
shardId,
islandUid,
shipId])
self.doneCallback = doneCallback
self.startedCallback = startedCallback
self.teleportInit(PiratesGlobals.INSTANCE_MAIN, fromInstanceType, 'Main World')
def notifyFriendVisit(self, avId):
av = base.cr.identifyAvatar(avId)
if av:
avName = av.getName()
else:
avName = PLocalizer.Someone
localAvatar.setSystemMessage(avId, OTPLocalizer.WhisperComingToVisit % avName)
localAvatar.guiMgr.messageStack.addTextMessage(OTPLocalizer.WhisperComingToVisit % avName, icon = ('friends', None))
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
8eda4c8d2fd5781128748cfa3f14c23c06229fc3
|
10e19b5cfd59208c1b754fea38c34cc1fb14fdbe
|
/desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/project/ignored/this_wont_normally_be_here.py
|
f26ddee1f7972ffe1050d7bb17ab8f960c38096a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
sarvex/hue
|
780d28d032edd810d04e83f588617d1630ec2bef
|
6e75f0c4da2f3231e19c57bdedd57fb5a935670d
|
refs/heads/master
| 2023-08-15T21:39:16.171556
| 2023-05-01T08:37:43
| 2023-05-01T08:37:43
| 32,574,366
| 0
| 0
|
Apache-2.0
| 2023-09-14T16:55:28
| 2015-03-20T09:18:18
|
Python
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
# -*- coding: utf-8 -*-
# This file won't normally be in this directory.
# It IS only for tests
from gettext import ngettext
def foo():
# Note: This will have the TRANSLATOR: tag but shouldn't
# be included on the extracted stuff
print ngettext('FooBar', 'FooBars', 1)
|
[
"bcwalrus@cloudera.com"
] |
bcwalrus@cloudera.com
|
540a36640e92a49cebfdc1d2114b07b6d1012de4
|
90909fe5a9f9fdf65bd5b1e7374f5eee0afad325
|
/python-data-science-toolbox-part-2/generator-objects-1.py
|
37ae50931b731cffcfbe0e6f9149a0110aec0ef7
|
[] |
no_license
|
fejesgergorichard/DataCamp
|
c4a75ecb2f97347c87b55357ac915fd3c1cd7a7f
|
5307016e4b7da9569e08d5923a9f6e1283da6c65
|
refs/heads/master
| 2022-09-05T12:37:37.082755
| 2020-05-18T17:22:44
| 2020-05-18T17:22:44
| 250,319,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# A generator object is similar to a list comprehension, but it is defined with ( ), not [ ]
# Unlike a list comprehension, this object does not build the whole list; it produces a generator
# The generator can be iterated over and yields the required values on the fly, thus saving memory
# Create generator object: result
result = (num for num in range(31))
# Print the first 5 values
print(next(result))
print(next(result))
print(next(result))
print(next(result))
print(next(result))
# Print the rest of the values
for value in result:
print(value)
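# --- A minimal illustration (not part of the original exercise) of the memory point above:
# the generator object stays tiny no matter how large the range is, while the equivalent
# list comprehension has to hold every element at once.
import sys
lazy = (num for num in range(10**6))
eager = [num for num in range(10**6)]
print(sys.getsizeof(lazy))   # on the order of a hundred bytes
print(sys.getsizeof(eager))  # several megabytes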
|
[
"fejesgergorichard@gmail.com"
] |
fejesgergorichard@gmail.com
|
a29c354c212eb6398363a27ee49432b9ad922189
|
2d93f948ba86742bb493403cf038e76444e58842
|
/corona/__main__.py
|
d6e3d059734327c55aed8eef2b771f3978d857c3
|
[] |
no_license
|
antista/sir
|
debe6f31d0050e77ea6c3836c548d25cba2291fa
|
2b9d7c5e6c70d5486e55e209352400bc85b589d8
|
refs/heads/master
| 2022-12-28T01:40:02.843425
| 2020-10-10T11:41:42
| 2020-10-10T11:41:42
| 292,320,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from matplotlib import pylab
from corona.corona import main
if __name__ == '__main__':
main()
pylab.show()
|
[
"anti2100@yandex.ru"
] |
anti2100@yandex.ru
|
fb1e1b39572b14702c97b0a9db81638e716cea2e
|
4be8f1143abc8e585cc5c751984b9d861a9254dc
|
/map/migrations/0005_auto_20180212_2334.py
|
14a88da48d451e0ddb22e63348d9aae08ffee250
|
[] |
no_license
|
jacksty/West-Marches-Website
|
b6ec14625a7c534e83008d47a22082a50050ec07
|
f00223dace7f1eb2c3013265856a5c62decf7be1
|
refs/heads/master
| 2021-04-27T10:44:56.511037
| 2018-02-26T06:38:04
| 2018-02-26T06:38:04
| 122,544,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Generated by Django 2.0.2 on 2018-02-13 04:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('map', '0004_edge_map'),
]
operations = [
migrations.RenameField(
model_name='edge',
old_name='node_from',
new_name='source',
),
migrations.RenameField(
model_name='edge',
old_name='node_to',
new_name='target',
),
]
|
[
"jacksty@gmx.com"
] |
jacksty@gmx.com
|
677854622ff234e79b8d645e1b88e6e7804ead61
|
4f1299f5af48ac43735dad9a26e091ed26a606ad
|
/Prac.py
|
c8072a990624301f4f25d2eebc8056071cc56de0
|
[
"MIT"
] |
permissive
|
BikashThapa/PythonPrograms
|
e163b7c62aced8c77ba4c6ee664a2bcf0c3f025a
|
f2e81771d6767fd96fea4622ef9fc8fe8d436b22
|
refs/heads/master
| 2020-07-25T01:23:55.746970
| 2019-12-24T08:30:58
| 2019-12-24T08:30:58
| 208,111,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
class Person:
def __init__(self, name):
self.name = name
def talk(self):
print(f'Hi, this is {self.name} here')
person1 = Person("Bikash Thapa")
person1.talk()
Bob = Person("Bob")
Bob.talk()
|
[
"bthapa489@gmail.com"
] |
bthapa489@gmail.com
|
18e101f183f35166e395cf8563e66d8b90b148fa
|
4dd8416c2a6cf08dd3fa3d197d895f2852106097
|
/语法基础/01.Python基础/12-if条件满足或者不满足的时候执行多句代码.py
|
ff6369886f6d8f6702c0a7020bde5b55db5990cc
|
[] |
no_license
|
lanboys/HelloPython
|
19a74ad416ddadf7ed487023527e08ad9fcecb87
|
d411ee29781a9cf00470d05d1be61da8f2081080
|
refs/heads/master
| 2020-04-07T19:40:17.381564
| 2018-11-23T10:43:00
| 2018-11-23T10:43:00
| 158,657,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
age = 10
if age > 18:
    # If the condition is satisfied, the following 4 lines of code are all executed
print("----0-----")
print("----1-----")
print("----1-----")
print("----1-----")
else:
    # If the condition on line 3 is not satisfied, the following 4 lines of code are executed instead
print("----1-----")
print("----1-----")
print("----1-----")
print("----1-----")
# The line below has nothing to do with the if above, i.e. whether the condition on line 3 is satisfied has no effect on whether it runs
print("----9-----")
|
[
"lan_bing2013@163.com"
] |
lan_bing2013@163.com
|
1621790e8faa136dc64077fdd7cd47ca87f200ae
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/85/usersdata/228/54502/submittedfiles/funcoes1.py
|
881c334b3cea4bdaa4890004b8352ae9eab83fdf
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
# -*- coding: utf-8 -*-
n=int(input('digite o número de elementos:'))
lista1=[]
lista2=[]
lista3=[]
for i in range(0,n,1):
elemento1=int(input('digite o elemento:'))
lista1.append(elemento1)
elemento2=int(input('digite o elemento:'))
lista2.append(elemento2)
elemento3=int(input('digite o elemento:'))
lista3.append(elemento3)
def crescente(a):
    for i in range(0,len(a)-1,1):
        if a[i]>=a[i+1]:
            return False
    return True
def decrescente(a):
    for i in range(0,len(a)-1,1):
        if a[i]<=a[i+1]:
            return False
    return True
def elementosiguais(a):
    for i in range(0,len(a)-1,1):
        if a[i]!=a[i+1]:
            return False
    return True
if crescente(lista1):
print('S')
if crescente(lista1)==False:
print('N')
if decrescente(lista1):
print('S')
if decrescente(lista1)==False:
print('N')
if elementosiguais(lista1):
print('S')
if elementosiguais(lista1)==False:
print('N')
if crescente(lista2):
print('S')
if crescente(lista2)==False:
print('N')
if decrescente(lista2):
print('S')
if decrescente(lista2)==False:
print('N')
if elementosiguais(lista2):
print('S')
if elementosiguais(lista2)==False:
print('N')
if crescente(lista3):
print('S')
if crescente(lista3)==False:
print('N')
if decrescente(lista3):
print('S')
if decrescente(lista3)==False:
print('N')
if elementosiguais(lista3):
print('S')
if elementosiguais(lista3)==False:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b2aedb04d591a0a051b067311682d48c5f4de51b
|
ccce8382859124182ab87832e6aab5bc78141503
|
/app/__init__.py
|
817b64ae76b60d3395952719ff1446b285448028
|
[] |
no_license
|
774525000/train
|
52a24b7f3d0420b82e7c5406f4611d725ad7a2bd
|
ded9cd8a276dc63facc171e405aa34bf0ca672e6
|
refs/heads/master
| 2022-04-11T12:54:05.764310
| 2020-02-27T01:27:23
| 2020-02-27T01:27:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
# -*- coding:utf-8 -*-
from app.train.train import Train
from time import sleep
def run(home_url, email, password, chaojiying_name, chaojiying_pass, chaojiying_app_id, pic_type):
Train.default(home_url, email, password, chaojiying_name, chaojiying_pass, chaojiying_app_id, pic_type)
sleep(100)
|
[
"199441201qQ"
] |
199441201qQ
|
2317b9612a821152993d2c8d3d77909c6a5d504f
|
69266a7696f5f8be7c78fd29ef68a7619e41d28d
|
/Tools/ComputeTool.py
|
9353c424e8d6deac1c49914c31c6768d29dd1ec4
|
[] |
no_license
|
microelly2/PyFlowWWW
|
52deb54deb2db668cd21e9ce251894baaa663823
|
0b3d0009494327b2ec34af9fbca2a5fee1fef4a4
|
refs/heads/master
| 2022-04-14T02:35:08.999370
| 2020-04-11T19:48:54
| 2020-04-11T19:48:54
| 254,876,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,464
|
py
|
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera, microelly
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Core.Common import Direction
import FreeCADGui
from Qt import QtGui
from Qt.QtWidgets import QFileDialog
from nodeeditor.say import *
import sys
if sys.version_info[0] !=2:
from importlib import reload
import os
RESOURCES_DIR = os.path.dirname(os.path.realpath(__file__)) + "/res/"
class ComputeTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( ComputeTool, self).__init__()
@staticmethod
def toolTip():
return "call compute method for selected nodes"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "compute.png")
@staticmethod
def name():
return str("ComputeTool")
def do(self):
nodes=FreeCAD.PF.graphManager.get().getAllNodes()
nodes2 = sorted(nodes, key=lambda node: node.x)
say("selected Nodes ...")
for n in nodes2:
if n.getWrapper().isSelected():
say(n,n.x)
n.compute()
class DeleteTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( DeleteTool, self).__init__()
@staticmethod
def toolTip():
return "Delete the selected nodes"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "delete.png")
@staticmethod
def name():
return str("DeleteTool")
def do(self):
nodes=FreeCAD.PF.graphManager.get().getAllNodes()
nodes2 = sorted(nodes, key=lambda node: node.x)
say("selected Nodes ...")
for n in nodes2:
if n.getWrapper().isSelected():
say(n,n.x)
n.kill()
class ToyTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( ToyTool, self).__init__()
@staticmethod
def toolTip():
return "Toy for Developer"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "toy.png")
@staticmethod
def name():
return str("ToyTool")
def do(self):
import nodeeditor.dev
reload (nodeeditor.dev)
nodeeditor.dev.run_shelfToy(self)
class FreeCADTool(ShelfTool):
"""docstring for PreviewTool."""
def __init__(self):
super( FreeCADTool, self).__init__()
@staticmethod
def toolTip():
return "FreeCAD mainWindow"
@staticmethod
def getIcon():
return QtGui.QIcon(RESOURCES_DIR + "freecad.png")
@staticmethod
def name():
return str("FreeCADTool")
def do(self):
mw=FreeCADGui.getMainWindow()
mw.hide()
mw.show()
def toollist():
return [
ComputeTool,
DeleteTool,
FreeCADTool,
ToyTool,
]
|
[
"thomas@freecadbuch.de"
] |
thomas@freecadbuch.de
|
fbe6fad964090c227647ca05aee2f02fe8a3aafb
|
cdd8f8ed846820ffa107de24d45b5a55cd3c5bd0
|
/boot_device.py
|
ec1c782e9e183d1a324f82ecfac9d27109542ede
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
rockokw/vesper
|
d8db2c69e3f69c4b7feb0553021c91b52dc20c00
|
caaae6c06fb3df6745b9485f40e3cc799c795a75
|
refs/heads/master
| 2022-12-29T21:44:54.008370
| 2020-10-18T20:34:15
| 2020-10-18T20:36:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
#!/usr/bin/env python
import argparse
import copy
import logging.config
import config as cfg
from device import Device
if __name__ == '__main__':
# Command-line arguments
parser = argparse.ArgumentParser(description='Device boot script')
parser.add_argument('name', type=str, help='device name')
parser.add_argument('-l', '--loglevel', default='WARNING',
help='log level (DEBUG|INFO|WARNING|ERROR|CRITICAL)')
args = parser.parse_args()
# Logging setup
mylogcfg = copy.deepcopy(cfg.LOGCFG)
mylogcfg['handlers']['default']['level'] = args.loglevel.upper()
mylogcfg['handlers']['file']['filename'] = '/dev/null'
logging.config.dictConfig(mylogcfg)
log = logging.getLogger('boot_device')
name = args.name
device = Device(name)
log.info('booting device %s...', name)
device.start()
|
[
"kwamelaw@usc.edu"
] |
kwamelaw@usc.edu
|
e136760c66ba06b8f27043bc427a323157a0c0a0
|
a0e4e123e5eb5f91eb5edd7d6d6bac268ca43c22
|
/holistic.py
|
0c4512029023a4b59d23865638c1ddf4746531f3
|
[] |
no_license
|
borodedamie/pose-python
|
eb50b322d1a327a88b3c851b7c1f650eb1a4d67f
|
728135c4de033aeec5d2fcf4c3fc98e1dc4de56f
|
refs/heads/main
| 2023-08-21T13:06:51.120094
| 2021-10-05T06:06:00
| 2021-10-05T06:06:00
| 412,929,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_holistic = mp.solutions.holistic
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as holistic:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = holistic.process(image)
# Draw landmark annotation on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image,
results.face_landmarks,
mp_holistic.FACEMESH_CONTOURS,
landmark_drawing_spec=None,
connection_drawing_spec=mp_drawing_styles
.get_default_face_mesh_contours_style())
mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
mp_holistic.POSE_CONNECTIONS,
landmark_drawing_spec=mp_drawing_styles
.get_default_pose_landmarks_style())
cv2.imshow('MediaPipe Holistic', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
|
[
"opeoluborode@yahoo.com"
] |
opeoluborode@yahoo.com
|
6ef9ff461f84c97d055693cb74ea084ecc008185
|
7173b2d4b647263449174a1c1acd326ee0d85467
|
/certbot-dns-ovh/docs/conf.py
|
57194666ec5ed0e6633a0bb7e95e43796aefd3f9
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
robstradling/certbot
|
0ee92d2f362d69342900a6be5e19175666bbab58
|
d0f1a3e205902f15b9608ef514cc1f0685da25ea
|
refs/heads/master
| 2020-06-01T04:44:20.728356
| 2019-06-05T21:51:17
| 2019-06-05T21:51:17
| 190,642,181
| 4
| 0
|
NOASSERTION
| 2019-06-06T20:02:01
| 2019-06-06T20:02:01
| null |
UTF-8
|
Python
| false
| false
| 5,779
|
py
|
# -*- coding: utf-8 -*-
#
# certbot-dns-ovh documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 12 10:14:31 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'private-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'certbot-dns-ovh'
copyright = u'2018, Certbot Project'
author = u'Certbot Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0'
# The full version, including alpha/beta/rc tags.
release = u'0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
default_role = 'py:obj'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# http://docs.readthedocs.org/en/latest/theme.html#how-do-i-use-this-locally-and-on-read-the-docs
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'certbot-dns-ovhdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'certbot-dns-ovh.tex', u'certbot-dns-ovh Documentation',
u'Certbot Project', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'certbot-dns-ovh', u'certbot-dns-ovh Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'certbot-dns-ovh', u'certbot-dns-ovh Documentation',
author, 'certbot-dns-ovh', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'acme': ('https://acme-python.readthedocs.org/en/latest/', None),
'certbot': ('https://certbot.eff.org/docs/', None),
}
|
[
"bmw@users.noreply.github.com"
] |
bmw@users.noreply.github.com
|
36ff8d5e5b50b26b9b5c7383e666b8d5b76ec9ec
|
90bd88fbc3676551432d4e4f1ad64260e1d62573
|
/Data_processing/feature_extraction.py
|
8bb08b7e2599bb30089fa5f215fbb527ea21caca
|
[] |
no_license
|
LeanneNortje/MultimodalSpeech-to-ImageMatching
|
648d009dbbb7a2f6c127e31a15193ab989a58998
|
a0ce92dc95d5052fbcd53a9a41cd3b6020345f9d
|
refs/heads/master
| 2023-04-28T10:11:45.132063
| 2021-04-26T13:36:32
| 2021-04-26T13:36:32
| 216,018,780
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,895
|
py
|
#!/usr/bin/env python
#_________________________________________________________________________________________________
#
# Author: Leanne Nortje
# Year: 2020
# Email: nortjeleanne@gmail.com
#_________________________________________________________________________________________________
#
# This sets up the feature extraction of a specific dataset and all the information required and
# associated with this dataset.
#
from datetime import datetime
from os import path
import argparse
import glob
import numpy as np
import os
from os import path
from scipy.io import wavfile
import matplotlib.pyplot as plt
import logging
import tensorflow as tf
import subprocess
import sys
from tqdm import tqdm
sys.path.append("..")
from paths import data_path
data_path = path.join("..", data_path)
import speech_library
#_____________________________________________________________________________________________________________________________________
#
# Argument function
#
#_____________________________________________________________________________________________________________________________________
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset", type=str, choices=["buckeye", "TIDigits"],
default="buckeye"
)
parser.add_argument(
"--feat_type", type=str, choices=["fbank", "mfcc"],
default="fbank"
)
return parser.parse_args()
#_____________________________________________________________________________________________________________________________________
#
# Main
#
#_____________________________________________________________________________________________________________________________________
def dataset_library(args):
dataset_lib = {}
if args.dataset == "buckeye":
dataset_lib["feats_type"] = args.feat_type
dataset_lib["dataset"] = args.dataset
dataset_lib["out_dir"] = args.dataset
dataset_lib["wavs"] = path.join(data_path, args.dataset, "*", "*.wav")
dataset_lib["vads"] = path.join(data_path, dataset_lib["dataset"], "english.wrd")
dataset_lib["training_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "devpart1_speakers.list")
dataset_lib["validation_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "devpart2_speakers.list")
dataset_lib["testing_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "zs_speakers.list")
dataset_lib["labels_to_exclude"] = ["SIL", "SPN"]
dataset_lib["include_labels"] = True
dataset_lib["labels_given"] = True
dataset_lib["extract_words_or_not"] = True
elif args.dataset == "TIDigits":
dataset_lib["feats_type"] = args.feat_type
dataset_lib["dataset"] = args.dataset
dataset_lib["out_dir"] = args.dataset
dataset_lib["wavs"] = path.join(data_path, args.dataset, "tidigits_wavs", "*", "*", "*","*.wav")
dataset_lib["vads"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "words.wrd")
dataset_lib["training_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "train_speakers.list")
dataset_lib["validation_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "val_speakers.list")
dataset_lib["testing_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "test_speakers.list")
dataset_lib["labels_to_exclude"] = []
dataset_lib["include_labels"] = True
dataset_lib["labels_given"] = True
dataset_lib["extract_words_or_not"] = True
return dataset_lib
def main():
args = arguments()
lib = dataset_library(args)
feats = speech_library.extract_features(lib)
speech_library.extract_segments(feats, lib)
if __name__ == "__main__":
main()
|
[
"nortjeleanne@gmail.com"
] |
nortjeleanne@gmail.com
|
3073d91c6b25644a57b79bd4617df05083ecfa66
|
9d1b52e3fa86f0873d9f03b79054273a43896e15
|
/source code for BOSSdataset/main.py
|
ce3a8ec23e1d5a1ecd7a39728b0a36178b820592
|
[] |
no_license
|
zhenglisec/DeepKeyStego
|
5f4b18e3b190f56aa3faa7833a114290fb09e263
|
d40a4661414f21b69f2e2023fda094db668df242
|
refs/heads/master
| 2021-06-17T08:46:04.034153
| 2021-03-19T15:05:55
| 2021-03-19T15:05:55
| 182,971,396
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,304
|
py
|
from __future__ import print_function
import argparse
import os
import random
import warnings
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model_boss import SSIM
from model_boss.encoder import Encoder
from model_boss.decoder import syDecoder, asyDecoder
from model_boss.discriminator import Discriminator
from model_boss.pkgenerator import pkgenerator
GPU = '0,1,2,3,4,5,6,7'
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
parser = argparse.ArgumentParser(
description='Pytorch Implement with ImageNet')
parser.add_argument('--type', default='symmeric', help='symmeric or asymmeric')
parser.add_argument('--dataroot', default='/data/lizheng/DATASET/BOSSbase_1.01_20/') #'/data/lizheng/DATASET/BOSSbase_1.01/'
parser.add_argument('--train', type=bool, default=True)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--beta', '--list', nargs='+',
default=[0.5, 0.5, 0.03, 0.1])
parser.add_argument('--seed', default=22, type=int,
help='seed for initializing training. ')
parser.add_argument('--secret_len', type=int, default=512*256)
parser.add_argument('--key_len', type=int, default=1024)
args = parser.parse_args()
if torch.cuda.is_available():
cudnn.benchmark = True
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.deterministic = True
    warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
print('==> Preparing data..')
transform_train = transforms.Compose([
#transforms.Resize((128, 128)),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
# transforms.Resize((128, 128)),
transforms.ToTensor(),])
trainset = torchvision.datasets.ImageFolder(
root=args.dataroot+'train', transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batchsize, shuffle=True, num_workers=2, drop_last=True)
testset = torchvision.datasets.ImageFolder(
root=args.dataroot+'test', transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batchsize, shuffle=False, num_workers=2, drop_last=True)
# Adversarial ground truths
valid = torch.cuda.FloatTensor(args.batchsize, 1).fill_(1.0)
fake = torch.cuda.FloatTensor(args.batchsize, 1).fill_(0.0)
best_real_acc, best_wm_acc, best_wm_input_acc = 0, 0, 0
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
train_loss, test_loss = [[], []], [[], []]
train_acc, test_acc = [[], []], [[], []]
# Model
print('==> Building model..')
if args.type == 'symmeric':
Decoder = syDecoder(sec_len = args.secret_len, output_function=nn.Sigmoid)
elif args.type == 'asymmeric':
Decoder = asyDecoder(sec_len = args.secret_len, output_function=nn.Sigmoid)
Pkgenerator = pkgenerator()
Encoder = Encoder(sec_len = args.secret_len)
Discriminator = Discriminator()
Encoder = nn.DataParallel(Encoder.cuda())
Decoder = nn.DataParallel(Decoder.cuda())
Discriminator = nn.DataParallel(Discriminator.cuda())
if args.type == 'asymmeric':
Pkgenerator = nn.DataParallel(Pkgenerator.cuda())
# loss function
criterionE_mse = nn.MSELoss().cuda()
criterionE_ssim = SSIM().cuda()
criterionD = nn.L1Loss().cuda()
optimizerE = optim.Adam(Encoder.parameters(), lr=args.lr, betas=(0.5, 0.999))
optimizerD = optim.Adam(Decoder.parameters(), lr=args.lr, betas=(0.5, 0.999))
criterionDis = nn.BCEWithLogitsLoss().cuda()
optimizerDis = optim.Adam(Discriminator.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.type == 'asymmeric':
optimizerPkGen = optim.Adam(Pkgenerator.parameters(), lr=args.lr, betas=(0.5, 0.999))
print(Encoder)
print(Decoder)
print(Discriminator)
def train(epoch):
print('\nEpoch: %d' % epoch)
Encoder.train()
Decoder.train()
Discriminator.train()
if args.type == 'asymmeric':
Pkgenerator.train()
for batch_idx, (input, _) in enumerate(trainloader):
input = input.cuda()[:, 0:1, :, :]
messages = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.secret_len))).float().cuda()
skey = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.key_len))).float().cuda() # secrect key
if args.type == 'asymmeric':
pkey = Pkgenerator(skey)
#############optimize Discriminator##############
optimizerDis.zero_grad()
if args.type == 'symmeric':
stego = Encoder(input, messages, skey)
elif args.type == 'asymmeric':
stego = Encoder(input, messages, pkey)
stego_dis_output = Discriminator(stego.detach())
real_dis_output = Discriminator(input)
loss_D_stego = criterionDis(stego_dis_output, fake)
loss_D_real = criterionDis(real_dis_output, valid)
loss_D = loss_D_stego + loss_D_real
loss_D.backward()
optimizerDis.step()
################optimize Encoder Decoder or Pkgenerator#############
optimizerE.zero_grad()
optimizerD.zero_grad()
if args.type == 'symmeric':
decoded_messages = Decoder(stego, skey)
elif args.type == 'asymmeric':
optimizerPkGen.zero_grad()
decoded_messages = Decoder(stego, pkey, skey)
stego_dis_output = Discriminator(stego)
loss_mse = criterionE_mse(input, stego)
loss_ssim = criterionE_ssim(input, stego)
loss_adv = criterionDis(stego_dis_output, valid)
loss_message = criterionD(decoded_messages, messages)
loss_H = args.beta[0] * loss_mse + args.beta[1] * \
(1 - loss_ssim) + args.beta[2] * loss_adv + args.beta[3] * loss_message
loss_H.backward()
optimizerE.step()
optimizerD.step()
if args.type == 'asymmeric':
optimizerPkGen.step()
decoded_rounded = torch.round(decoded_messages.detach())
bitwise_avg_correct = torch.sum(torch.eq(messages, decoded_rounded))/args.batchsize
print('[%d/%d][%d/%d] Loss D: %.4f () Loss_H: %.4f (mse: %.4f ssim: %.4f adv: %.4f) bitcorrect: %.4f' % (
epoch, args.num_epochs, batch_idx, len(trainloader),
loss_D.item(), loss_H.item(), loss_mse.item(
), loss_ssim.item(), loss_adv.item(), bitwise_avg_correct))
def test(epoch):
Encoder.eval()
Decoder.eval()
if args.type == 'asymmeric':
Pkgenerator.eval()
with torch.no_grad():
for batch_idx, (input, _) in enumerate(testloader):
input = input.cuda()[:, 0:1, :, :]
messages = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.secret_len))).float().cuda()
skey = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.key_len))).float().cuda()
if args.type == 'symmeric':
stego = Encoder(input, messages, skey)
decoded_messages = Decoder(stego, skey)
save_img = 'results/symmeric.pgm'
if args.type == 'asymmeric':
pkey = Pkgenerator(skey)
stego = Encoder(input, messages, pkey)
decoded_messages = Decoder(stego, pkey, skey)
save_img = 'results/asymmeric.pgm'
decoded_rounded = torch.round(decoded_messages.detach())#.cpu().numpy().round().clip(0, 1)
bitwise_avg_correct = torch.sum(torch.eq(messages, decoded_rounded))/args.batchsize
concat_img = torch.cat([input[0:10], stego[0:10]], dim=0)
torchvision.utils.save_image(concat_img, save_img, nrow=10, padding=0)
print('BitCorrect: %.4f' % (bitwise_avg_correct))
for epoch in range(args.num_epochs):
test(epoch)
train(epoch)
|
[
"zhenglisec@gmail.com"
] |
zhenglisec@gmail.com
|
fc11fbea3f492caf574da8adef94d0978313dd7f
|
33116209c77798529db2cfe5ea21dfcac13c5e38
|
/update_version.py
|
a82034ad6f96c32f395cd11a4ab4374180bd1335
|
[] |
no_license
|
RolfMaster/basicSynth
|
5ba7c8de684cecefe3fa4e361aeec4940709b40b
|
40153a11b6b9ead769389c989eb3be39c15d3fa2
|
refs/heads/master
| 2020-04-18T10:15:55.308826
| 2019-01-23T00:08:51
| 2019-01-23T00:08:51
| 167,462,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,405
|
py
|
#!/usr/bin/python
# this script will update the versions in plist and installer files to match that in resource.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))
def replacestrs(filename, s, r):
files = glob.glob(filename)
for line in fileinput.input(files,inplace=1):
string.find(line, s)
line = line.replace(s, r)
sys.stdout.write(line)
def main():
MajorStr = ""
MinorStr = ""
BugfixStr = ""
for line in fileinput.input(scriptpath + "/resource.h",inplace=0):
if "#define PLUG_VER " in line:
FullVersion = int(string.lstrip(line, "#define PLUG_VER "), 16)
major = FullVersion & 0xFFFF0000
MajorStr = str(major >> 16)
minor = FullVersion & 0x0000FF00
MinorStr = str(minor >> 8)
BugfixStr = str(FullVersion & 0x000000FF)
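      # e.g. a resource.h line "#define PLUG_VER 0x00010203" decodes to
      # major 1, minor 2, bugfix 3, i.e. the version string "1.2.3"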
FullVersionStr = MajorStr + "." + MinorStr + "." + BugfixStr
today = datetime.date.today()
CFBundleGetInfoString = FullVersionStr + ", Copyright BasicSynthPlugin, " + str(today.year)
CFBundleVersion = FullVersionStr
print "update_version.py - setting version to " + FullVersionStr
print "Updating plist version info..."
plistpath = scriptpath + "/resources/Synthesis-VST2-Info.plist"
vst2 = plistlib.readPlist(plistpath)
vst2['CFBundleGetInfoString'] = CFBundleGetInfoString
vst2['CFBundleVersion'] = CFBundleVersion
vst2['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(vst2, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
plistpath = scriptpath + "/resources/Synthesis-AU-Info.plist"
au = plistlib.readPlist(plistpath)
au['CFBundleGetInfoString'] = CFBundleGetInfoString
au['CFBundleVersion'] = CFBundleVersion
au['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(au, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
plistpath = scriptpath + "/resources/Synthesis-VST3-Info.plist"
vst3 = plistlib.readPlist(plistpath)
vst3['CFBundleGetInfoString'] = CFBundleGetInfoString
vst3['CFBundleVersion'] = CFBundleVersion
vst3['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(vst3, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
plistpath = scriptpath + "/resources/Synthesis-OSXAPP-Info.plist"
app = plistlib.readPlist(plistpath)
app['CFBundleGetInfoString'] = CFBundleGetInfoString
app['CFBundleVersion'] = CFBundleVersion
app['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(app, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
plistpath = scriptpath + "/resources/Synthesis-RTAS-Info.plist"
rtas = plistlib.readPlist(plistpath)
rtas['CFBundleGetInfoString'] = CFBundleGetInfoString
rtas['CFBundleVersion'] = CFBundleVersion
rtas['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(rtas, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
plistpath = scriptpath + "/resources/Synthesis-AAX-Info.plist"
aax = plistlib.readPlist(plistpath)
aax['CFBundleGetInfoString'] = CFBundleGetInfoString
aax['CFBundleVersion'] = CFBundleVersion
aax['CFBundleShortVersionString'] = CFBundleVersion
plistlib.writePlist(aax, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
# plistpath = scriptpath + "/resources/Synthesis-IOSAPP-Info.plist"
# iosapp = plistlib.readPlist(plistpath)
# iosapp['CFBundleGetInfoString'] = CFBundleGetInfoString
# iosapp['CFBundleVersion'] = CFBundleVersion
# iosapp['CFBundleShortVersionString'] = CFBundleVersion
# plistlib.writePlist(iosapp, plistpath)
# replacestrs(plistpath, "//Apple//", "//Apple Computer//");
print "Updating Mac Installer version info..."
plistpath = scriptpath + "/installer/Synthesis.pkgproj"
installer = plistlib.readPlist(plistpath)
for x in range(0,6):
installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = FullVersionStr
plistlib.writePlist(installer, plistpath)
replacestrs(plistpath, "//Apple//", "//Apple Computer//");
print "Updating Windows Installer version info..."
for line in fileinput.input(scriptpath + "/installer/Synthesis.iss",inplace=1):
if "AppVersion" in line:
line="AppVersion=" + FullVersionStr + "\n"
sys.stdout.write(line)
if __name__ == '__main__':
main()
|
[
"prvo.slovo.a@gmail.com"
] |
prvo.slovo.a@gmail.com
|
bceaab930c75e6fc131b1c92e95c21d9403e01e5
|
28e554c3677c3192de4ae8bfeebdec47ab4e6bb9
|
/Autoencoders/autoencoders.py
|
4f76b720856b9491751fb462ef281b739a3fe46c
|
[] |
no_license
|
shrinidhi99/Computer-Vision
|
4d94f5ed54b9af78737a73aee5f7b577b6b6e470
|
75147678b56c2d977e06677eab58c67da95e6db7
|
refs/heads/main
| 2023-04-25T12:06:28.012599
| 2021-05-05T07:49:52
| 2021-05-05T07:49:52
| 334,365,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,176
|
py
|
import random
import tensorflow as tf
from tensorflow import keras
import cv2
import numpy as np
import matplotlib.pyplot as plt
# loads the popular "mnist" training dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# scales the data. pixel values range from 0 to 255, so this makes it range 0 to 1
x_train = x_train/255.0
# scales the data. pixel values range from 0 to 255, so this makes it range 0 to 1
x_test = x_test/255.0
encoder_input = keras.Input(shape=(28, 28), name='img')
x = keras.layers.Flatten()(encoder_input)
encoder_output = keras.layers.Dense(64, activation="relu")(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
decoder_input = keras.layers.Dense(64, activation="relu")(encoder_output)
x = keras.layers.Dense(784, activation="relu")(decoder_input)
decoder_output = keras.layers.Reshape((28, 28))(x)
opt = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
autoencoder.summary()
autoencoder.compile(opt, loss='mse')
epochs = 3
for epoch in range(epochs):
history = autoencoder.fit(
x_train,
x_train,
epochs=1,
batch_size=32, validation_split=0.10
)
# autoencoder.save(f"models/AE-{epoch+1}.model")
for d in x_test[:30]:  # just show 30 examples, feel free to show all or however many you want!
ae_out = autoencoder.predict([d.reshape(-1, 28, 28)])
img = ae_out[0]
cv2.imshow("decoded", img)
cv2.imshow("original", np.array(d))
cv2.waitKey(1000) # wait 1000ms, 1 second, and then show the next.
def add_noise(img, random_chance=5):
noisy = []
for row in img:
new_row = []
for pix in row:
if random.choice(range(100)) <= random_chance:
new_val = random.uniform(0, 1)
new_row.append(new_val)
else:
new_row.append(pix)
noisy.append(new_row)
return np.array(noisy)
def remove_values(img, random_chance=5):
noisy = []
for row in img:
new_row = []
for pix in row:
if random.choice(range(100)) <= random_chance:
new_val = 0 # changing this to be 0
new_row.append(new_val)
else:
new_row.append(pix)
noisy.append(new_row)
return np.array(noisy)
# slightly higher chance so we see more impact
some_hidden = remove_values(x_train[0], random_chance=15)
plt.imshow(some_hidden, cmap="gray")
plt.show()
ae_out = autoencoder.predict([some_hidden.reshape(-1, 28, 28)])
# predict is done on a vector, and returns a vector, even if its just 1 element, so we still need to grab the 0th
img = ae_out[0]
plt.imshow(ae_out[0], cmap="gray")
plt.show()
# slightly higher chance so we see more impact
some_hidden = remove_values(x_train[0], random_chance=35)
plt.imshow(some_hidden, cmap="gray")
plt.show()
ae_out = autoencoder.predict([some_hidden.reshape(-1, 28, 28)])
# predict is done on a vector, and returns a vector, even if its just 1 element, so we still need to grab the 0th
img = ae_out[0]
plt.imshow(ae_out[0], cmap="gray")
plt.show()
|
[
"shrinidhi99.varna@gmail.com"
] |
shrinidhi99.varna@gmail.com
|
c61ff42baf9127aaa9763d25b963025cd51cae7c
|
af3a6d84270f21fad514f62cbcd3a1e353a3e7b8
|
/utils.py
|
6cb7e4191064a09eb74188dc550b5af0a9452e09
|
[] |
no_license
|
shrish23/Telegram-newsBot
|
8690e45a6105032fb1a7b3a3992e7d61c79b556d
|
6f87f19dfbe792efe1b5a11e4be67746566685e6
|
refs/heads/master
| 2023-08-24T08:18:10.362281
| 2021-10-21T10:21:38
| 2021-10-21T10:21:38
| 419,597,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "Client.json"
import dialogflow_v2 as dialogflow
dialogflow_session_client = dialogflow.SessionsClient()
PROJECT_ID = "newsbot-tuqv"
from gnewsclient import gnewsclient
client = gnewsclient.NewsClient()
def detect_intent_from_text(text, session_id, language_code='en'):
session = dialogflow_session_client.session_path(PROJECT_ID, session_id)
text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = dialogflow_session_client.detect_intent(session=session,query_input=query_input)
return response.query_result
def get_reply(query, chat_id):
response = detect_intent_from_text(query, chat_id)
if response.intent.display_name == 'get_news':
return "get_news", dict(response.parameters)
else:
return "small_talk", response.fulfillment_text
def fetch_news(parameters):
client.language = parameters.get('language')
client.location = parameters.get('geo-country')
client.topic = parameters.get('topic')
return client.get_news()[:5]
topics_keyboard = [
['Top Stories', 'World','Nation'],
['Business','Technology','Entertainment'],
['Sports','Science','Health']
]
|
[
"shrishsharma@outlook.com"
] |
shrishsharma@outlook.com
|
188cde0f74544b63ad4efac44b60ccebb54020fa
|
cabe9566267e24ee9ca827ccf47e75bfc1cd5021
|
/features/migrations/0002_features_status.py
|
07aa745905af3aef8a5e2b45fb0c0d8678873f50
|
[] |
no_license
|
Code-Institute-Submissions/ecommerce-project
|
0a73d98feeb7f667fa541f4f5181a818ffb347a7
|
1c35f760b4f82fc513a1c3f27005b03922c60020
|
refs/heads/master
| 2020-05-18T09:40:33.804158
| 2019-04-30T21:09:42
| 2019-04-30T21:09:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-17 13:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='features',
name='status',
field=models.CharField(default='Todo', max_length=254),
),
]
|
[
"noelle.browne@yahoo.ie"
] |
noelle.browne@yahoo.ie
|
695e2a94c90888c0a054c8a98f9c352950ce7682
|
187a4c23f446e1d82efe2fba2f935c32087a1922
|
/usbdeath.py
|
5725f4f32be649254379d0f0007dd1f0225ff0fc
|
[] |
no_license
|
semeion/usbdeath
|
f38730a23473525633a925360d2c1cf6716ca374
|
8b449a8866e15114562162c40f0cac11feea16c0
|
refs/heads/master
| 2021-01-22T00:28:46.744110
| 2014-06-02T19:16:00
| 2014-06-02T19:16:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Coded by Sam (info@sam3.se)
import subprocess, time
from os import system
usb_whitelist =[]
p = subprocess.Popen('lsusb', stdout=subprocess.PIPE)
while True:
line = p.stdout.readline()
if line != '':
usb_whitelist.append(line.strip('\n'))
else:
break
while True:
live_usbs = []
p = subprocess.Popen('lsusb', stdout=subprocess.PIPE)
intruder = True
while True:
line = p.stdout.readline()
if line != '':
live_usbs.append(line.strip('\n'))
else:
break
for usb in live_usbs:
if not usb in usb_whitelist:
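            # unknown device detected: enable the magic SysRq key and immediately
            # power the machine off ('o' written to /proc/sysrq-trigger = poweroff)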
system('echo 1 > /proc/sys/kernel/sysrq && echo o > /proc/sysrq-trigger')
time.sleep(1)
|
[
"info@sam3.se"
] |
info@sam3.se
|
3ea0bb442577424dd93a06877b4cb480971dc827
|
d7f4e330f5d803c8cd495729fd86da61b89565f3
|
/torch/_meta_registrations.py
|
0511b5188fbea63e9c0427f06428dc9859aa3885
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
awf/pytorch
|
55ff84549c17579a1f62910ef2ac7b1dcd6fa897
|
0dceaf07cd1236859953b6f85a61dc4411d10f87
|
refs/heads/master
| 2023-02-08T13:19:22.073279
| 2023-01-29T10:36:40
| 2023-01-29T10:36:43
| 239,372,903
| 0
| 0
|
NOASSERTION
| 2020-02-09T20:55:23
| 2020-02-09T20:55:22
| null |
UTF-8
|
Python
| false
| false
| 82,649
|
py
|
import math
from typing import List, Optional, Union
import torch
import torch._prims_common as utils
from torch import Tensor
from torch._decomp import _add_op_to_registry, global_decomposition_table, meta_table
from torch._ops import OpOverload
from torch._prims import _elementwise_meta, ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND
from torch._prims_common import (
check,
corresponding_complex_dtype,
corresponding_real_dtype,
elementwise_dtypes,
ELEMENTWISE_TYPE_PROMOTION_KIND,
FloatLike,
IntLike,
make_contiguous_strides_for,
)
from torch._prims_common.wrappers import out_wrapper
from torch._refs import _broadcast_shapes
from torch._subclasses.fake_tensor import check_no_bool_index_tensors
from torch.utils._pytree import tree_map
aten = torch.ops.aten
_meta_lib_dont_use_me_use_register_meta = torch.library.Library("aten", "IMPL", "Meta")
def register_meta(op):
def wrapper(fn):
def register(op):
_add_op_to_registry(meta_table, op, fn)
tree_map(register, op)
return fn
return wrapper
def toRealValueType(dtype):
from_complex = {
torch.complex32: torch.half,
torch.cfloat: torch.float,
torch.cdouble: torch.double,
}
return from_complex.get(dtype, dtype)
@register_meta([aten._fft_c2c.default, aten._fft_c2c.out])
@out_wrapper()
def meta_fft_c2c(self, dim, normalization, forward):
assert self.dtype.is_complex
return self.new_empty(self.size())
@register_meta([aten._fft_r2c.default, aten._fft_r2c.out])
@out_wrapper()
def meta_fft_r2c(self, dim, normalization, onesided):
assert self.dtype.is_floating_point
output_sizes = list(self.size())
if onesided:
last_dim = dim[-1]
last_dim_halfsize = (output_sizes[last_dim] // 2) + 1
output_sizes[last_dim] = last_dim_halfsize
return self.new_empty(
output_sizes, dtype=utils.corresponding_complex_dtype(self.dtype)
)
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
assert out.ndim == 1 and out.size(0) == n
return out
@register_meta(aten.randint.default)
def meta_randint(
high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.randint.low)
def meta_randint_low(
low, high, size, *, dtype=torch.long, layout=None, device=None, pin_memory=None
):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.rand.default)
def meta_rand_default(size, *, dtype=None, layout=None, device=None, pin_memory=None):
return torch.empty(
size, dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
assert self.dtype.is_complex
output_sizes = list(self.size())
output_sizes[dim[-1]] = lastdim
return self.new_empty(output_sizes, dtype=toRealValueType(self.dtype))
@register_meta(aten.copy_.default)
def meta_copy_(self, src, non_blocking=False):
return self
def inferUnsqueezeGeometry(tensor, dim):
result_sizes = list(tensor.size())
result_strides = list(tensor.stride())
new_stride = 1 if dim >= tensor.dim() else result_sizes[dim] * result_strides[dim]
result_sizes.insert(dim, 1)
result_strides.insert(dim, new_stride)
return result_sizes, result_strides
@register_meta(aten.unsqueeze_.default)
def meta_unsqueeze_(self, dim):
dim = maybe_wrap_dim(dim, self.dim() + 1)
g_sizes, g_strides = inferUnsqueezeGeometry(self, dim)
self.as_strided_(g_sizes, g_strides)
return self
# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
result_size = list(self.size())
if self.dim() > 0:
result_size[dim] = index.numel()
return self.new_empty(result_size)
@register_meta(aten.index_select.out)
def meta_index_select_out(self, dim, index, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.index_select(self, dim, index))
@register_meta([aten.max.default, aten.max.unary_out])
@out_wrapper()
def meta_max(self):
return self.new_empty(())
@register_meta(aten.max.dim)
def meta_max_dim(self, dim, keepdim=False):
dim = utils.reduction_dims(self.shape, (dim,))
output_shape = _compute_reduction_shape(self, dim, keepdim)
return (
self.new_empty(output_shape),
self.new_empty(output_shape, dtype=torch.long),
)
@register_meta([aten.min.default])
def meta_min(self):
return self.new_empty(())
@register_meta(aten.angle.default)
def meta_angle(self):
if self.is_complex():
result_dtype = corresponding_real_dtype(self.dtype)
else:
_, result_dtype = elementwise_dtypes(
self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
return torch.empty_like(self, dtype=result_dtype)
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.angle(self))
# From aten/src/ATen/native/LinearAlgebraUtils.h
def squareCheckInputs(self: Tensor, f_name: str):
assert (
self.dim() >= 2
), f"{f_name}: The input tensor must have at least 2 dimensions."
assert self.size(-1) == self.size(
-2
), f"{f_name}: A must be batches of square matrices, but they are {self.size(-2)} by {self.size(-1)} matrices"
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkFloatingOrComplex(
t: Tensor, f_name: str, allow_low_precision_dtypes: bool = True
):
dtype = t.dtype
check(
t.is_floating_point() or t.is_complex(),
lambda: f"{f_name}, : Expected a floating point or complex tensor as input. Got , {dtype}",
)
if allow_low_precision_dtypes:
check(
dtype in (torch.float, torch.double, torch.cfloat, torch.cdouble),
lambda: f"{f_name} : Low precision dtypes not supported. Got {dtype}",
)
# From aten/src/ATen/native/LinearAlgebraUtils.h
def checkIsMatrix(A: Tensor, f_name: str, arg_name: str = "A"):
check(
A.dim() >= 2,
lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
)
def checkUplo(uplo: str):
uplo_uppercase = uplo.upper()
assert (
len(uplo) == 1 and uplo_uppercase == "U" or uplo_uppercase == "L"
), f"Expected UPLO argument to be 'L' or 'U', but got {uplo}"
# @register_meta(aten.linalg_eigh.default)
def meta_linalg_eigh(self, uplo="L"):
squareCheckInputs(self, "linalg_eigh")
checkUplo(uplo)
real_dtype = toRealValueType(self.dtype)
assert self.dim() >= 2
values = self.new_empty(self.shape, dtype=real_dtype)
values.transpose_(-2, -1)
vectors = self.new_empty(self.shape[:-1])
return (values, vectors)
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_cholesky_ex.default)
def linalg_cholesky_ex(A: Tensor, upper: bool = False, check_errors: bool = False):
squareCheckInputs(A, "linalg.cholesky")
checkFloatingOrComplex(A, "linalg.cholesky")
A_shape = A.shape
ndim = len(A_shape)
# L
L_strides = make_contiguous_strides_for(A_shape, False)
L = A.new_empty(A_shape)
L.as_strided_(A_shape, L_strides)
# infos
infos = A.new_empty(A_shape[0 : ndim - 2], dtype=torch.int32)
return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
@register_meta(aten.linalg_inv_ex.default)
def linalg_inv_ex_meta(A: Tensor, check_errors: bool = False):
squareCheckInputs(A, "linalg.inv_ex")
checkFloatingOrComplex(A, "linalg.inv_ex", allow_low_precision_dtypes=False)
L = A.new_empty(A.shape)
L.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
infos = A.new_empty(A.shape[:-2], dtype=torch.int32)
return L, infos
# From aten/src/ATen/native/BatchLinearAlgebra.cpp
# NOTE: matching defaults in aten/src/ATen/native/native_functions.yaml
@register_meta(aten._linalg_svd.default)
def _linalg_svd_meta(
A: Tensor, full_matrices: bool = False, compute_uv: bool = True, driver: Optional[str] = None
):
checkIsMatrix(A, "linalg.svd")
checkFloatingOrComplex(A, "linalg.svd")
batch_dims = list(A.shape[:-2])
m = A.shape[-2]
n = A.shape[-1]
k = min(m, n)
if compute_uv:
U_shape = batch_dims + [m, m if full_matrices else k]
U = A.new_empty(U_shape)
U.as_strided_(U_shape, make_contiguous_strides_for(U_shape, row_major=False))
V_shape = batch_dims + [n if full_matrices else k, n]
V = A.new_empty(V_shape)
# TODO: need to distinguish cuSOLVER case? (see original code)
V.as_strided_(V_shape, make_contiguous_strides_for(V_shape, row_major=False))
else:
# doesn't matter
U = A.new_empty([0])
V = A.new_empty([0])
# S is always real, even when A is complex.
S = A.new_empty(batch_dims + [k], dtype=toRealValueType(A.dtype))
return U, S, V
# From aten/src/ATen/native/LinearAlgebra.cpp
@register_meta(aten._linalg_det.default)
def _linalg_det_meta(A):
squareCheckInputs(A, "linalg.det")
checkFloatingOrComplex(A, "linalg.det")
det = A.new_empty(A.shape[:-2])
LU = A.new_empty(A.shape)
LU.as_strided_(A.shape, make_contiguous_strides_for(A.shape, row_major=False))
pivots = A.new_empty(A.shape[:-1], dtype=torch.int32)
return det, LU, pivots
# From aten/src/ATen/native/ReflectionPad.cpp
@register_meta(
[aten.reflection_pad2d_backward.default, aten.replication_pad2d_backward.default]
)
def meta_pad2d_backward(grad_output, self, padding):
dim_w = 2
dim_h = 1
dim_plane = 0
nbatch = 1
self_shape = self.shape
if self.dim() == 4:
nbatch = self_shape[0]
dim_w += 1
dim_h += 1
dim_plane += 1
pad_l = padding[0]
pad_r = padding[1]
pad_t = padding[2]
pad_b = padding[3]
nplane = self_shape[dim_plane]
input_h = self_shape[dim_h]
input_w = self_shape[dim_w]
output_h = input_h + pad_t + pad_b
output_w = input_w + pad_l + pad_r
check(
output_w == grad_output.shape[dim_w],
lambda: f"gradOutput width unexpected. Expected: {output_w}, Got: {grad_output.shape[dim_w]}",
)
check(
output_h == grad_output.shape[dim_h],
lambda: f"gradOutput height unexpected. Expected: {output_h}, Got: {grad_output.shape[dim_h]}",
)
return self.new_empty(self.shape)
@register_meta(aten.reflection_pad2d.default)
def meta_pad2d(self, padding):
valid_dims = self.size(1) != 0 and self.size(2) != 0
check(
(self.ndim == 3 and valid_dims)
or (self.ndim == 4 and valid_dims and self.size(3) != 0),
lambda: f"3D or 4D (batch mode) tensor expected for input, but got: {self}",
)
if self.ndim == 4:
nbatch, nplane, input_h, input_w = self.shape
else:
nbatch = 1
nplane, input_h, input_w = self.shape
pad_l, pad_r, pad_t, pad_b = padding
output_h = input_h + pad_t + pad_b
output_w = input_w + pad_l + pad_r
if self.ndim == 3:
return self.new_empty((nplane, output_h, output_w))
else:
return self.new_empty((nbatch, nplane, output_h, output_w))
@register_meta([aten.bernoulli.default, aten.bernoulli.out])
@out_wrapper()
def meta_bernoulli(self, *, generator=None):
# https://github.com/pytorch/pytorch/issues/88612
return torch.empty_like(self).contiguous()
@register_meta(aten.bernoulli_.float)
def meta_bernoulli_(self, p=0.5, generator=None):
return self
@register_meta(aten.bernoulli.p)
def meta_bernoulli_p(self, p=0.5, generator=None):
# https://github.com/pytorch/pytorch/issues/88612
return torch.empty_like(self).contiguous()
@register_meta(aten._fused_moving_avg_obs_fq_helper.default)
def meta__fused_moving_avg_obs_fq_helper(
self,
observer_on,
fake_quant_on,
running_min,
running_max,
scale,
zero_point,
averaging_const,
quant_min,
quant_max,
ch_axis,
per_row_fake_quant=False,
symmetric_quant=False,
):
check(
ch_axis < self.dim(),
lambda: "Error in fused_moving_avg_obs_fake_quant_cpu: ch_axis must be < self.dim()",
)
mask = torch.empty_like(self, dtype=torch.bool)
return (torch.empty_like(self), mask)
def dot_check(self, other):
check(
self.dim() == 1 and other.dim() == 1,
lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
)
@register_meta(aten.dot.default)
def meta_dot(self, tensor):
dot_check(self, tensor)
return self.new_empty(())
@register_meta([aten.mm.default])
def meta_mm(a, b):
check(a.dim() == 2, lambda: "a must be 2D")
check(b.dim() == 2, lambda: "b must be 2D")
N, M1 = a.shape
M2, P = b.shape
check(M1 == M2, lambda: "a and b must have same reduction dim")
return a.new_empty(N, P)
def _compute_reduction_shape(self, dims, keepdim):
if keepdim:
return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim))
return utils.compute_reduction_output_shape(self.shape, dims)
# FakeTensors (meta tensors with a device) will report device as meta
# when running meta kernels. Here, access the "fake device" of FakeTensor if it
# exists so meta kernels which have diverge per device will be more
# accurate when run with FakeTensors
def device_hint(tensor) -> "str":
if isinstance(tensor, torch._subclasses.FakeTensor):
return tensor.fake_device.type
else:
return "cuda" # default to cuda
def calc_conv_nd_return_shape(
input_tensor: torch.Tensor,
weight: torch.Tensor,
stride: Union[List[int], int],
padding: Union[List[int], int],
dilation: Union[List[int], int],
is_transposed: bool,
groups: int,
output_padding: Optional[Union[List[int], int]] = None,
):
def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
Returns:
The output length
"""
return (ln + 2 * p - d * (k - 1) - 1) // s + 1
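# Illustrative check (not from the original source): ln=32, p=1, d=1, k=3, s=1
# gives (32 + 2 - 2 - 1) // 1 + 1 = 32, i.e. "same" output length for a 3x3 kernel.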
def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
if transposed convolution is used.
See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
op: output padding in that dim
Returns:
The output length
"""
return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1
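# Illustrative check (not from the original source): ln=32, p=1, d=1, k=3, s=2, op=1
# gives (32 - 1) * 2 - 2 + 2 + 1 + 1 = 64, doubling the spatial length as expected.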
kernel_size = weight.shape[2:]
dims = input_tensor.shape[2:]
if is_transposed:
out_channels = groups * weight.shape[1]
else:
out_channels = weight.shape[0]
if weight.shape[1] * groups != input_tensor.shape[1]:
raise RuntimeError("Invalid channel dimensions")
ret_shape = [input_tensor.shape[0], out_channels]
if isinstance(stride, IntLike):
stride = [stride] * len(dims)
elif len(stride) == 1:
stride = [stride[0]] * len(dims)
if isinstance(padding, IntLike):
padding = [padding] * len(dims)
elif len(padding) == 1:
padding = [padding[0]] * len(dims)
if isinstance(dilation, IntLike):
dilation = [dilation] * len(dims)
elif len(dilation) == 1:
dilation = [dilation[0]] * len(dims)
output_padding_list: Optional[List[int]] = None
if output_padding:
if isinstance(output_padding, IntLike):
output_padding_list = [output_padding] * len(dims)
elif len(output_padding) == 1:
output_padding_list = [output_padding[0]] * len(dims)
else:
output_padding_list = output_padding
for i in range(len(dims)):
# If output_padding is present, we are dealing with a transposed convolution
if output_padding_list:
ret_shape.append(
_formula_transposed(
dims[i],
padding[i],
dilation[i],
kernel_size[i],
stride[i],
output_padding_list[i],
)
)
else:
ret_shape.append(
_formula(dims[i], padding[i], dilation[i], kernel_size[i], stride[i])
)
return ret_shape
def is_channels_last(ten):
return torch._prims_common.suggest_memory_format(ten) == torch.channels_last
@register_meta(aten.convolution.default)
def meta_conv(
input_tensor: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
stride: List[int],
padding: List[int],
dilation: List[int],
is_transposed: bool,
output_padding: List[int],
groups: int,
):
def pick_memory_format():
if device_hint(input_tensor) == "cuda":
if is_channels_last(input_tensor) or is_channels_last(weight):
return torch.channels_last
else:
if is_channels_last(input_tensor):
return torch.channels_last
if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
shape_out = calc_conv_nd_return_shape(
input_tensor,
weight,
stride,
padding,
dilation,
is_transposed,
groups,
output_padding if is_transposed else None,
)
out = input_tensor.new_empty(shape_out)
out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload]
return out
if torch._C.has_mkldnn:
_meta_lib_dont_use_me_use_register_meta_for_mkldnn = torch.library.Library(
"mkldnn", "IMPL", "Meta"
)
def pick_mkldnn_conv_memory_format(input_tensor, weight):
if weight.is_mkldnn:
return torch.channels_last
if is_channels_last(input_tensor) or is_channels_last(weight):
return torch.channels_last
if input_tensor.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
@register_meta(torch.ops.mkldnn._convolution_pointwise.default)
def meta_mkldnn_convolution_default(
input_tensor,
weight,
bias,
padding,
stride,
dilation,
groups,
attr,
scalars,
algorithm,
):
shape_out = calc_conv_nd_return_shape(
input_tensor, weight, stride, padding, dilation, False, groups, []
)
out = input_tensor.new_empty(shape_out)
out_memory_format = torch.channels_last
out = out.to(memory_format=out_memory_format) # type: ignore[call-overload]
return out
@register_meta(torch.ops.mkldnn._convolution_pointwise.binary)
def meta_mkldnn_convolution_binary(
input_tensor,
other,
weight,
bias,
padding,
stride,
dilation,
groups,
binary_attr,
alpha,
unary_attr,
unary_scalars,
unary_algorithm,
):
out = input_tensor.new_empty(other.size())
out = out.to(memory_format=torch.channels_last) # type: ignore[call-overload]
return out
@register_meta(torch.ops.mkldnn._convolution_pointwise_.binary)
def meta_mkldnn_convolution_binary_inplace(
input_tensor,
other,
weight,
bias,
padding,
stride,
dilation,
groups,
binary_attr,
alpha,
unary_attr,
unary_scalars,
unary_algorithm,
):
return other
@register_meta(torch.ops.mkldnn._linear_pointwise.default)
def meta_linear_pointwise_default(
input_tensor, weight, bias, attr, scalars, algorithm
):
return input_tensor.new_empty((*input_tensor.shape[:-1], weight.shape[0]))
@register_meta(torch.ops.mkldnn._linear_pointwise.binary)
def meta_linear_pointwise_binary(input_tensor, other, weight, bias, attr):
out = input_tensor.new_empty(other.size())
return out
if torch._C.has_mkl:
_meta_lib_dont_use_me_use_register_meta_for_mkl = torch.library.Library(
"mkl", "IMPL", "Meta"
)
@register_meta(torch.ops.mkl._mkl_linear)
def meta_mkl_linear(
input_tensor,
packed_weight,
orig_weight,
bias,
batch_size,
):
return input_tensor.new_empty(
(*input_tensor.shape[:-1], orig_weight.shape[0])
)
# from check_dim_size() in aten/src/ATen/TensorUtils.cpp.
def check_dim_size(tensor, dim, dim_size, size):
check(
tensor.dim() == dim and tensor.shape[dim_size] == size,
lambda: f"Expected a tensor of dimension {dim} and tensor.size[{dim_size}] == {size}, "
+ f"but got : dimension {tensor.dim()} and tensor.size[{dim_size}] = {tensor.shape[dim_size]}",
)
@register_meta(aten.avg_pool2d.default)
def meta_avg_pool2d(
input,
kernel_size,
stride=(),
padding=(0,),
ceil_mode=False,
count_include_pad=True,
divisor_override=None,
):
def unpack(name, val):
check(
len(val) in [1, 2],
lambda: f"avg_pool2d: {name} must either be a single int, or a tuple of two ints",
)
H = val[0]
W = H if len(val) == 1 else val[1]
return H, W
kH, kW = unpack("kernel_size", kernel_size)
check(
len(stride) in [0, 1, 2],
lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
if len(stride) == 0:
dH, dW = kH, kW
elif len(stride) == 1:
dH, dW = stride[0], stride[0]
else:
dH, dW = unpack("stride", stride)
padH, padW = unpack("padding", padding)
check(
divisor_override is None or divisor_override != 0,
lambda: "divisor must be not zero",
)
nbatch = input.size(-4) if input.dim() == 4 else 1
nInputPlane = input.size(-3)
inputHeight = input.size(-2)
inputWidth = input.size(-1)
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
memory_format = utils.suggest_memory_format(input)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
1,
1,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
)
if input.dim() == 3:
size = [nInputPlane, outputHeight, outputWidth]
else:
size = [nbatch, nInputPlane, outputHeight, outputWidth]
return torch.empty(
size, dtype=input.dtype, device=input.device, memory_format=memory_format
)
# from avg_pool2d_backward_shape_check() in aten/src/ATen/native/Pool.h.
def avg_pool2d_backward_shape_check(
input,
gradOutput,
nbatch,
kH,
kW,
dH,
dW,
padH,
padW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
):
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
1,
1,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
)
ndim = input.dim()
nOutputPlane = nInputPlane
check_dim_size(gradOutput, ndim, ndim - 3, nOutputPlane)
check_dim_size(gradOutput, ndim, ndim - 2, outputHeight)
check_dim_size(gradOutput, ndim, ndim - 1, outputWidth)
# Don't override the C++ registration.
@register_meta(aten.avg_pool2d_backward.default)
def meta_avg_pool2d_backward(
gradOutput_,
input,
kernel_size,
stride,
padding,
ceil_mode,
count_include_pad,
divisor_override,
):
# From aten/src/ATen/native/AveragePool2d.cpp structured kernel meta func.
check(
len(kernel_size) == 1 or len(kernel_size) == 2,
lambda: "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints",
)
kH = kernel_size[0]
kW = kH if len(kernel_size) == 1 else kernel_size[1]
check(
len(stride) == 0 or len(stride) == 1 or len(stride) == 2,
lambda: "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
dH = kH if len(stride) == 0 else stride[0]
dW = kW if len(stride) == 0 else dH if len(stride) == 1 else stride[1]
check(
len(padding) == 1 or len(padding) == 2,
lambda: "avg_pool2d: padding must either be a single int, or a tuple of two ints",
)
padH = padding[0]
padW = padH if len(padding) == 1 else padding[1]
check(
divisor_override is None or divisor_override != 0,
lambda: "divisor must be not zero",
)
input_size = input.shape
nbatch = input_size[-4] if input.dim() == 4 else 1
nInputPlane = input_size[-3]
inputHeight = input_size[-2]
inputWidth = input_size[-1]
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, 1, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, 1, ceil_mode)
mem_format = utils.suggest_memory_format(input)
avg_pool2d_backward_shape_check(
input,
gradOutput_,
nbatch,
kH,
kW,
dH,
dW,
padH,
padW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
mem_format,
)
return torch.empty(
input_size, dtype=input.dtype, device=input.device, memory_format=mem_format
)
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
check(
self.ndim == 3 or self.ndim == 4,
lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
)
output_shape = self.shape[:-2] + tuple(output_size)
memory_format = utils.suggest_memory_format(self)
# need to set memory_format to preserve the memory format of the input
# channel last input should have channel last output
return torch.empty(
output_shape, dtype=self.dtype, device=self.device, memory_format=memory_format
)
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
check(
self.ndim == 4 or self.ndim == 5,
lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
)
return self.new_empty(self.shape[:-3] + tuple(output_size))
@register_meta(aten._adaptive_avg_pool2d_backward.default)
def meta__adaptive_avg_pool2d_backward(grad_out, self):
ndim = grad_out.ndim
for i in range(1, ndim):
check(
grad_out.size(i) > 0,
lambda: f"adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero \
size for non-batch dimensions, {grad_out.shape} with dimension {i} being empty",
)
check(
ndim == 3 or ndim == 4,
lambda: f"adaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got {self.shape}",
)
check(
self.dtype == grad_out.dtype,
lambda: f"expected dtype {self.dtype} for `grad_output` but got dtype {grad_out.dtype}",
)
return self.new_empty(self.shape)
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
if output_size is None:
raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
return repeats.new_empty(output_size)
@register_meta([aten.complex.default, aten.complex.out])
@out_wrapper()
def meta_complex(real, imag):
assert real.dtype.is_floating_point
assert imag.dtype.is_floating_point
out_shape = _broadcast_shapes(real.shape, imag.shape)
return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype))
@register_meta(aten.vdot.default)
def vdot(self, other):
if not self.is_complex:
return torch.dot(self, other)
if self.is_conj():
if other.is_conj():
return torch.vdot(other.conj(), self.conj())
else:
return torch.dot(self.conj(), other)
elif other.is_conj():
return torch.dot(self, other.conj()).conj()
dot_check(self, other)
return self.new_empty(())
# Leaving this function around because a python implementation
# of indexing shape inference is useful,
# but not registering it to the dispatcher because we already
# get shape inference through structured kernels
@register_meta(aten.index.Tensor)
def meta_index_Tensor(self, indices):
check_no_bool_index_tensors(aten.index.Tensor, self, indices)
check(indices, lambda: "at least one index must be provided")
# aten::index is the internal advanced indexing implementation
# checkIndexTensorTypes and expandTensors
result: List[Optional[Tensor]] = []
for i, index in enumerate(indices):
if index is not None:
check(
index.dtype in [torch.long, torch.int, torch.int8, torch.bool],
lambda: "tensors used as indices must be long, int, byte or bool tensors",
)
if index.dtype in [torch.int8, torch.bool]:
nonzero = index.nonzero()
k = len(result)
check(
k + index.ndim <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim}",
IndexError,
)
for j in range(index.ndim):
check(
index.shape[j] == self.shape[k + j],
lambda: f"The shape of the mask {index.shape} at index {i} "
f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
IndexError,
)
result.append(nonzero.select(1, j))
else:
result.append(index)
else:
result.append(index)
indices = result
check(
len(indices) <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
)
# expand_outplace
import torch._refs as refs # avoid import cycle in mypy
indices = list(refs._maybe_broadcast(*indices))
# add missing null tensors
while len(indices) < self.ndim:
indices.append(None)
# hasContiguousSubspace
# true if all non-null tensors are adjacent
# See:
# https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
# https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
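# For example, indices like [None, idx0, idx1, None] form a contiguous subspace,
# while [idx0, None, idx1] do not (the advanced dims are then moved to the front).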
state = 0
has_contiguous_subspace = False
for index in indices:
if state == 0:
if index is not None:
state = 1
elif state == 1:
if index is None:
state = 2
else:
if index is not None:
break
else:
has_contiguous_subspace = True
# transposeToFront
# This is the logic that causes the newly inserted dimensions to show up
# at the beginning of the tensor, if they're not contiguous
if not has_contiguous_subspace:
dims = []
transposed_indices = []
for i, index in enumerate(indices):
if index is not None:
dims.append(i)
transposed_indices.append(index)
for i, index in enumerate(indices):
if index is None:
dims.append(i)
transposed_indices.append(index)
self = self.permute(dims)
indices = transposed_indices
# AdvancedIndex::AdvancedIndex
# Now we can assume the indices have contiguous subspace
# This is simplified from AdvancedIndex which goes to more effort
# to put the input and indices in a form so that TensorIterator can
# take them. If we write a ref for this, probably that logic should
# get implemented
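# Worked example (illustrative): self of shape (5, 6, 7) indexed with [None, idx, None],
# where idx has shape (3,), yields before=[5], replacement=[3], after=[7] -> result (5, 3, 7).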
before_shape: List[int] = []
after_shape: List[int] = []
replacement_shape: List[int] = []
for dim, index in enumerate(indices):
if index is None:
if replacement_shape:
after_shape.append(self.shape[dim])
else:
before_shape.append(self.shape[dim])
else:
replacement_shape = list(index.shape)
return self.new_empty(before_shape + replacement_shape + after_shape)
@register_meta([aten.convolution_backward.default])
def meta_convolution_backward(
grad_output_,
input_,
weight_,
bias_sizes_opt,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
output_mask,
):
# High level logic taken from slow_conv3d_backward_cpu which should
# be representative of all convolution_backward impls
backend_grad_input = None
backend_grad_weight = None
backend_grad_bias = None
if output_mask[0]:
backend_grad_input = grad_output_.new_empty(input_.size())
if output_mask[1]:
backend_grad_weight = grad_output_.new_empty(weight_.size())
if output_mask[2]:
backend_grad_bias = grad_output_.new_empty(bias_sizes_opt)
return (backend_grad_input, backend_grad_weight, backend_grad_bias)
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
dim1 = batch1.size(1)
dim2 = batch2.size(2)
self = self.expand((dim1, dim2))
check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
check(
batch1.size(0) == batch2.size(0),
lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
)
check(
batch1.size(2) == batch2.size(1),
lambda: (
f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
f"and {batch2.size(1)}x{batch2.size(2)})"
),
)
check(
self.size(0) == dim1 and self.size(1) == dim2,
lambda: "self tensor does not match matmul output shape",
)
return self.new_empty(self.size())
@register_meta(aten._cdist_forward.default)
def meta_cdist_forward(x1, x2, p, compute_mode):
check(
x1.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
)
check(
x2.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
)
check(
x1.size(-1) == x2.size(-1),
lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
)
check(
utils.is_float_dtype(x1.dtype),
lambda: "cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
)
check(
utils.is_float_dtype(x2.dtype),
lambda: "cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
)
check(p >= 0, lambda: "cdist only supports non-negative p values")
check(
compute_mode in (None, 1, 2),
lambda: f"possible modes: None, 1, 2, but was: {compute_mode}",
)
r1 = x1.size(-2)
r2 = x2.size(-2)
batch_tensor1 = x1.shape[:-2]
batch_tensor2 = x2.shape[:-2]
output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
output_shape.extend([r1, r2])
return x1.new_empty(output_shape)
@register_meta(aten._embedding_bag.default)
def meta_embedding_bag(
weight,
indices,
offsets,
scale_grad_by_freq=False,
mode=0,
sparse=False,
per_sample_weights=None,
include_last_offset=False,
padding_idx=-1,
):
check(
indices.dtype in (torch.long, torch.int),
lambda: f"expected indices to be long or int, got {indices.dtype}",
)
check(
offsets.dtype in (torch.long, torch.int),
lambda: f"expected offsets to be long or int, got {offsets.dtype}",
)
check(
utils.is_float_dtype(weight.dtype),
lambda: f"expected weight to be floating point type, got {weight.dtype}",
)
num_bags = offsets.size(0)
if include_last_offset:
check(
num_bags >= 1, lambda: "include_last_offset: numBags should be at least 1"
)
num_bags -= 1
output = weight.new_empty(num_bags, weight.size(1))
MODE_SUM, MODE_MEAN, MODE_MAX = range(3)
if per_sample_weights is not None:
check(
mode == MODE_SUM,
lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
)
check(
per_sample_weights.dtype == weight.dtype,
lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
)
check(
per_sample_weights.ndim == 1,
lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
)
check(
per_sample_weights.numel() == indices.numel(),
lambda: (
f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
f"to be the same as indices.numel() ({indices.numel()})"
),
)
def is_fast_path_index_select_scale(src, scale, output, padding_idx):
return (
is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
)
def is_fast_path_index_select(src, output, padding_idx):
return (
(src.dtype == torch.float or src.dtype == torch.half)
and src.stride(1) == 1
and output.stride(1) == 1
and padding_idx < 0
)
def is_fast_path(src, scale, output, padding_idx):
if scale is not None:
return is_fast_path_index_select_scale(src, scale, output, padding_idx)
else:
return is_fast_path_index_select(src, output, padding_idx)
if device_hint(offsets) != "cpu":
offset2bag = indices.new_empty(indices.size(0))
bag_size = indices.new_empty(offsets.size())
if mode == MODE_MAX:
max_indices = indices.new_empty(num_bags, weight.size(1))
else:
max_indices = indices.new_empty(0)
else:
fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
if mode == MODE_MEAN or mode == MODE_MAX or not fast_path_sum:
offset2bag = offsets.new_empty(indices.size(0))
else:
offset2bag = offsets.new_empty(0)
bag_size = offsets.new_empty(num_bags)
# This part of the logic comes from make_max_indices_out in EmbeddingBag.cpp
numBags = offsets.shape[0]
if mode == MODE_MAX:
if include_last_offset:
check(
numBags >= 1,
lambda: "include_last_offset: numBags should be at least 1",
)
numBags -= 1
max_indices = offsets.new_empty(numBags, weight.shape[1])
else:
max_indices = offsets.new_empty(bag_size.size())
return output, offset2bag, bag_size, max_indices
@register_meta(aten._embedding_bag_forward_only.default)
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
output, offset2bag, bag_size, max_indices = meta_embedding_bag(
weight, indices, offsets, *args
)
if device_hint(offsets) == "cpu":
bag_size = offsets.new_empty(offsets.size())
return output, offset2bag, bag_size, max_indices
def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
# if specified, dtype takes precedence
if dtype:
return dtype
if input.dtype.is_floating_point or input.dtype.is_complex:
return input.dtype
elif promote_int_to_long:
return torch.long
return input.dtype
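# For example (illustrative): an int32 input with dtype=None resolves to torch.long when
# promote_int_to_long=True, while floating-point and complex inputs keep their own dtype.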
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
output_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
dims = utils.reduction_dims(input.shape, dims)
output_shape = _compute_reduction_shape(input, dims, keepdim)
return input.new_empty(output_shape, dtype=output_dtype)
@register_meta(aten.nanmedian.default)
def meta_nanmedian(input):
output_shape = utils.compute_reduction_output_shape(
input.shape, tuple(range(input.dim()))
)
return input.new_empty(output_shape)
@register_meta([aten.nanmedian.dim, aten.nanmedian.dim_values])
@out_wrapper("values", "indices")
def meta_nanmedian_dim(input, dim=-1, keepdim=False):
dim = utils.reduction_dims(input.shape, (dim,))
output_shape = _compute_reduction_shape(input, dim, keepdim)
return (
input.new_empty(output_shape),
input.new_empty(output_shape, dtype=torch.long),
)
@register_meta(aten.logical_not_.default)
def meta_logical_not_(self):
return self
@register_meta(aten.repeat.default)
def meta_repeat(self, repeats):
check(
len(repeats) >= self.dim(),
lambda: "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor",
)
# Add new leading dimensions to the tensor if the
# number of target dimensions is larger than the
# number of source dimensions.
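# For example (illustrative): a (2, 3) tensor with repeats (2, 1, 4) is padded to
# (1, 2, 3) and produces a (2, 2, 12) result, matching Tensor.repeat semantics.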
num_new_dimensions = len(repeats) - self.dim()
padded_size = (1,) * num_new_dimensions + tuple(self.shape)
target_size = [padded_size[i] * repeats[i] for i in range(len(repeats))]
return self.new_empty(target_size)
@register_meta(aten.zero_.default)
def meta_zero_(self):
return self
@register_meta(
[
aten.mul_.Scalar,
aten.div_.Scalar,
aten.mul_.Tensor,
aten.div_.Tensor,
aten.logical_and_.default,
aten.logical_or_.default,
aten.logical_xor_.default,
],
)
def meta_binop_inplace(self, other):
return self
@register_meta(
[
aten.add_.Scalar,
aten.sub_.Scalar,
aten.add_.Tensor,
aten.sub_.Tensor,
],
)
def meta_binop_inplace_alpha(self, other, alpha=1):
return self
@register_meta([aten.round.default, aten.round.decimals])
def meta_round(self, **kwargs):
return _elementwise_meta(
self, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
)
@register_meta(aten.zero.default)
def meta_zero(self):
return self.new_empty(self.shape)
@register_meta([aten.fill_.Tensor, aten.fill_.Scalar])
def meta_fill_(self, val):
return self
@register_meta([aten.fill.Tensor, aten.fill.Scalar])
def meta_fill(self, val):
return torch.empty_like(self)
@register_meta(aten.relu_.default)
def meta_relu_(self):
return self
@register_meta(aten.index_put.default)
def meta_index_put(self, indices, values, accumulate=False):
return torch.empty_like(self)
@register_meta(aten.masked_fill_.Scalar)
def meta_masked_fill_(self, mask, value):
return self
@register_meta(aten.index_put_.default)
def meta_index_put_(self, indices, values, accumulate=False):
return self
@register_meta(aten.alias.default)
def meta_alias(self):
return self.view(self.shape)
def common_meta_baddbmm_bmm(batch1, batch2, is_bmm, self_baddbmm=None):
check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
batch1_sizes = batch1.size()
batch2_sizes = batch2.size()
bs = batch1_sizes[0]
contraction_size = batch1_sizes[2]
res_rows = batch1_sizes[1]
res_cols = batch2_sizes[2]
output_size = (bs, res_rows, res_cols)
check(
batch2_sizes[0] == bs and batch2_sizes[1] == contraction_size,
lambda: f"Expected size for first two dimensions of batch2 tensor to be: [{bs}"
f", {contraction_size}] but got: [{batch2_sizes[0]}, {batch2_sizes[1]}].",
)
# TODO: handle out
output = batch2.new_empty(output_size)
if not is_bmm and self_baddbmm is not None:
check(self_baddbmm.dim() == 3, lambda: "self must be a 3D tensor")
check(
self_baddbmm.size() == output_size,
lambda: "Expected an input tensor shape with shape {output_size} but got shape: {self.size()}",
)
return output
@register_meta(aten.bmm.default)
def meta_bmm(self, mat2):
return common_meta_baddbmm_bmm(self, mat2, True)
def div_rtn(x, y):
q = x // y
r = x % y
# WARNING: explicit bool conversion here is necessary;
# would be fixed by SymBool
if r != 0 and (bool(r < 0) != bool(y < 0)):
q -= 1
return q
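# For example, div_rtn(-3, 2) == -2: the quotient is rounded toward negative infinity,
# whereas C-style truncation toward zero would give -1.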
def pooling_output_shape_pad_lr(
inputSize, kernelSize, pad_l, pad_r, stride, dilation, ceil_mode
):
outputSize = (
div_rtn(
inputSize
+ pad_l
+ pad_r
- dilation * (kernelSize - 1)
- 1
+ (stride - 1 if ceil_mode else 0),
stride,
)
+ 1
)
if ceil_mode:
if (outputSize - 1) * stride >= inputSize + pad_l:
outputSize -= 1
return outputSize
def pooling_output_shape(inputSize, kernelSize, pad, stride, dilation, ceil_mode):
check(stride != 0, lambda: "stride should not be zero")
check(pad >= 0, lambda: f"pad must be non-negative, but got pad: {pad}")
check(
pad <= kernelSize // 2,
lambda: f"pad should be at most half of kernel size, but got pad={pad} and kernel_size={kernelSize}",
)
return pooling_output_shape_pad_lr(
inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode
)
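# Illustrative check (not from the original source): inputSize=7, kernelSize=3, pad=1,
# stride=2, dilation=1, ceil_mode=False gives div_rtn(7 + 2 - 2 - 1, 2) + 1 = 4,
# matching floor((7 + 2*1 - 3) / 2) + 1.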
def pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
):
ndim = input.dim()
nOutputPlane = nInputPlane
check(
kW > 0 and kH > 0,
lambda: "kernel size should be greater than zero, but got kH: {kH}, kW: {kW}",
)
check(
dW > 0 and dH > 0,
lambda: "stride should be greater than zero, but got dH: {dH}, dW: {dW}",
)
check(
dilationH > 0 and dilationW > 0,
lambda: "dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}",
)
valid_dims = input.size(1) != 0 and input.size(2) != 0
if memory_format == torch.channels_last:
check(
ndim == 4 and valid_dims and input.size(3) != 0,
lambda: "Expected 4D (batch mode) tensor expected for input with channels_last layout"
" with optional 0 dim batch size for input, but got: {input.size()}",
)
else:
check(
(ndim == 3 and input.size(0) != 0 and valid_dims)
or (ndim == 4 and valid_dims and input.size(3) != 0),
lambda: f"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: {input.size()}",
)
check(
kW // 2 >= padW and kH // 2 >= padH,
lambda: "pad should be smaller than or equal to half of kernel size, but got "
f"padW = {padW}, padH = {padH}, kW = {kW}, kH = {kH}",
)
check(
outputWidth >= 1 and outputHeight >= 1,
lambda: f"Given input size: ({nInputPlane}x{inputHeight}x{inputWidth}). "
f"Calculated output size: ({nOutputPlane}x{outputHeight}x{outputWidth}). "
"Output size is too small",
)
def max_pool2d_checks_and_compute_shape(
input, kernel_size, stride, padding, dilation, ceil_mode
):
# Reference: aten/src/ATen/native/DilatedMaxPool2d.cpp
def unpack(name, val):
check(
len(val) in [1, 2],
lambda: f"max_pool2d: {name} must either be a single int, or a tuple of two ints",
)
H = val[0]
W = H if len(val) == 1 else val[1]
return H, W
kH, kW = unpack("kernel_size", kernel_size)
check(
len(stride) in [0, 1, 2],
lambda: "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints",
)
if len(stride) == 0:
dH, dW = kH, kW
else:
dH, dW = unpack("stride", stride)
padH, padW = unpack("padding", padding)
dilationH, dilationW = unpack("dilation", dilation)
nInputPlane = input.size(-3)
inputHeight = input.size(-2)
inputWidth = input.size(-1)
memory_format = utils.suggest_memory_format(input)
if memory_format == torch.channels_last:
check(
input.dim() == 4,
lambda: "non-empty 4D (batch mode) tensor expected for input with channels_last layout",
)
elif memory_format == torch.contiguous_format:
check(
input.dim() in [3, 4],
lambda: "non-empty 3D or 4D (batch mode) tensor expected for input",
)
else:
check(
False,
lambda: "Unsupport memory format. Supports only ChannelsLast, Contiguous",
)
outputHeight = pooling_output_shape(inputHeight, kH, padH, dH, dilationH, ceil_mode)
outputWidth = pooling_output_shape(inputWidth, kW, padW, dW, dilationW, ceil_mode)
pool2d_shape_check(
input,
kH,
kW,
dH,
dW,
padH,
padW,
dilationH,
dilationW,
nInputPlane,
inputHeight,
inputWidth,
outputHeight,
outputWidth,
memory_format,
)
return nInputPlane, outputHeight, outputWidth
@register_meta(aten.max_pool2d_with_indices_backward.default)
def meta_max_pool2d_with_indices_backward(
grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices
):
nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
self, kernel_size, stride, padding, dilation, ceil_mode
)
check(
self.dtype == grad_output.dtype,
lambda: "expected dtype {self.dtype} for `gradOutput` but got dtype {grad_output.dtype}",
)
nOutputPlane = nInputPlane
ndim = self.ndim
def _check_dim_size(t):
check_dim_size(t, ndim, ndim - 3, nOutputPlane)
check_dim_size(t, ndim, ndim - 2, outputHeight)
check_dim_size(t, ndim, ndim - 1, outputWidth)
_check_dim_size(grad_output)
_check_dim_size(indices)
memory_format = utils.suggest_memory_format(self)
return torch.empty(
self.shape, dtype=self.dtype, device=self.device, memory_format=memory_format
)
@register_meta(aten.max_pool2d_with_indices.default)
def meta_max_pool2d_with_indices(
input, kernel_size, stride=(), padding=(0,), dilation=(1,), ceil_mode=False
):
nInputPlane, outputHeight, outputWidth = max_pool2d_checks_and_compute_shape(
input, kernel_size, stride, padding, dilation, ceil_mode
)
nbatch = input.size(-4) if input.dim() == 4 else 1
memory_format = utils.suggest_memory_format(input)
if input.dim() == 3:
size = [nInputPlane, outputHeight, outputWidth]
else:
size = [nbatch, nInputPlane, outputHeight, outputWidth]
return (
torch.empty(
size, dtype=input.dtype, device=input.device, memory_format=memory_format
),
torch.empty(
size, dtype=torch.int64, device=input.device, memory_format=memory_format
),
)
@register_meta(aten.grid_sampler_2d_backward.default)
def grid_sampler_2d_backward_meta(
grad_output,
input,
grid,
interpolation_mode,
padding_mode,
align_corners,
output_mask,
):
input_requires_grad = output_mask[0]
if input_requires_grad:
grad_input = torch.zeros_like(input, memory_format=torch.contiguous_format)
else:
grad_input = None
grad_grid = torch.empty_like(grid, memory_format=torch.contiguous_format)
return (grad_input, grad_grid)
@register_meta([aten.full.default])
def full(size, fill_value, *args, **kwargs):
return torch.empty(size, *args, **kwargs)
@register_meta(
[
aten.randint_like.default,
aten.randint_like.low_dtype,
aten.randn_like.default,
aten.rand_like.default,
aten.full_like.default,
aten.ones_like.default,
]
)
def meta_like(self, *args, **kwargs):
return aten.empty_like.default(self, **kwargs)
# zeros_like is special cased to work for sparse
@register_meta(aten.zeros_like.default)
def zeros_like(
self, dtype=None, layout=None, device=None, pin_memory=None, memory_format=None
):
if layout == torch.sparse_coo:
check(
memory_format is None,
lambda: "memory format option is only supported by strided tensors",
)
res = torch.empty(
0,
dtype=self.dtype if dtype is None else dtype,
layout=layout,
device=self.device if device is None else device,
pin_memory=pin_memory,
)
if self.is_sparse:
res.sparse_resize_and_clear_(
self.size(), self.sparse_dim(), self.dense_dim()
)
else:
res.sparse_resize_and_clear_(self.size(), self.dim(), 0)
res._coalesced_(True)
return res
return aten.empty_like.default(
self,
dtype=dtype,
layout=layout,
device=device,
pin_memory=pin_memory,
memory_format=memory_format,
)
# hacky: Please remove after math.ceil works with arange
@register_meta(aten.arange.default)
def arange(end, **kwargs):
if isinstance(end, FloatLike):
end = math.ceil(end) # type: ignore[arg-type]
def is_integral(x):
return isinstance(x, IntLike) or isinstance(x, bool)
set_to_integral_dtype = kwargs.get("dtype", None) is None and is_integral(end)
if set_to_integral_dtype:
kwargs["dtype"] = torch.int64
return aten.empty([end], **kwargs)
@register_meta(aten.arange.start)
def arange_start(start, end, **kwargs):
return aten.arange(end - start, **kwargs)
@register_meta(aten.select.int)
def meta_select(self, dim, index):
ndim = self.dim()
check(
ndim != 0, lambda: "select() cannot be applied to a 0-dim tensor.", IndexError
)
dim = dim if dim >= 0 else dim + ndim
size = self.size(dim)
check(
not (-index > size or index >= size),
lambda: f"select(): index {index} out of range for tensor of size "
f"{self.size()} at dimension {dim}",
IndexError,
)
index = index if index >= 0 else index + size
new_size = list(self.size())
new_stride = list(self.stride())
new_storage_offset = self.storage_offset() + index * new_stride[dim]
del new_size[dim]
del new_stride[dim]
return self.as_strided(new_size, new_stride, new_storage_offset)
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
return utils.clone_preserve_strides(self)
@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
return utils.clone_preserve_strides(self)
# TODO: Deduplicate this with canonicalize_dim
def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
if dim_post_expr <= 0:
assert wrap_scalar
dim_post_expr = 1
min = -dim_post_expr
max = dim_post_expr - 1
assert not (dim < min or dim > max), f"dim {dim} out of bounds ({min}, {max})"
if dim < 0:
dim += dim_post_expr
return dim
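# For example, maybe_wrap_dim(-1, 4) == 3 and maybe_wrap_dim(2, 4) == 2; values outside
# [-dim_post_expr, dim_post_expr - 1] trip the assertion.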
def ensure_nonempty_size(t, dim):
return 1 if t.dim() == 0 else t.shape[dim]
# From aten/src/ATen/native/ScatterGatherChecks.h
def gather_shape_check(self, dim, index):
self_dims = max(self.dim(), 1)
index_dims = max(index.dim(), 1)
check(
self_dims == index_dims,
lambda: "Index tensor must have the same number of dimensions as input tensor",
)
for i in range(self_dims):
if i != dim:
check(
ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
lambda: f"Size does not match at dimension {i} expected index {index.shape}"
+ f" to be smaller than self {self.shape} apart from dimension {dim}",
)
@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
wrapped_dim = maybe_wrap_dim(dim, self.dim())
is_index_empty = index.numel() == 0
if not is_index_empty:
check(
index.dtype == torch.long,
lambda: f"gather(): Expected dtype int64 for index, but got {index.dtype}",
)
gather_shape_check(self, wrapped_dim, index)
return self.new_empty(index.shape)
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def get_operator_enum(reduce_, use_new_options=False):
if use_new_options:
if reduce_ == "sum":
return "REDUCE_ADD"
elif reduce_ == "prod":
return "REDUCE_MULTIPLY"
elif reduce_ == "mean":
return "REDUCE_MEAN"
elif reduce_ == "amax":
return "REDUCE_MAXIMUM"
elif reduce_ == "amin":
return "REDUCE_MINIMUM"
check(
False,
lambda: "reduce argument must be either sum, prod, mean, amax or amin.",
)
return
else:
if reduce_ == "add":
return "REDUCE_ADD"
elif reduce_ == "multiply":
return "REDUCE_MULTIPLY"
check(False, lambda: "reduce argument must be either add or multiply.")
return
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
if index.numel() != 0:
check(
index.dtype == torch.long,
lambda: f"{method_name}(): Expected dtype int64 for index",
)
if src_opt is not None:
check(
self.dtype == src_opt.dtype,
lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
)
def ensure_nonempty_dim(dim):
return max(dim, 1)
# From aten/src/ATen/native/ScatterGatherChecks.h
def scatter_shape_check(self, dim, index, src_opt=None):
if index.numel() == 0:
return
check(
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
lambda: "Index tensor must have the same number of dimensions as self tensor",
)
is_wrong_shape = False
self_dims = ensure_nonempty_dim(self.dim())
# Check: index.size(d) <= self.size(d) for all d != dim
for d in range(self_dims):
index_d_size = ensure_nonempty_size(index, d)
if d == dim:
continue
if index_d_size > ensure_nonempty_size(self, d):
is_wrong_shape = True
break
# Check: index.size(d) <= src.size(d) for all d if src is Tensor
if not is_wrong_shape and src_opt is not None:
for d in range(self_dims):
index_d_size = ensure_nonempty_size(index, d)
if index_d_size > ensure_nonempty_size(src_opt, d):
is_wrong_shape = True
break
if src_opt is not None:
check(
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
lambda: "Index tensor must have the same number of dimensions as self tensor",
)
check(
not is_wrong_shape,
lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
+ f" apart from dimension {dim} and to be smaller than src {src_opt.shape}",
)
else:
check(
not is_wrong_shape,
lambda: f"Expected index {index.shape} to be smaller than self {self.shape}"
+ f" apart from dimension {dim}",
)
# From aten/src/ATen/native/TensorAdvancedIndexing.cpp
def scatter_meta_impl(self, dim, index, src=None, reduce_=None, use_new_options=False):
wrapped_dim = maybe_wrap_dim(dim, self.dim())
scatter_gather_dtype_check("scatter", self, index, src)
scatter_shape_check(self, wrapped_dim, index, src)
if reduce_ is not None:
# Check if we have a valid reduce operator.
get_operator_enum(reduce_, use_new_options)
@register_meta(aten.scatter_add.default)
def meta_scatter_add(self, dim, index, src):
scatter_meta_impl(self, dim, index, src, "add")
return self.new_empty(self.shape)
@register_meta(aten.scatter_add_)
def meta_scatter_add_(self, dim, index, src):
scatter_meta_impl(self, dim, index, src, "add")
return self
@register_meta(
[
aten.scatter.src,
aten.scatter.value,
aten.scatter.reduce,
aten.scatter.value_reduce,
]
)
@out_wrapper()
def meta_scatter(self, dim, index, src_or_value, reduce=None):
src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
scatter_meta_impl(self, dim, index, src, reduce)
return self.new_empty(self.shape)
@register_meta(
[
aten.scatter_.src,
aten.scatter_.value,
aten.scatter_.reduce,
aten.scatter_.value_reduce,
]
)
def meta_scatter_(self, dim, index, src_or_value, reduce=None):
src = src_or_value if isinstance(src_or_value, torch.Tensor) else None
scatter_meta_impl(self, dim, index, src, reduce)
return self
@register_meta(
[
aten._scaled_dot_product_flash_attention,
]
)
def meta__scaled_dot_product_flash(
query: Tensor,
key: Tensor,
value: Tensor,
dropout_p: float = 0.0,
is_causal: bool = False,
):
batch_size = query.size(0)
num_heads = query.size(1)
max_seqlen_batch_q = query.size(2)
head_dim = query.size(3)
max_seqlen_batch_k = key.size(2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
Nnz_q = batch_size * max_seqlen_batch_q
output = torch.empty(
(Nnz_q, num_heads, head_dim), dtype=query.dtype, device=query.device
)
output = output.view(batch_size, max_seqlen_batch_q, num_heads, head_dim).transpose(
1, 2
)
max_seqlen_q = math.ceil(max_seqlen_batch_q / 16) * 16
logsumexp = torch.empty(
(batch_size, num_heads, max_seqlen_q),
dtype=torch.float,
device=query.device,
)
is_sm80 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
is_sm75 = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)
head_size_rounded = 64 if head_dim <= 64 else 128
blocksize_c = (
128
if (head_size_rounded == 128 and (dropout_p != 0.0 or not is_sm80))
or (is_sm75 and head_size_rounded == 64 and dropout_p != 0.0)
else 256
)
max_seqlen_k = math.ceil(max_seqlen_batch_k / blocksize_c) * blocksize_c
if max_seqlen_k <= 128:
max_seqlen_k = 128
elif max_seqlen_k <= 256:
max_seqlen_k = 256
return output, logsumexp
@register_meta(
[
aten._scaled_dot_product_efficient_attention,
]
)
def meta__scaled_dot_product_efficient(
query: Tensor,
key: Tensor,
value: Tensor,
compute_log_sumexp: bool,
is_causal: bool = False,
):
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
B = query.size(0)
M = query.size(1)
N = key.size(1)
num_heads = query.size(-2)
K = query.size(-1)
Kv = value.size(-1)
res = torch.empty(B, M, num_heads, Kv, dtype=query.dtype, device=query.device)
logsumexp_dim = math.ceil(M / 32) * 32 if compute_log_sumexp else 0
logsum_exp = torch.empty(
(B, num_heads, logsumexp_dim),
dtype=torch.float,
device=query.device,
)
res = res.transpose(1, 2)
return res, logsum_exp
@register_meta(
[
aten._scaled_dot_product_efficient_attention_backward,
]
)
def meta__scaled_dot_product_efficient_backward(
grad_out: Tensor,
query: Tensor,
key: Tensor,
value: Tensor,
out: Tensor,
logsumexp: Tensor,
is_causal: bool = False,
chunk_grad_outputs=False,
):
grad_out = grad_out.transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
B = query.size(0)
M = query.size(1)
N = key.size(1)
nH = query.size(2)
K = query.size(3)
grad_kv_needs_init = is_causal and N > M
if chunk_grad_outputs:
chunk = torch.empty((B, M, 3, nH, K), dtype=query.dtype, device=query.device)
grad_q = chunk.select(2, 0)
grad_k = chunk.select(2, 1)
grad_v = chunk.select(2, 2)
else:
grad_q = torch.empty(query.shape, dtype=query.dtype, device=query.device)
grad_k = (
torch.zeros(key.shape, dtype=key.dtype, device=key.device)
if grad_kv_needs_init
else torch.empty(key.shape, dtype=key.dtype, device=key.device)
)
grad_v = (
torch.zeros(value.shape, dtype=value.dtype, device=value.device)
if grad_kv_needs_init
else torch.empty(value.shape, dtype=value.dtype, device=value.device)
)
return grad_q.transpose(1, 2), grad_k.transpose(1, 2), grad_v.transpose(1, 2)
@register_meta([aten.scatter_reduce.two, aten.scatter_reduce.two_out])
@out_wrapper()
def meta_scatter_reduce_two(self, dim, index, src, reduce, include_self=True):
scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
return self.new_empty(self.shape)
@register_meta(aten.scatter_reduce_.two)
def meta_scatter_reduce__two(self, dim, index, src, reduce, include_self=True):
scatter_meta_impl(self, dim, index, src, reduce, use_new_options=True)
return self
def multiply_integers(vs):
r = 1
for v in vs:
r *= v
return r
def upsample_common_check(input_size, output_size, num_spatial_dims):
check(
len(output_size) == num_spatial_dims,
lambda: f"It is expected output_size equals to {num_spatial_dims}, but got size {len(output_size)}",
)
expected_input_dims = num_spatial_dims + 2 # N, C, ...
check(
len(input_size) == expected_input_dims,
lambda: f"It is expected input_size equals to {expected_input_dims}, but got size {len(input_size)}",
)
check(
all([s > 0 for s in input_size[2:]]) and all([s > 0 for s in output_size]),
lambda: f"Input and output sizes should be greater than 0, but got "
f"input size {input_size} and output size {output_size}",
)
nbatch, channels = input_size[:2]
return (nbatch, channels, *output_size)
@register_meta(aten.upsample_nearest1d.default)
def upsample_nearest1d(input, output_size, scales=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 3D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=1
)
return input.new_empty(full_output_size).to(
memory_format=utils.suggest_memory_format(input)
)
@register_meta(aten.upsample_nearest2d.default)
def upsample_nearest2d(input, output_size, scales_h=None, scales_w=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 4D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=2
)
output = input.new_empty(full_output_size)
# convert output to correct memory format, if necessary
memory_format = utils.suggest_memory_format(input)
# following "heuristic: only use channels_last path when it's faster than the contiguous path"
_, n_channels, _, _ = input.shape
if input.device.type == "cuda" and n_channels < 4:
memory_format = torch.contiguous_format
output = output.contiguous(memory_format=memory_format)
return output
@register_meta(aten.upsample_nearest3d.default)
def upsample_nearest3d(input, output_size, scales_d=None, scales_h=None, scales_w=None):
check(
input.numel() != 0 or multiply_integers(input.size()[1:]),
lambda: "Non-empty 5D data tensor expected but got a tensor with sizes {input.size()}",
)
full_output_size = upsample_common_check(
input.size(), output_size, num_spatial_dims=3
)
return input.new_empty(full_output_size).to(
memory_format=utils.suggest_memory_format(input)
)
@register_meta([aten.sort.default, aten.sort.stable])
def meta_sort(self, stable=None, dim=-1, descending=False):
return torch.empty_like(self), torch.empty_like(self, dtype=torch.int64)
def rnn_cell_checkSizes(
input_gates, hidden_gates, input_bias, hidden_bias, factor, prev_hidden
):
check(input_gates.ndim == 2, lambda: f"{input_gates.ndim} != 2")
check(
input_gates.shape == hidden_gates.shape,
lambda: f"{input_gates.shape} != {hidden_gates.shape}",
)
gates_size = input_gates.size(1)
if input_bias is not None:
check(input_bias.ndim == 1, lambda: f"{input_bias.ndim} != 1")
check(
input_bias.numel() == gates_size,
lambda: f"{input_bias.numel()} != {gates_size}",
)
check(
input_bias.shape == hidden_bias.shape,
lambda: f"{input_bias.shape} != {hidden_bias.shape}",
)
check(prev_hidden.ndim == 2, lambda: f"{prev_hidden.ndim} != 2")
expected_prev_hidden_numel = input_gates.size(0) * gates_size // factor
check(
prev_hidden.numel() == expected_prev_hidden_numel,
lambda: f"{prev_hidden.numel()} != {input_gates.size(0)} * {gates_size} // {factor} (aka {expected_prev_hidden_numel})",
)
check(
all(
x.device == input_gates.device
for x in [hidden_gates, input_bias, hidden_bias, prev_hidden]
),
lambda: "expected all inputs to be same device",
)
@register_meta(aten._thnn_fused_lstm_cell.default)
def _thnn_fused_lstm_cell_meta(
input_gates, hidden_gates, cx, input_bias=None, hidden_bias=None
):
rnn_cell_checkSizes(input_gates, hidden_gates, input_bias, hidden_bias, 4, cx)
workspace = torch.empty_like(input_gates, memory_format=torch.contiguous_format)
hy = torch.empty_like(cx, memory_format=torch.contiguous_format)
cy = torch.empty_like(cx, memory_format=torch.contiguous_format)
return (hy, cy, workspace)
@register_meta(aten._cudnn_rnn.default)
def _cudnn_rnn(
input,
weight,
weight_stride0,
weight_buf,
hx,
cx,
mode,
hidden_size,
proj_size,
num_layers,
batch_first,
dropout,
train,
bidirectional,
batch_sizes,
dropout_state,
):
is_input_packed = len(batch_sizes) != 0
if is_input_packed:
seq_length = len(batch_sizes)
mini_batch = batch_sizes[0]
batch_sizes_sum = input.shape[0]
else:
seq_length = input.shape[1] if batch_first else input.shape[0]
mini_batch = input.shape[0] if batch_first else input.shape[1]
batch_sizes_sum = -1
num_directions = 2 if bidirectional else 1
out_size = proj_size if proj_size != 0 else hidden_size
if is_input_packed:
out_shape = [batch_sizes_sum, out_size * num_directions]
else:
out_shape = (
[mini_batch, seq_length, out_size * num_directions]
if batch_first
else [seq_length, mini_batch, out_size * num_directions]
)
output = input.new_empty(out_shape)
cell_shape = [num_layers * num_directions, mini_batch, hidden_size]
if cx is None:
cy = torch.empty(0, device=input.device)
else:
cy = cx.new_empty(cell_shape)
hy = hx.new_empty([num_layers * num_directions, mini_batch, out_size])
# TODO: Query cudnnGetRNNTrainingReserveSize (expose to python)
reserve_shape = 0 if train else 0
reserve = input.new_empty(reserve_shape, dtype=torch.uint8)
return output, hy, cy, reserve, weight_buf
@register_meta(aten.mkldnn_rnn_layer.default)
def mkldnn_rnn_layer(
input,
w0,
w1,
w2,
w3,
hx_,
cx_,
reverse,
batch_sizes,
mode,
hidden_size,
num_layers,
has_biases,
bidirectional,
batch_first,
train,
):
seq_length = input.shape[1] if batch_first else input.shape[0]
mini_batch = input.shape[0] if batch_first else input.shape[1]
    output_channels = hidden_size
    out_shape = (
        [mini_batch, seq_length, output_channels]
        if batch_first
        else [seq_length, mini_batch, output_channels]
    )
output = input.new_empty(out_shape)
if hx_ is None:
hy = torch.empty(0, device=input.device)
else:
hy = hx_.new_empty(hx_.shape)
if cx_ is None:
cy = torch.empty(0, device=input.device)
else:
cy = cx_.new_empty(cx_.shape)
workspace = torch.empty(0, device=input.device, dtype=torch.uint8)
return output, hy, cy, workspace
def zero_numel_check_dims(self, dim, fn_name):
if self.ndim == 0:
check(
dim == 0 or dim == -1,
lambda: f"{fn_name}: Expected reduction dim -1 or 0 for scalar but got {dim}",
IndexError,
)
else:
check(
self.size(dim) != 0,
lambda: f"{fn_name}: Expected reduction dim {dim} to have non-zero size.",
IndexError,
)
# From aten/src/ATen/native/ReduceOps.cpp
def check_argmax_argmin(name, self, dim):
if dim is not None:
dim = maybe_wrap_dim(dim, self.dim())
zero_numel_check_dims(self, dim, name)
else:
check(
self.numel() != 0,
lambda: f"{name}: Expected reduction dim to be specified for input.numel() == 0.",
)
@register_meta([aten.argmax.default, aten.argmin.default])
def argmax_argmin_meta(self, dim=None, keepdim=False):
check_argmax_argmin("argmax", self, dim)
dims = utils.reduction_dims(self.shape, (dim,) if dim is not None else None)
shape = _compute_reduction_shape(self, dims, keepdim)
return self.new_empty(shape, dtype=torch.int64)
@register_meta(aten.scalar_tensor.default)
def scalar_tensor(s, dtype=None, layout=None, device=None, pin_memory=None):
return torch.empty(
(), dtype=dtype, layout=layout, device=device, pin_memory=pin_memory
)
@register_meta(aten.topk.default)
def topk_meta(self, k, dim=-1, largest=True, sorted=True):
# From aten/src/ATen/native/Sorting.cpp
dim = maybe_wrap_dim(dim, self.dim(), wrap_scalar=True)
check(
k >= 0 and k <= (self.size(dim) if self.dim() > 0 else 1),
lambda: "selected index k out of range",
)
sliceSize = 1 if self.dim() == 0 else self.size(dim)
check(k >= 0 and k <= sliceSize, lambda: "k not in range for dimension")
topKSize = list(self.shape)
if len(topKSize) > 0:
topKSize[dim] = k
return self.new_empty(topKSize), self.new_empty(topKSize, dtype=torch.int64)
legacy_contiguous_memory_format = torch.contiguous_format
# From aten/src/ATen/native/cuda/RNN.cu
def checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace):
defined_grad = grad_hy if grad_hy is not None else grad_cy
check(defined_grad.dim() == 2, lambda: "")
exp_size = defined_grad.size()
if grad_hy is not None:
check(grad_hy.size() == exp_size, lambda: "")
if grad_cy is not None:
check(grad_cy.size() == exp_size, lambda: "")
check(cx.size() == exp_size, lambda: "")
check(cy.size() == exp_size, lambda: "")
check(workspace.dim() == 2, lambda: "")
check(workspace.numel() == exp_size[0] * exp_size[1] * 4, lambda: "")
# From aten/src/ATen/native/cuda/RNN.cu
@register_meta(aten._thnn_fused_lstm_cell_backward_impl.default)
def _thnn_fused_lstm_cell_backward_impl(grad_hy, grad_cy, cx, cy, workspace, has_bias):
if grad_hy is None and grad_cy is None:
return None, None, None
checkLSTMBackwardSizes(grad_hy, grad_cy, cx, cy, workspace)
grad_gates = torch.empty_like(
workspace, memory_format=legacy_contiguous_memory_format
)
grad_cx = torch.empty_like(cx, memory_format=legacy_contiguous_memory_format)
grad_bias = grad_gates.sum(0, keepdim=False) if has_bias else None
return grad_gates, grad_cx, grad_bias
@register_meta(aten.pixel_shuffle.default)
def meta_pixel_shuffle(self, upscale_factor):
assert (
len(self.shape) > 2 and self.shape[-3] % (upscale_factor * upscale_factor) == 0
), f"Invalid input shape for pixel_shuffle: {self.shape} with upscale_factor = {upscale_factor}"
def is_channels_last(ten):
return torch._prims_common.suggest_memory_format(ten) == torch.channels_last
def pick_memory_format():
if is_channels_last(self):
if device_hint(self) == "cuda":
return torch.contiguous_format
else:
return torch.channels_last
elif self.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif self.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
C = self.shape[-3] // (upscale_factor * upscale_factor)
Hr = self.shape[-2] * upscale_factor
Wr = self.shape[-1] * upscale_factor
out_shape = (*self.shape[:-3], C, Hr, Wr)
out = self.new_empty(out_shape)
out = out.to(memory_format=pick_memory_format()) # type: ignore[call-overload]
return out
@register_meta(aten.mkldnn_rnn_layer_backward.default)
def mkldnn_rnn_layer_backward(
input,
weight0,
weight1,
weight2,
weight3,
hx_,
cx_tmp,
output,
hy_,
cy_,
grad_output_r_opt,
grad_hy_r_opt,
grad_cy_r_opt,
reverse,
mode,
hidden_size,
num_layers,
has_biases,
train,
bidirectional,
batch_sizes,
batch_first,
workspace,
):
diff_x = input.new_empty(input.shape)
diff_hx = hx_.new_empty(hx_.shape)
diff_cx = cx_tmp.new_empty(cx_tmp.shape)
diff_w1 = weight0.new_empty(weight0.shape)
diff_w2 = weight1.new_empty(weight1.shape)
diff_b = weight2.new_empty(weight2.shape)
return diff_x, diff_w1, diff_w2, diff_b, diff_b, diff_hx, diff_cx
# We must also trigger meta registrations from PrimTorch ref
# decompositions
import torch._refs
import torch._refs.nn.functional
import torch._refs.special
def activate_meta():
activate_meta_table = {}
# For a given op, we pick the most specific decomp function from
# global_decomp_table in the precedence order of meta > post_autograd > pre_autograd
for type in ["meta", "post_autograd", "pre_autograd"]:
registry = global_decomposition_table[type]
for opo in registry:
if opo not in activate_meta_table:
activate_meta_table[opo] = registry[opo]
for op_overload, fn in activate_meta_table.items():
assert isinstance(op_overload, OpOverload)
op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)
if torch._C._dispatch_has_kernel_for_dispatch_key(
op_overload.name(), "CompositeImplicitAutograd"
):
# Internally, we shouldn't be registering meta kernels for any operators that
# have CompositeImplicitAutograd kernels.
# Instead, we should be letting those decompositions run, and writing meta kernels
# only for the base operators.
if op_overload in global_decomposition_table["meta"]:
raise RuntimeError(
f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
"register meta function for it. Instead, we should let the decomposition run and write "
"meta kernels for the base operators."
)
pass
elif op_overload.is_view:
# Attempting to register a python meta kernel for a view operator.
# We shouldn't do this, because the output will report as not having aliased storages.
# All view ops have meta kernels in C++ today, so we should use those instead.
pass
elif op_overload.name() in {
"aten::empty_strided", # causing infinite recursion, test_meta.py
"aten::clone", # causing infinite recursion
"aten::_to_copy", # causing infinite recursion, test_serialization.py -k test_tensor_subclass_getstate_overwrite # noqa: B950
"aten::copy_", # Exception not raised, test_torch.py -k test_storage_meta_errors_cpu_int64 # noqa: B950
"aten::constant_pad_nd", # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_amp_istft_cuda_float32 # noqa: B950
"aten::rot90", # requires_grad mismatch! test_ops.py -k test_fake_crossref_backward_amp_rot90_cuda_float32 # noqa: B950
"aten::as_strided_scatter", # requires_grad mismatch, test_ops.py -k test_fake_crossref_backward_no_amp_as_strided_scatter_cuda_float32 # noqa: B950
}:
pass
else:
if "mkldnn::" in op_overload.name():
_meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
elif "mkl::" in op_overload.name():
_meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
else:
_meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)
activate_meta()
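# Usage sketch (illustrative, not part of this module's API): once the meta
# kernels above are registered, tensors on the "meta" device can be used to
# infer output shapes without allocating real storage, e.g.:
#
#   import torch
#   x = torch.empty(2, 8, 4, 4, device="meta")
#   y = torch.pixel_shuffle(x, upscale_factor=2)
#   assert y.shape == (2, 2, 8, 8) and y.device.type == "meta"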
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
604c1e106f08e0be7286bba0d9ef1a3bc66b63e5
|
708a6c274432fee2d25c7e86581f3655cd4be0de
|
/src/updater/app/lib/packages/vehicle_eco_balance/geo.py
|
64ab2e71d7d114cb5d7db70d98dfc40097f995db
|
[
"MIT"
] |
permissive
|
sampittko/tuke-beautofuel
|
f65fa2865c1402421224e3ff9182e3ab5e6cd3c9
|
0e7b2528af5f3a96c0abf6dc963d2a5b29779401
|
refs/heads/main
| 2023-04-11T00:18:19.498448
| 2021-04-23T13:16:10
| 2021-04-23T13:16:10
| 316,191,448
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,753
|
py
|
import numpy as np
import requests as req
from requests.exceptions import HTTPError
import time
from geopy import distance
import osmnx as ox
def calc_gradient_angle(point1, point2):
""" Calculate the gradient angle between two points on the earth's surface
Parameters
----------
point1: tuple (latitude, longitude, altitude)
first coordinate
point2: tuple (latitude, longitude, altitude)
second coordinate
Returns
-------
gradient_angle: float
gradient angle in radians between -pi/2 and pi/2
"""
coord1, alt1 = point1[:-1], point1[-1]
coord2, alt2 = point2[:-1], point2[-1]
dist = calc_distance(coord1, coord2)
if dist != 0:
return np.arctan((alt2 - alt1) / dist)
else:
return 0.0
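# Illustrative example (coordinates are made up): two points roughly one
# kilometre apart with a 10 m rise in altitude give a gradient angle of about
# arctan(10 / 1000) ~= 0.01 rad (~0.57 degrees):
#
#   p1 = (48.1372, 11.5756, 500.0)   # (latitude, longitude, altitude in m)
#   p2 = (48.1462, 11.5756, 510.0)
#   angle = calc_gradient_angle(p1, p2)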
def calc_distance(coord1, coord2, distance_type="geodetic", ellipsoid="WGS-84"):
""" Calculate distance between two points on the earth's surface using geopy
Great-circle distance is calculated using Vincenty's formula.
Default ellipsoid of the geodetic distance is WGS-84.
Parameters
----------
coord1: tuple (latitude, longitude)
first coordinate
coord2: tuple (latitude, longitude)
second coordinate
distance_type: str
'geodetic' or 'great-circle' (default 'geodetic')
ellipsoid: str
ellipsoid for geodetic distance (default 'WGS-84')
Returns
-------
distance: float
distance in meters
"""
if distance_type == "geodetic":
return distance.geodesic(coord1, coord2, ellipsoid=ellipsoid).km * 1000
elif distance_type == "great-circle":
return distance.great_circle(coord1, coord2).km * 1000
else:
print("distance_type " + distance_type + " is unknown!")
class ElevationAPI:
"""
ElevationAPI
Example APIs:
- Open Topo Data (https://www.opentopodata.org/)
- API: https://api.opentopodata.org/v1/
- Open
- Example: https://api.opentopodata.org/v1/eudem25m?locations=39.7391536,-104.9847034
- Limits
- Max 100 locations per request.
- Max 1 call per second.
- Max 1000 calls per day.
- Google Elevation API (https://developers.google.com/maps/documentation/elevation/overview)
- API: https://maps.googleapis.com/maps/api/elevation/
- Commercial, API key needed
- Example: https://maps.googleapis.com/maps/api/elevation/json?locations=39.7391536,-104.9847034&key=YOUR_API_KEY
Parameters
----------
base_url: str
API base url (default 'https://api.opentopodata.org/v1/')
dataset: str
eudem25m, aster30m, srtm30m, ... (default 'eudem25m', check https://www.opentopodata.org/ for details)
api_key: str (default None)
API key for the service
Attributes
----------
base_url: str
API base url
location_limit: int
number of allowed locations per request
params: dictionary
parameters for the get request (e.g. locations, key)
"""
def __init__(self, base_url='https://api.opentopodata.org/v1/', dataset='eudem25m', api_key=None):
self.base_url = base_url
if self.base_url != 'https://api.opentopodata.org/v1/':
self.location_limit = None
else:
self.location_limit = 100
self.base_url = self.base_url + dataset
self.params = {'key': api_key}
def get_elevation(self, coordinates):
""" Get elevation for the given coordinates from an elevation API
Parameters
----------
coordinates: list of tuples (latitude, longitude)
coordinates in EPSG:4326 (WGS-84)
Returns
-------
elevation: numpy array
elevation for each coordinate
"""
elevation = np.zeros(len(coordinates))
if self.location_limit is None:
print('Download elevation for all {} coordinates'.format(len(coordinates)))
            elevation[0:len(coordinates)] = self._make_request(coordinates)
return elevation
# Split request into multiple requests if location limit is provided
for i in range(int(len(coordinates) / self.location_limit) + 1):
start = i * self.location_limit
end = (i + 1) * self.location_limit
print('Download elevation for coordinates {start} to {end}'.format(start=start + 1, end=end))
elevation[start:end] = self._make_request(coordinates[start:end])
time.sleep(1) # for OpenTopoData the limit is max 1 call per second
return elevation
def _make_request(self, coordinates):
locations_str = self._coordinates2param(coordinates)
self.params.update({'locations': locations_str})
elevation_list = []
try:
response = req.get(self.base_url, params=self.params)
response.raise_for_status()
except HTTPError as http_err:
print('An http error occurred during the request: {}'.format(http_err))
except Exception as err:
print('An error occurred during the request: {}'.format(err))
else:
results = response.json()['results']
elevation_list = [result['elevation'] for result in results]
return elevation_list
def _coordinates2param(self, coordinates):
""" Transform coordinates to string in order to set the locations request parameter """
return ''.join([str(coordinate[0]) + ',' + str(coordinate[1]) + '|' for coordinate in coordinates])
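# Usage sketch for ElevationAPI (requires network access to the Open Topo Data
# service configured above; the coordinates are illustrative):
#
#   api = ElevationAPI()  # defaults to the eudem25m dataset
#   coords = [(48.1372, 11.5756), (47.4210, 10.9849)]
#   altitudes = api.get_elevation(coords)  # numpy array of elevations in metres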
def get_cr_from_osm(coordinates):
""" Get rolling coefficient (cr) from osm surface attribute
1) Determine nearest osm edge for each coordinate
2) Determine surface attribute for each osm edge
3) Get rolling coefficient (cr) for the corresponding surface type from literature
Hint: function will take some time when coordinates have a large spatial extent.
Parameters
----------
coordinates: list of tuples (latitude, longitude)
coordinates
Returns
-------
[cr, surface]: list of numpy arrays
first array are rolling coefficient (cr) values and second array are surface attributes
"""
# TODO: Improve performance
# TODO: Check scientific literature for rolling coefficient values
lats = [coordinate[0] for coordinate in coordinates]
lngs = [coordinate[1] for coordinate in coordinates]
min_y = np.min(lats)
max_y = np.max(lats)
min_x = np.min(lngs)
max_x = np.max(lngs)
ox.settings.useful_tags_way = ["surface"]
print('Get graph from bounding box: min_y={}, max_y={}, min_x={}, max_x={}'.format(min_y, max_y, min_x, max_x))
graph = ox.graph_from_bbox(max_y, min_y, max_x, min_x, network_type='drive')
surface = []
cr = []
i = 0
print('Find nearest osm edge and set rolling coefficient according to the surface type of the edge.')
for lat, lng in coordinates:
x = ox.get_nearest_edge(graph, (lat, lng))
p = [x[0], x[1]]
a = ox.utils_graph.get_route_edge_attributes(graph, p)
dic = a[0]
if "surface" in dic:
surface.append(dic["surface"])
else:
surface.append(None)
# Get the rolling resistance coefficient
# Sources
# https://www.engineeringtoolbox.com/rolling-friction-resistance-d_1303.html
# The Automotive Chassis book
if surface[i] == "asphalt":
cr.append(0.02)
elif surface[i] == "cobblestone":
cr.append(0.015)
elif surface[i] == "paving_stones":
cr.append(0.033)
else:
cr.append(0.02)
i = i + 1
return [np.array(cr), np.array(surface)]
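# Usage sketch (downloads OSM data through osmnx, so it needs network access
# and may be slow for large bounding boxes; the coordinates are illustrative):
#
#   coords = [(48.1372, 11.5756), (48.1380, 11.5760)]
#   cr_values, surfaces = get_cr_from_osm(coords)
#   # cr_values: rolling-resistance coefficients, surfaces: OSM surface tags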
|
[
"sampittko@gmail.com"
] |
sampittko@gmail.com
|
e72f4db6ed6a6653152baab96d0fa3235cbf675b
|
9c13bffaf12c83b049375cf24e12183fcab3a2aa
|
/venv/lib/python3.6/site-packages/pip/_vendor/requests/sessions.py
|
a8e60f360279eb602a4e07bb27447e0a0d22f3b3
|
[] |
no_license
|
brielino/SDCCTestAnsible
|
b702d48c934c8bde9638ceba3b27fabf9dd40071
|
857f66860de2ad889455789b60a162506d3125a1
|
refs/heads/master
| 2022-12-09T13:15:29.030558
| 2020-09-12T14:51:31
| 2020-09-12T14:51:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,310
|
py
|
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from datetime import timedelta
from collections import OrderedDict
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
if sys.platform == 'win32':
try: # Python 3.4+
preferred_clock = time.perf_counter
except AttributeError: # Earlier than Python 3.
preferred_clock = time.clock
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
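# Illustrative example of the merge behaviour (values are made up): session
# settings act as defaults, request settings win, and a None value removes a key.
#
#   merge_setting({'b': 2, 'c': None}, {'a': 1, 'c': 3})
#   # -> OrderedDict([('a', 1), ('b', 2)])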
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
            # Currently the underlying http module on py3 decodes headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
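    # A few illustrative cases of this policy (URLs are made up; called on a
    # Session instance ``s``):
    #
    #   s.should_strip_auth('http://example.com/a', 'https://example.com/a')       # False
    #   s.should_strip_auth('http://example.com/a', 'http://other.example/a')      # True
    #   s.should_strip_auth('https://example.com:443/a', 'https://example.com/a')  # False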
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = ':'.join([to_native_string(parsed_rurl.scheme), url])
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/psf/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/psf/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
headers.pop('Cookie', None)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and not bypass_proxy:
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
... s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Resolve redirects if allowed.
if allow_redirects:
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
history = [resp for resp in gen]
else:
history = []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for {!r}".format(url))
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
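    # Illustrative example (host name and retry count are made up): a more
    # specific prefix takes precedence over the default adapters mounted in
    # __init__, because get_adapter() returns the first matching prefix.
    #
    #   s = Session()
    #   s.mount('https://api.example.com/', HTTPAdapter(max_retries=3))
    #   # requests to https://api.example.com/... now use this adapter;
    #   # all other URLs fall back to the default 'https://' / 'http://' adapters.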
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
|
[
"melissaripaolo@gmail.com"
] |
melissaripaolo@gmail.com
|
4573c307c91eba94f7133e73ffb4e29b05316bfc
|
0bd6e56b5046391b5be4b466b8ce5b44626818f5
|
/mac_changer.py
|
8c55a63e8d4bd61b36e5da67151b61392b86a493
|
[] |
no_license
|
Ronlin1/python-hacking-scripts
|
af14f60610019474c07d76fd15f90d7c812a0165
|
291ccc7283e6c47f1f3a3385d729b7c08c1c24ed
|
refs/heads/master
| 2023-03-27T17:45:09.065699
| 2021-04-03T05:27:13
| 2021-04-03T05:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
#!/user/bin/env python
import subprocess
import optparse
import re
def get_arguments():
parser = optparse.OptionParser()
parser.add_option("-i", "--interface", dest="interface", help="Interface to change its Mac address")
parser.add_option("-m", "--mac", dest="new_mac", help="New MAC address")
(options, arguments) = parser.parse_args()
if not options.interface:
parser.error("[-] Please specify an interface, use --help for more info.")
elif not options.new_mac:
parser.error("[-] Please specify a new mac, use --help for more info.")
return options
def get_current_mac(interface):
ifconfig_result = subprocess.check_output(["ifconfig", interface])
mac_address_search_result = re.search(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w", str(ifconfig_result))
if mac_address_search_result:
return mac_address_search_result.group(0)
else:
print("[-] Could not read MAC address")
def change_mac(interface, new_mac):
print("[+] Changing Mac address for " + interface + " to " + new_mac)
subprocess.call(["ifconfig", interface, "down"])
subprocess.call(["ifconfig", interface, "hw", "ether", new_mac])
subprocess.call(["ifconfig", interface, "up"])
options = get_arguments()
current_mac = get_current_mac(options.interface)
print("Current Mac = " + str(current_mac))
change_mac(options.interface, options.new_mac)
current_mac = get_current_mac(options.interface)
if current_mac == options.new_mac:
print("[+] MAC address was successfully changed to " + current_mac)
else:
print("[-] MAC address did not get changed")
"testing"
|
[
"tonyjcha3713@gmail.com"
] |
tonyjcha3713@gmail.com
|
e3f5bdca5d9a2bf0d000ba393a7b25ae175ccf9a
|
63f8b7a3c3b5ab4c67f3ec6c60c3c327245afe66
|
/experiments/scripts/compare_throughput.py
|
3e3109181a11913d7287b510eae2e8bd42115c33
|
[] |
no_license
|
DanielTakeshi/dqn
|
719da28568963f1b2ba041652e32a3d2a62ec191
|
6f9dc0d8aedb1319fd5333295e6561027c68bab2
|
refs/heads/main
| 2021-01-13T01:48:38.235101
| 2020-11-11T01:35:45
| 2020-11-11T01:35:45
| 311,830,436
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,967
|
py
|
"""This combines a bunch of learning curves for all the games.
For bar charts, see `combine_student_results.py`.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import offsetbox
from matplotlib.ticker import FuncFormatter
import argparse, csv, math, os, pickle, sys, inspect, json
from os.path import join
import numpy as np
import pandas as pd
from dqn import common_config as cfg
from collections import defaultdict
import utils as U
plt.style.use('seaborn-darkgrid')
sns.set_style("darkgrid")
np.set_printoptions(linewidth=180, edgeitems=10)
# ------------------------------------------------------------------------------
# matplotlib stuff
# ------------------------------------------------------------------------------
titlesize = 53
xsize = 42
ysize = 42
ticksize = 42
legendsize = 48
scolors = ['gold', 'red', 'blue', 'purple', 'silver', 'orange']
tcolor = 'black'
error_region_alpha = 0.25
bwidth = 0.3
slw = 7
# ------------------------------------------------------------------------------
CONST = 1e6
LEN_REWARDS = 596 # this should be the length ...
def scale_steps(x):
x = np.array(x) / CONST
return x
def get_info(exp_path, w=100):
"""Gather information, in a similar manner as scripts/quick_student.py.
"""
title = U.get_title(exp_path)
summary_train = U.load_summary_data(exp_path, train=True)
s_steps, s_info = U.load_info_data(exp_path)
s_reward = s_info['true_avg_rew'].values
# Ah this is going to be a bit annoying but w/e, b/c one of Pong failed.
if '_pong_snapshot_2019-08-22-21-57_s64329' in exp_path:
print(' At the problematic: _pong_snapshot_2019-08-22-21-57_s64329')
print(' That one exited early due to Pong-specific stuff.')
s_steps = np.array([(x*10000+50000) for x in range(LEN_REWARDS)])
tmp = np.ones((LEN_REWARDS,)) * s_reward[-1]
for i in range(len(s_reward)):
tmp[i] = s_reward[i]
s_reward = tmp
s_steps = scale_steps(s_steps)
assert len(s_steps) == len(s_reward)
# Get the teacher info. Load teacher model, load path, then plot data. Be
# careful we are allowed to do this 'substitution' to get the expert data.
with open(join(exp_path,'params.txt'), 'r') as f:
params = json.load(f)
teacher_models = params['teacher']['models']
assert len(teacher_models) == 1, \
"assume len(teacher_models) = 1, {}".format(len(teacher_models))
s_last = os.path.basename(os.path.normpath(exp_path))
t_last = os.path.basename(os.path.normpath(teacher_models[0]))
teacher_path = exp_path.replace(s_last, t_last)
teacher_path = teacher_path.replace('students/', 'teachers/')
teacher_title = U.get_title(teacher_path)
# CANNOT DO THIS FOR EARLIER RUNS, dating back to before the summer, I think.
#t_steps, t_info = U.load_info_data(teacher_path)
# AH, we did not record 'true_avg_rew' in the teacher ... ugh. So for this
# just read the root file and parse like I do here. That gives us the same
# values that I use for the 'true_avg_rew' key.
t_steps = []
t_reward = []
teacher_root_file = join(teacher_path, 'root.log')
with open(teacher_root_file, 'r') as f:
for line in f:
if 'completed' in line and '**********' in line and 'steps' in line:
linesp = line.split()
assert linesp[0] == '**********', linesp
assert linesp[2] == 'steps', linesp
steps = int(linesp[1])
t_steps.append(steps)
if 'Last 100 results: avg' in line:
linesp = line.split()
assert linesp[0] == 'Last', linesp
assert linesp[1] == '100', linesp
assert linesp[2] == 'results:', linesp
assert linesp[3] == 'avg', linesp
assert ',' in linesp[4], linesp
rew = float(linesp[4].strip(','))
t_reward.append(rew)
t_steps = scale_steps(t_steps)
assert len(t_steps) == len(t_reward)
# More annoying stuff ...
if len(s_steps) > LEN_REWARDS:
print('for {}, len(s_steps) = {} so chopping to {}'.format(
exp_path, len(s_steps), LEN_REWARDS))
s_steps = s_steps[:LEN_REWARDS]
s_reward = s_reward[:LEN_REWARDS]
if len(t_steps) > LEN_REWARDS:
print('for {}, len(t_steps) = {} so chopping to {}'.format(
exp_path, len(t_steps), LEN_REWARDS))
t_steps = t_steps[:LEN_REWARDS]
t_reward = t_reward[:LEN_REWARDS]
assert len(s_steps) == LEN_REWARDS, len(s_steps)
assert len(s_reward) == LEN_REWARDS, len(s_reward)
assert len(t_steps) == LEN_REWARDS, len(t_steps)
assert len(t_reward) == LEN_REWARDS, len(t_reward)
t_lambda = params['teacher']['supervise_loss']['lambda']
t_condense = params['teacher']['condense_freq']
t_overlap_m = params['teacher']['overlap']['match_method']
if t_overlap_m == 'train_net':
t_overlap_p = params['teacher']['overlap']['overlap_target']
elif t_overlap_m == 'fixed_steps':
t_overlap_p = str(params['teacher']['num_snapshot_ahead']).zfill(2)
assert t_condense == 5, t_condense
else:
raise ValueError(t_overlap_m)
# For now
if 'beamrider' in s_last.lower() or 'pong' in s_last.lower() or \
'robotank' in s_last.lower():
assert t_lambda == 0.01, '{}, {}'.format(t_lambda, s_last)
else:
assert t_lambda == 0.1, '{}, {}'.format(t_lambda, s_last)
result = {
'game_name': U.get_game_name(s_last),
'overlap_param': t_overlap_p,
'match_method': t_overlap_m,
'supervise_lambda': t_lambda,
'student_rew': s_reward, # student reward every 10k steps (starts @ 50k)
'teacher_rew': t_reward, # teacher reward every 10k steps (starts @ 50k)
'student_steps': s_steps, # should be same among all trials but save anyway
'teacher_steps': t_steps, # should be same among all trials but save anyway
'mb_start': params['teacher']['blend']['start'],
'mb_end': params['teacher']['blend']['end'],
'train_freq': params['train']['train_freq_per_step'],
}
return result
def _get_array(list_of_items):
nb = len(list_of_items)
lengths = [len(x) for x in list_of_items]
if len(lengths) > 1 and np.std(lengths) > 0:
print('Error with lengths: {}'.format(lengths))
sys.exit()
return np.array(list_of_items)
def _info_for_plots(stats, t_stats, target_num_trials=2):
"""Go through and collect data for one experimental condition.
Calling this method several times means we should be able to compare many
different settings. Unlike earlier, game_info (and t_stats) needs to have
the x coordinates, since we're doing full learning curves.
Returns a list that has all the game stats we want. It should be a list
with ONE ITEM PER GAME, so a length 9 list here!
"""
all_game_stats = []
game_idx = 0
print('\n\n\t\tNEW GAME: {}'.format(U.GAMES[game_idx]))
game_info = {} # For each game, collect stats, put in `all_game_stats`.
for key in sorted(stats.keys()):
game = U.GAMES[game_idx]
if game.lower() not in key:
game_idx += 1
game = U.GAMES[game_idx]
print('\n\n\t\tNEW GAME: {}'.format(game))
# Add the previously accumulated states to the game_stats.
all_game_stats.append(game_info)
game_info = {}
num_trials = len(stats[key])
print('\n{} len(stats[key]): {}'.format(key, num_trials))
s_rews = _get_array([x['student_rew'] for x in stats[key]])
t_rews = _get_array([x['teacher_rew'] for x in stats[key]])
print('student/teacher rewards: {} {}'.format(s_rews.shape, t_rews.shape))
#print('std(student): {}'.format(np.std(s_rews, axis=0)))
#print('std(teacher): {}'.format(np.std(t_rews, axis=0)))
assert np.max( np.abs(np.std(t_rews,axis=0)) ) < 0.001, \
'We are using the same teacher, right? The StDev should be zero.'
assert num_trials == s_rews.shape[0] == t_rews.shape[0], num_trials
# Let's not do this in case we want to plot standard deviation
#s_rews = np.mean(s_rews, axis=0)
# Eh this could easily be a global list since all the games use the
# same number of steps (thus far) but this may give us flexibility later.
s_steps = np.mean(_get_array([x['student_steps'] for x in stats[key]]), axis=0)
t_steps = np.mean(_get_array([x['teacher_steps'] for x in stats[key]]), axis=0)
# Add teacher stats, should match for all in this loop so we just do once.
t_rews = np.mean(t_rews, axis=0)
if len(t_stats[game]) == 0:
t_stats[game].append( (t_steps,t_rews) )
# Only want student samples for statistics that we will actually be using.
info = key.split('__')
if info[1] == 'fixed_steps':
#assert num_trials == args.num_trials, num_trials
if num_trials != target_num_trials:
print('WARNING! we have {} trials, but should have {}'.format(
num_trials, target_num_trials))
num_ahead = info[2]
game_info[num_ahead] = (s_steps,s_rews)
elif info[1] == 'train_net':
continue
else:
raise ValueError(info)
# Add last game.
all_game_stats.append(game_info)
print('\n\nDone printing, len all games: {}'.format(len(all_game_stats)))
assert len(all_game_stats) == len(U.GAMES) == len(U.G_INDS_FAT)
return all_game_stats
def report_combined_stats(stats_3, stats_4, args, w=100):
"""Report combined stats, ideally for a plot.
:param stats: dict, with key --> list, where the list has one item per
random seed. This helps us combine results more easily.
"""
# Increase factor to `nrows` to make plot 'taller'.
nrows = 2
ncols = 5
fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharex=False,
figsize=(11*ncols,8*nrows))
#gridspec_kw={'height_ratios': [5,5,5,1]})
INDICES = U.G_INDS_FAT
# Teacher data for plots later.
t_stats_3 = defaultdict(list)
t_stats_4 = defaultdict(list)
# Do what I did earlier, except for BOTH of the stats here. Yeah !!
print('\n*************************************************')
print('COLLECTING DATA FROM FIRST EXPERIMENTAL CONDITION')
print('*************************************************\n')
all_game_stats_3 = _info_for_plots(stats=stats_3, t_stats=t_stats_3)
print('\n*************************************************')
    print('COLLECTING DATA FROM SECOND EXPERIMENTAL CONDITION')
print('*************************************************\n')
all_game_stats_4 = _info_for_plots(stats=stats_4, t_stats=t_stats_4)
# --------------------------------------------------------------------------
# Plot experiment condition 3 and 4 on the same plot. The shape of `s_y`
# here, i.e., the reward, is (num_trials, num_recorded) so we could do that
# as standard deviation, but might be noisy ... also these ALREADY include
# an implicit smoothing over the past 100 episodes.
# --------------------------------------------------------------------------
def _plot(r, c, key, s_stats_3, s_stats_4, color, label, force_color=False,
std_curves=False):
# Case 1, try to plot everything together w/same color codes:
if False:
s_x, s_y = s_stats_3[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, ls='--', lw=slw, color=color, label=label+', 4:1')
s_x, s_y = s_stats_4[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, lw=slw, color=color, label=label+', 2:1')
# Case 2, try to use standard deviations?
if True:
if force_color:
cc = 'gold'
else:
cc = 'blue'
s_x, s_y = s_stats_3[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, lw=slw, color=cc, label=label+', 4:1')
if std_curves:
ax[r,c].fill_between(s_x,
s_y+np.std(s_y, axis=0),
s_y-np.std(s_y, axis=0),
color=cc,
alpha=error_region_alpha)
if force_color:
cc = 'orange'
else:
cc = 'red'
s_x, s_y = s_stats_4[key]
s_y = np.mean(s_y, axis=0)
ax[r,c].plot(s_x, s_y, lw=slw, color=cc, label=label+', 2:1')
if std_curves:
ax[r,c].fill_between(s_x,
s_y+np.std(s_y, axis=0),
s_y-np.std(s_y, axis=0),
color=cc,
alpha=error_region_alpha)
# --------------------------------------------------------------------------
# Now go through this again, same logic, except plot. Alphabetical order
# from top row, w/one for legend to apply to subplots.
# --------------------------------------------------------------------------
for game, (r,c) in zip(U.GAMES, INDICES):
ax[r,c].set_title('{}'.format(game), fontsize=titlesize)
idx = U.GAMES.index(game)
# Keys: ['-1', '-2', '00', '02', '05', '10'] where -1 and -2 are BA and RA.
print('\nKeys for s_stats_3, and then s_stats_4:')
s_stats_3 = all_game_stats_3[idx]
print(game, ': ', sorted(s_stats_3.keys()))
s_stats_4 = all_game_stats_4[idx]
print(game, ': ', sorted(s_stats_4.keys()))
# Just take first one b/c teacher stats should be the same. Actually
# wait maybe we don't need the teacher here? Think about it ...
t_x, t_y = t_stats_3[game][0]
if True:
ax[r,c].plot(t_x, t_y, lw=10, ls='--', color=tcolor, label='DDQN Teacher')
_t_x, _t_y = t_stats_4[game][0]
assert np.allclose(t_x, _t_x), '{} {}'.format(t_x, _t_x)
assert np.allclose(t_y, _t_y), '{} {}'.format(t_y, _t_y)
# --------------------------------------------------------------------------
# NOTE: adjust based on how many of the student 'keys' I want to post.
# Toggle which ones we want on/off. SAME COLOR CODE AS PRIOR FIGURE, if
# we are using all select functions. But we prob. don't need best
# ahead. Honestly it seems best just to let ONE be used at a time.
# --------------------------------------------------------------------------
if True:
key = '-1'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[0], label='S, Best Ahead',
force_color=True)
if False:
key = '-2'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[1], label='S, Rand Ahead')
if False:
key = '00'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[2], label='S, 0 Ahead')
if False:
key = '02'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[3], label='S, 2 Ahead')
if False:
key = '05'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[4], label='S, 5 Ahead')
if True:
key = '10'
_plot(r, c, key, s_stats_3, s_stats_4, scolors[5], label='S, 10 Ahead')
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Bells and whistles
for r in range(nrows):
for c in range(ncols):
#leg = ax[r,c].legend(loc="best", ncol=2, prop={'size':legendsize})
#for legobj in leg.legendHandles:
# legobj.set_linewidth(5.0)
ax[r,c].tick_params(axis='x', labelsize=ticksize)
ax[r,c].tick_params(axis='y', labelsize=ticksize)
# I think it's better to share axes in the x direction to be
# consistent with steps, but doing so removes the axis ticks. This
# reverts it so we get the ticks on all the axis.
#ax[r,c].xaxis.set_tick_params(which='both', labelbottom=True)
# Put this on r=0, c=0, then hide it, just to get legend to appear.
ax[0,0].set_visible(False)
handles, labels = ax[1,1].get_legend_handles_labels()
# Location (0,0) is bottom left. Doing (0,1) is upper left but the text
# isn't visible (because `loc` is the lower left part of the legend).
fig.legend(handles, labels, loc=(0.005,0.500), prop={'size':legendsize})
# Finally, save!! Can't do `.[...].png` since overleaf complains.
plt.tight_layout()
    figname = 'fig_throughput_student.png'
plt.savefig(figname)
print("Just saved: {}".format(figname))
if __name__ == "__main__":
# --------------------------------------------------------------------------
# NOW WE ASSUME WE'RE COMPARING EXP's 3 AND 4.
# --------------------------------------------------------------------------
EXP_PATH = cfg.SNAPS_STUDENT
pp = argparse.ArgumentParser()
args = pp.parse_args()
args.num_trials_exp_3 = 2
args.num_trials_exp_4 = 2
# Iterate through all the *student* models.
dirs = sorted( [join(EXP_PATH,x) for x in os.listdir(EXP_PATH) \
if U._criteria_for_experiments_throughput(x,args)] )
print("Currently plotting with these models, one trained agent per file:")
stats_3 = defaultdict(list)
stats_4 = defaultdict(list)
for dd in dirs:
last_part = os.path.basename(os.path.normpath(dd))
if last_part in U.STUFF_TO_SKIP:
print(" skipping {} due to STUFF_TO_SKIP".format(last_part))
continue
print("\nAnalyzing: {}".format(dd))
info = get_info(dd)
key = '{}__{}__{}'.format(info['game_name'], info['match_method'],
info['overlap_param'])
mb = info['mb_start']
tf = info['train_freq']
mm = info['match_method']
# We only want experiments 3 and 4.
if mb == 0.50 and tf == 4 and mm != 'train_net':
stats_3[key].append(info)
elif mb == 0.50 and tf == 2 and mm != 'train_net':
stats_4[key].append(info)
else:
            print(' skipping {}, mb,tf,mm: {}, {}, {}'.format(key, mb, tf, mm))
continue
print('\nNow going to report on all these stats.')
print(' len stats 3, 4 dicts: {} and {}'.format(len(stats_3), len(stats_4)))
print('')
report_combined_stats(stats_3, stats_4, args)
|
[
"takeshidanny@gmail.com"
] |
takeshidanny@gmail.com
|
389cfec1280691576be87dab17cbace3b76cb636
|
06bf95f2d0310f2a740affdc9d36b3303ecb4645
|
/WebMallProj/WebMall/apps.py
|
39848b31b8cc3a26d2d91bbce9311d10a42491e8
|
[] |
no_license
|
vinee-sha/WebMall
|
3dcf9d1e8c1c91c62f15c0bd534f009995063c3e
|
e4dd622782a26d1afc0ff12ccda0972401b9a4ba
|
refs/heads/master
| 2022-12-25T16:47:04.462969
| 2020-10-04T13:36:33
| 2020-10-04T13:36:33
| 301,137,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
from django.apps import AppConfig
class WebmallConfig(AppConfig):
name = 'WebMall'
|
[
"vineeshanamana321@gmail.com"
] |
vineeshanamana321@gmail.com
|
1fc6096204e32445ea9a2db3b0692477f27c3235
|
99cff3a11eac3d1d5c0d16ee80e5b9c0efc2951d
|
/HttpTrigger/__init__.py
|
4ed8c0e66e13e66f103bc26630f69aa458fbf95d
|
[] |
no_license
|
kevin808/functionapp-python-schemaregistry-demo
|
af5b6de05fad1b60ef533bd0edfcfdd5100e73e9
|
b39a9bf42cee400f3be091cebdc6c14cd80c6bb5
|
refs/heads/master
| 2023-03-30T01:54:48.541836
| 2021-04-06T13:02:42
| 2021-04-06T13:02:42
| 354,515,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
import logging
import azure.functions as func
import os
from azure.identity import ClientSecretCredential
from azure.schemaregistry import SchemaRegistryClient
from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
TENANT_ID='YOUR TENANT_ID'
CLIENT_ID='YOUR CLIENT_ID'
CLIENT_SECRET='YOUR CLIENT_SECRET'
SCHEMA_REGISTRY_ENDPOINT='YOUR_STANDARD_EVENTHUB.servicebus.windows.net'
SCHEMA_GROUP='default'
SCHEMA_STRING = """
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}"""
token_credential = ClientSecretCredential(
tenant_id=TENANT_ID,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET
)
# For Managed Identity
# token_credential = DefaultAzureCredential()
def serialize(serializer):
dict_data_ben = {"name": u"Ben", "favorite_number": 7, "favorite_color": u"red"}
dict_data_alice = {"name": u"Alice", "favorite_number": 15, "favorite_color": u"green"}
# Schema would be automatically registered into Schema Registry and cached locally.
payload_ben = serializer.serialize(dict_data_ben, SCHEMA_STRING)
# The second call won't trigger a service call.
payload_alice = serializer.serialize(dict_data_alice, SCHEMA_STRING)
print('Encoded bytes are: ', payload_ben)
print('Encoded bytes are: ', payload_alice)
return [payload_ben, payload_alice]
def deserialize(serializer, bytes_payload):
# serializer.deserialize would extract the schema id from the payload,
# retrieve schema from Schema Registry and cache the schema locally.
# If the schema id is the local cache, the call won't trigger a service call.
dict_data = serializer.deserialize(bytes_payload)
print('Deserialized data is: ', dict_data)
return dict_data
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
schema_registry = SchemaRegistryClient(endpoint=SCHEMA_REGISTRY_ENDPOINT, credential=token_credential)
serializer = SchemaRegistryAvroSerializer(schema_registry, SCHEMA_GROUP)
bytes_data_ben, bytes_data_alice = serialize(serializer)
dict_data_ben = deserialize(serializer, bytes_data_ben)
dict_data_alice = deserialize(serializer, bytes_data_alice)
serializer.close()
return func.HttpResponse(
"Schema Registry Executed.",
status_code=200
)
|
[
"kevin80828@gmail.com"
] |
kevin80828@gmail.com
|
bef2865099cfa242c3bdb6201a9e938682c2a4d7
|
7d39b91d61fcae881a4ac412974edb6028941d80
|
/test/espnet2/bin/test_enh_inference.py
|
6ec708df0a27c4d50ddfc008b7a71c595e0b3108
|
[
"Apache-2.0"
] |
permissive
|
pzelasko/espnet
|
4106826c5887bf7e203102b36a0129297ecfb5fe
|
629ac3b89b23e08cafbaab3dac1467daaf7d39fb
|
refs/heads/master
| 2021-08-30T17:10:15.178217
| 2021-08-25T20:50:53
| 2021-08-25T20:50:53
| 173,996,615
| 1
| 2
|
Apache-2.0
| 2020-12-05T05:30:26
| 2019-03-05T18:09:12
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
from espnet2.bin.enh_inference import get_parser
from espnet2.bin.enh_inference import main
from espnet2.bin.enh_inference import SeparateSpeech
from espnet2.tasks.enh import EnhancementTask
def test_get_parser():
assert isinstance(get_parser(), ArgumentParser)
def test_main():
with pytest.raises(SystemExit):
main()
@pytest.fixture()
def config_file(tmp_path: Path):
# Write default configuration file
EnhancementTask.main(
cmd=[
"--dry_run",
"true",
"--output_dir",
str(tmp_path),
]
)
return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize(
"input_size, segment_size, hop_size", [(16000, None, None), (35000, 2.4, 0.8)]
)
def test_SeparateSpeech(config_file, batch_size, input_size, segment_size, hop_size):
separate_speech = SeparateSpeech(
enh_train_config=config_file, segment_size=segment_size, hop_size=hop_size
)
wav = torch.rand(batch_size, input_size)
separate_speech(wav, fs=8000)
|
[
"C0me_On@163.com"
] |
C0me_On@163.com
|
70917ef3ba2fae2c622f7b341e85affcd1aa530a
|
19d8c15700cbb2d0b8108c379edbc50bfcb1c149
|
/Source code/laptop.py
|
aa6adaadf30faa8e250fe0bfebe582a4ffbd0e33
|
[] |
no_license
|
trandaitai327/laptop-store-manage
|
41543be450ca68a973e754c0b2bfead741d88e44
|
dee8519d4c4602f7132b5fcbd4a2e03e9193223d
|
refs/heads/master
| 2023-09-03T21:43:59.694305
| 2021-11-05T03:14:07
| 2021-11-05T03:14:07
| 424,813,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
class Laptop:
def __init__(self,ten,hang,gia,soluong):
self.name = ten
self.brand = hang
self.price = gia
self.quantum = soluong
def xuatgia(self):
return self.price
new= Laptop("G3","Dell",25000,3)
|
[
"trandaitai327@gmail.com"
] |
trandaitai327@gmail.com
|
ce57dd3bfd78b96f46a44a4c20d89d7a8798c623
|
5b07f9a868de71ce61aea540f4e437d277611cd0
|
/AC04/produtoconsumidor.py
|
1ac330c2b8dc5762fd0df30a3fbc3bc48fb63a9c
|
[] |
no_license
|
DevAltOfCtrl/Arquitetura_e_Projeto_de_Sistemas
|
2d738b328ad51a8a92113a6cd77704dbabe8f2f7
|
c034ba33fd56601af68b2963a2f22e32f1fa146d
|
refs/heads/main
| 2023-09-06T02:48:09.106918
| 2021-10-23T03:00:22
| 2021-10-23T03:00:22
| 406,593,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
from threading import Thread, Condition
import time
import random
prateleiraMercado = []
controladorFila = Condition()
class Produto(Thread):
def run(self):
produtos = ["carne", "ovo", "arroz", "feijão", "macarrão", "banana",
"maça", "miojo", "abacaxi", "laranja", "cerveja", "vinho",
"cachaça", "creme de leite", "leite condensado", "frango",
"café", "óleo", "açucar", "leite", "sal", "detergente",
"chocolate", "batata", "cenoura", "quiabo", "acerola",
"agua", "suco", "refrigerante", "xuxu", "pepino"]
global prateleiraMercado
while True:
produto = random.choice(produtos)
controladorFila.acquire()
prateleiraMercado.append(produto)
print("Funcionário: Repositor incluiu", produto, "na prateleira.")
controladorFila.notify()
controladorFila.release()
time.sleep(random.random())
class Consumidor(Thread):
def run(self):
global prateleiraMercado
while True:
controladorFila.acquire()
if not prateleiraMercado:
print("Prateleira sem produtos, cliente aguardando repositor.")
controladorFila.wait()
produto = prateleiraMercado.pop(0)
print("Consumidor: Cliente pegou", produto, "da prateleira.")
controladorFila.release()
time.sleep(random.random())
|
[
"noreply@github.com"
] |
noreply@github.com
|
9568a5759861050bec15f02ba00c8e901ff92fc8
|
70f854c9a34be625b882bde0e5c5269522842ccf
|
/week 2/sumposled.py
|
2bd590a34df68a174f615b039d21e36b26771d83
|
[] |
no_license
|
MariaMedvede/coursera
|
6d3d897c05045e250d3b5e6e9b25a1d2de3a0df9
|
7ccc53845535bc9e341d3c42d9475e832b4cc7f4
|
refs/heads/master
| 2020-09-15T07:03:31.479505
| 2019-12-01T19:29:08
| 2019-12-01T19:29:08
| 223,374,208
| 0
| 2
| null | 2019-12-01T19:29:10
| 2019-11-22T09:55:15
|
Python
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
now = int(input())
seqSum = 0
while now != 0:
seqSum = seqSum + now
now = int(input())
print(seqSum)
|
[
"manya1999m09@yandex.ru"
] |
manya1999m09@yandex.ru
|
fc99fb37d4a38af2dd88b91d6d660527ae7b23fb
|
e29922802cd85e6745ec3215d71ffba9ba4a1db8
|
/a_prep_data/a4_mate_plots_eunis.py
|
c53e37719fb89975e557ea673be59ea9080f695d
|
[] |
no_license
|
haro-nl/DOREN_WEnR
|
26855f49261a0b3ea93ab743b377e4f5dfb10583
|
dadf8b0bf56912d22eb5fe4a499d3ef3ad79e127
|
refs/heads/master
| 2020-04-16T18:41:39.259162
| 2019-01-15T11:20:09
| 2019-01-15T11:20:09
| 165,831,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
# -*- coding: utf-8 -*
#!/usr/bin/python3.5.3
'''Script to
1. read EUNIS type indicator species
2. generate look up dictionaries to get Dg, CoSp & DmSp species for each EUNIS types
3. read species list for all EVA vegetation plots and add sp list to each plot in a new column
4. define completeness score for a veg plot based on similarity between actual sp list and DgSp, CoSp & DmSp lists
5. calculate scores for all plots
Hans Roelofsen, 22 November 2018, Wageningen Environmental Research
'''
import os
import datetime
import pickle
from helper import do
if __name__ == "__main__":
print("starting at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
eva_head = do.get_eva_emep_data() # EVA header data with EMEP already joined
eva_plot_comp = do.get_eva_plot_species(eva_head['PlotObservationID'].tolist()) # Species composition of all EVA plots
eunis_type_composition_lists = do.generate_types_species_lists() # Dictionary of CoSp, DgSp & DmSp species for all EUNIS types
# Add species list as column to the EVA plot table
eva_head['sp_list'] = eva_head.apply(lambda row: do.get_plot_species(eva_plot_comp, row['PlotObservationID']), axis=1)
print("Done 01 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
# Calculate DmSp score based on actual species list and DmSp species list for the applicable EUNIS type
eva_head['dmsp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['DmSp']), axis=1)
print("Done 02 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
# idem for DgSp
eva_head['dgsp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['DgSp']), axis=1)
print("Done 03 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
# idem for CoSp
eva_head['cosp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['CoSp']), axis=1)
print("Done 04 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
# write to pickle file for safe keeping
pickle_name = "eva_emep_score" + datetime.datetime.now().strftime("%Y%m%d_%H%M") + '.pkl'
with open(os.path.join(r'd:\temppickle', pickle_name), 'wb') as handle:
pickle.dump(eva_head, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done all at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
|
[
"hans.roelofsen@wur.nl"
] |
hans.roelofsen@wur.nl
|
3ac5c2036716fd233c20c1b5d0ed1d8bf60ea49a
|
49ae5bd9089a2b096fabc970156803b21b1be9d7
|
/env/Lib/site-packages/django_extensions/management/commands/sync_s3.py
|
7efb71ae0fe0937e29582c1f031e37010ee7bd81
|
[] |
no_license
|
scortes1989/sfotipy
|
ea7cfd4abe52dfb9b5094397a9f7a80e6d78713d
|
d3ed677f8bee0452f1ac14dfc718ca5091cf95eb
|
refs/heads/master
| 2022-11-06T21:06:47.534330
| 2015-05-15T20:26:48
| 2015-05-15T20:26:48
| 33,416,364
| 0
| 1
| null | 2022-10-24T08:55:08
| 2015-04-04T18:55:33
|
Python
|
UTF-8
|
Python
| false
| false
| 15,704
|
py
|
"""
Sync Media to S3
================
Django command that scans all files in your settings.MEDIA_ROOT and
settings.STATIC_ROOT folders and uploads them to S3 with the same directory
structure.
This command can optionally do the following but it is off by default:
* gzip compress any CSS and Javascript files it finds and adds the appropriate
'Content-Encoding' header.
* set a far future 'Expires' header for optimal caching.
* upload only media or static files.
* use any other provider compatible with Amazon S3.
* set other than 'public-read' ACL.
Note: This script requires the Python boto library and valid Amazon Web
Services API keys.
Required settings.py variables:
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
When you call this command with the `--renamegzip` param, it will add
the '.gz' extension to the file name. But Safari just doesn't recognize
'.gz' files and your site won't work on it! To fix this problem, you can
set any other extension (like .jgz) in the `SYNC_S3_RENAME_GZIP_EXT`
variable.
Command options are:
-p PREFIX, --prefix=PREFIX
The prefix to prepend to the path on S3.
--gzip Enables gzipping CSS and Javascript files.
--expires Enables setting a far future expires header.
--force Skip the file mtime check to force upload of all
files.
--filter-list Override default directory and file exclusion
filters. (enter as comma separated line)
--renamegzip Enables renaming of gzipped files by appending '.gz'.
to the original file name. This way your original
assets will not be replaced by the gzipped ones.
You can change the extension setting the
`SYNC_S3_RENAME_GZIP_EXT` var in your settings.py
file.
--invalidate Invalidates the objects in CloudFront after uploading
stuff to s3.
--media-only Only MEDIA_ROOT files will be uploaded to S3.
--static-only Only STATIC_ROOT files will be uploaded to S3.
--s3host Override default s3 host.
--acl Override default ACL settings ('public-read' if
settings.AWS_DEFAULT_ACL is not defined).
TODO:
* Use fnmatch (or regex) to allow more complex FILTER_LIST rules.
"""
import datetime
import email
import gzip
import mimetypes
import os
import time
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.compat import StringIO
from django_extensions.management.utils import signalcommand
# Make sure boto is available
try:
import boto
import boto.exception
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class Command(BaseCommand):
# Extra variables to avoid passing these around
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
AWS_CLOUDFRONT_DISTRIBUTION = ''
SYNC_S3_RENAME_GZIP_EXT = ''
DIRECTORIES = ''
FILTER_LIST = ['.DS_Store', '.svn', '.hg', '.git', 'Thumbs.db']
GZIP_CONTENT_TYPES = (
'text/css',
'application/javascript',
'application/x-javascript',
'text/javascript'
)
uploaded_files = []
upload_count = 0
skip_count = 0
option_list = BaseCommand.option_list + (
make_option('-p', '--prefix',
dest='prefix',
default=getattr(settings, 'SYNC_S3_PREFIX', ''),
help="The prefix to prepend to the path on S3."),
make_option('-d', '--dir',
dest='dir',
help="Custom static root directory to use"),
make_option('--s3host',
dest='s3host',
default=getattr(settings, 'AWS_S3_HOST', ''),
help="The s3 host (enables connecting to other providers/regions)"),
make_option('--acl',
dest='acl',
default=getattr(settings, 'AWS_DEFAULT_ACL', 'public-read'),
help="Enables to override default acl (public-read)."),
make_option('--gzip',
action='store_true', dest='gzip', default=False,
help="Enables gzipping CSS and Javascript files."),
make_option('--renamegzip',
action='store_true', dest='renamegzip', default=False,
help="Enables renaming of gzipped assets to have '.gz' appended to the filename."),
make_option('--expires',
action='store_true', dest='expires', default=False,
help="Enables setting a far future expires header."),
make_option('--force',
action='store_true', dest='force', default=False,
help="Skip the file mtime check to force upload of all files."),
make_option('--filter-list', dest='filter_list',
action='store', default='',
help="Override default directory and file exclusion filters. (enter as comma seperated line)"),
make_option('--invalidate', dest='invalidate', default=False,
action='store_true',
help='Invalidates the associated objects in CloudFront'),
make_option('--media-only', dest='media_only', default='',
action='store_true',
help="Only MEDIA_ROOT files will be uploaded to S3"),
make_option('--static-only', dest='static_only', default='',
action='store_true',
help="Only STATIC_ROOT files will be uploaded to S3"),
)
help = 'Syncs the complete MEDIA_ROOT structure and files to S3 into the given bucket name.'
args = 'bucket_name'
can_import_settings = True
@signalcommand
def handle(self, *args, **options):
if not HAS_BOTO:
raise ImportError("The boto Python library is not installed.")
# Check for AWS keys in settings
if not hasattr(settings, 'AWS_ACCESS_KEY_ID') or not hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
raise CommandError('Missing AWS keys from settings file. Please supply both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.')
else:
self.AWS_ACCESS_KEY_ID = settings.AWS_ACCESS_KEY_ID
self.AWS_SECRET_ACCESS_KEY = settings.AWS_SECRET_ACCESS_KEY
if not hasattr(settings, 'AWS_BUCKET_NAME'):
raise CommandError('Missing bucket name from settings file. Please add the AWS_BUCKET_NAME to your settings file.')
else:
if not settings.AWS_BUCKET_NAME:
raise CommandError('AWS_BUCKET_NAME cannot be empty.')
self.AWS_BUCKET_NAME = settings.AWS_BUCKET_NAME
if not hasattr(settings, 'MEDIA_ROOT'):
raise CommandError('MEDIA_ROOT must be set in your settings.')
else:
if not settings.MEDIA_ROOT:
raise CommandError('MEDIA_ROOT must be set in your settings.')
self.AWS_CLOUDFRONT_DISTRIBUTION = getattr(settings, 'AWS_CLOUDFRONT_DISTRIBUTION', '')
self.SYNC_S3_RENAME_GZIP_EXT = \
getattr(settings, 'SYNC_S3_RENAME_GZIP_EXT', '.gz')
self.verbosity = int(options.get('verbosity'))
self.prefix = options.get('prefix')
self.do_gzip = options.get('gzip')
self.rename_gzip = options.get('renamegzip')
self.do_expires = options.get('expires')
self.do_force = options.get('force')
self.invalidate = options.get('invalidate')
self.DIRECTORIES = options.get('dir')
self.s3host = options.get('s3host')
self.default_acl = options.get('acl')
self.FILTER_LIST = getattr(settings, 'FILTER_LIST', self.FILTER_LIST)
filter_list = options.get('filter_list')
if filter_list:
# command line option overrides default filter_list and
# settings.filter_list
self.FILTER_LIST = filter_list.split(',')
self.media_only = options.get('media_only')
self.static_only = options.get('static_only')
# Get directories
if self.media_only and self.static_only:
raise CommandError("Can't use --media-only and --static-only together. Better not use anything...")
elif self.media_only:
self.DIRECTORIES = [settings.MEDIA_ROOT]
elif self.static_only:
self.DIRECTORIES = [settings.STATIC_ROOT]
elif self.DIRECTORIES:
self.DIRECTORIES = [self.DIRECTORIES]
else:
self.DIRECTORIES = [settings.MEDIA_ROOT, settings.STATIC_ROOT]
# Now call the syncing method to walk the MEDIA_ROOT directory and
# upload all files found.
self.sync_s3()
# Sending the invalidation request to CloudFront if the user
# requested this action
if self.invalidate:
self.invalidate_objects_cf()
print("")
print("%d files uploaded." % self.upload_count)
print("%d files skipped." % self.skip_count)
def open_cf(self):
"""
Returns an open connection to CloudFront
"""
return boto.connect_cloudfront(
self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
def invalidate_objects_cf(self):
"""
Split the invalidation request in groups of 1000 objects
"""
if not self.AWS_CLOUDFRONT_DISTRIBUTION:
raise CommandError(
'An object invalidation was requested but the variable '
'AWS_CLOUDFRONT_DISTRIBUTION is not present in your settings.')
# We can't send more than 1000 objects in the same invalidation
# request.
chunk = 1000
# Connecting to CloudFront
conn = self.open_cf()
# Splitting the object list
objs = self.uploaded_files
chunks = [objs[i:i + chunk] for i in range(0, len(objs), chunk)]
# Invalidation requests
for paths in chunks:
conn.create_invalidation_request(
self.AWS_CLOUDFRONT_DISTRIBUTION, paths)
def sync_s3(self):
"""
Walks the media/static directories and syncs files to S3
"""
bucket, key = self.open_s3()
for directory in self.DIRECTORIES:
os.path.walk(directory, self.upload_s3, (bucket, key, self.AWS_BUCKET_NAME, directory))
def compress_string(self, s):
"""Gzip a given string."""
zbuf = StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def get_s3connection_kwargs(self):
"""Returns connection kwargs as a dict"""
kwargs = {}
if self.s3host:
kwargs['host'] = self.s3host
return kwargs
def open_s3(self):
"""
Opens connection to S3 returning bucket and key
"""
conn = boto.connect_s3(
self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
**self.get_s3connection_kwargs())
try:
bucket = conn.get_bucket(self.AWS_BUCKET_NAME)
except boto.exception.S3ResponseError:
bucket = conn.create_bucket(self.AWS_BUCKET_NAME)
return bucket, boto.s3.key.Key(bucket)
def upload_s3(self, arg, dirname, names):
"""
This is the callback to os.path.walk and where much of the work happens
"""
bucket, key, bucket_name, root_dir = arg
# Skip directories we don't want to sync
if os.path.basename(dirname) in self.FILTER_LIST:
# prevent walk from processing subfiles/subdirs below the ignored one
del names[:]
return
# Later we assume the MEDIA_ROOT ends with a trailing slash
if not root_dir.endswith(os.path.sep):
root_dir = root_dir + os.path.sep
for file in names:
headers = {}
if file in self.FILTER_LIST:
continue # Skip files we don't want to sync
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
continue # Don't try to upload directories
file_key = filename[len(root_dir):]
if self.prefix:
file_key = '%s/%s' % (self.prefix, file_key)
# Check if file on S3 is older than local file, if so, upload
if not self.do_force:
s3_key = bucket.get_key(file_key)
if s3_key:
s3_datetime = datetime.datetime(*time.strptime(
s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6])
local_datetime = datetime.datetime.utcfromtimestamp(
os.stat(filename).st_mtime)
if local_datetime < s3_datetime:
self.skip_count += 1
if self.verbosity > 1:
print("File %s hasn't been modified since last being uploaded" % file_key)
continue
# File is newer, let's process and upload
if self.verbosity > 0:
print("Uploading %s..." % file_key)
content_type = mimetypes.guess_type(filename)[0]
if content_type:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
file_obj = open(filename, 'rb')
file_size = os.fstat(file_obj.fileno()).st_size
filedata = file_obj.read()
if self.do_gzip:
# Gzipping only if file is large enough (>1K is recommended)
# and only if file is a common text type (not a binary file)
if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES:
filedata = self.compress_string(filedata)
if self.rename_gzip:
# If rename_gzip is True, then rename the file
                        # by appending an extension (like '.gz') to the
                        # original filename.
file_key = '%s.%s' % (
file_key, self.SYNC_S3_RENAME_GZIP_EXT)
headers['Content-Encoding'] = 'gzip'
if self.verbosity > 1:
print("\tgzipped: %dk to %dk" % (file_size / 1024, len(filedata) / 1024))
if self.do_expires:
# HTTP/1.0
headers['Expires'] = '%s GMT' % (email.Utils.formatdate(time.mktime((datetime.datetime.now() + datetime.timedelta(days=365 * 2)).timetuple())))
# HTTP/1.1
headers['Cache-Control'] = 'max-age %d' % (3600 * 24 * 365 * 2)
if self.verbosity > 1:
print("\texpires: %s" % headers['Expires'])
print("\tcache-control: %s" % headers['Cache-Control'])
try:
key.name = file_key
key.set_contents_from_string(filedata, headers, replace=True,
policy=self.default_acl)
except boto.exception.S3CreateError as e:
print("Failed: %s" % e)
except Exception as e:
print(e)
raise
else:
self.upload_count += 1
self.uploaded_files.append(file_key)
file_obj.close()
|
[
"SCD@SCD-PC.inpact.net"
] |
SCD@SCD-PC.inpact.net
|
d1ca5c3c0478ab8a9e58042e82b7a9186a494789
|
fd93fbb2162423b66636c8576548b4ad7f0564d3
|
/Data_Structures/SkipList.py
|
d067f998de54422ab1108059a642ad543da37125
|
[] |
no_license
|
AlpacaMax/Algorithm_Miscellaneous
|
a595436386eb68353bf98f4ced09bf2ba06874c5
|
e0a731ce0642cd602bcb8e8b7542d4b3806d9916
|
refs/heads/master
| 2020-11-27T21:26:38.454722
| 2020-10-22T19:20:58
| 2020-10-22T19:20:58
| 229,607,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,169
|
py
|
import random
class SkipList:
class Item:
def __init__(self, key, value=None):
self.key = key
self.value = value
def __repr__(self):
return "({}, {})".format(self.key, self.value)
class Node:
def __init__(self, item=None, prev=None, next=None, above=None, below=None):
self.item = item
self.next = next
self.prev = prev
self.above = above
self.below = below
def disconnect(self):
            self.__init__()  # reset item and all links back to None
def __init__(self):
self.header = SkipList.Node()
self.trailer = SkipList.Node()
self.header.next = self.trailer
self.trailer.prev = self.header
self.g_header = SkipList.Node()
self.g_trailer = SkipList.Node()
self.g_header.next = self.g_trailer
self.g_trailer.prev = self.g_header
self.header.below = self.g_header
self.g_header.above = self.header
self.trailer.below = self.g_trailer
self.g_trailer.above = self.trailer
self.size = 0
self.height = 1
def __len__(self):
return self.size
def is_empty(self):
return self.size == 0
def find(self, key):
cursor = self.header
while (cursor.below is not None):
cursor = cursor.below
while (cursor.next.item is not None and key >= cursor.next.item.key):
cursor = cursor.next
return cursor
def insert(self, key, value=None):
node = self.find(key)
if (node.item is not None and node.item.key == key):
node.item.value = value
else:
cursor = self.add_after(node, key, value)
self.size += 1
index = 1
while (self.flip()):
index += 1
if (index > self.height):
self.add_level_below(self.header, self.trailer)
cursor = self.add_above(cursor, key)
def add_level_below(self, header, trailer):
above_header = header
above_trailer = trailer
below_header = header.below
below_trailer = trailer.below
new_header = SkipList.Node(above=above_header, below=below_header)
new_trailer = SkipList.Node(above=above_trailer, below=below_trailer)
new_header.next = new_trailer
new_trailer.prev = new_header
above_header.below = new_header
below_header.above = new_header
above_trailer.below = new_trailer
below_trailer.above = new_trailer
self.height += 1
def __getitem__(self, key):
node = self.find(key)
if (node.item is None or node.item.key != key):
raise KeyError(str(key) + " does not exist!")
return node.item.value
def __setitem__(self, key, value):
node = self.find(key)
if (node.item is not None and node.item.key == key):
node.item.value = value
else:
self.insert(key, value)
def __delitem__(self, key):
node = self.find(key)
if (node.item is None or node.item.key != key):
raise KeyError(str(key) + " does not exist!")
cursor = node
while (cursor is not None):
node_to_delete = cursor
cursor = cursor.above
self.delete_node(node_to_delete)
self.size -= 1
def __iter__(self):
cursor = self.g_header.next
while (cursor is not self.g_trailer):
yield cursor.item.key
cursor = cursor.next
def add_after(self, node, key, value=None):
prev_node = node
next_node = node.next
new_item = SkipList.Item(key, value)
new_node = SkipList.Node(item=new_item, next=next_node, prev=prev_node)
prev_node.next = new_node
next_node.prev = new_node
return new_node
def add_above(self, node, key, value=None):
cursor = node.prev
while (cursor.above is None):
cursor = cursor.prev
cursor = cursor.above
below_node = node
above_node = self.add_after(cursor, key, value)
below_node.above = above_node
above_node.below = below_node
return above_node
def delete_node(self, node):
prev_node = node.prev
next_node = node.next
prev_node.next = next_node
next_node.prev = prev_node
item = node.item
node.disconnect()
return item
def flip(self):
return random.random() > 0.5
def display(self):
header = self.header
while (header.below is not None):
header = header.below
cursor = header
while (header is not None):
while (cursor is not None):
print(cursor.item, end='-')
cursor = cursor.above
print()
header = header.next
cursor = header
if __name__ == "__main__":
sl = SkipList()
for i in range(10):
sl[i] = i
for i in sl:
print(i)
|
[
"gabrielyang233@outlook.com"
] |
gabrielyang233@outlook.com
|
c8a495c25d68757e7f04885e00d173531073ac78
|
efde64a427ec0e7a03c6227ea36e63c386924545
|
/test.py
|
295e11d4a904dc38869e71a681bae9ef5e109100
|
[] |
no_license
|
ZhihaoDU/du2020dan
|
5bfcc8ead7c9ac1f1e45e3cfb68f45c253e81403
|
4ec8d37a46c6d40e6d2f07ec1d299ac2e802ed69
|
refs/heads/master
| 2021-01-15T07:04:33.043159
| 2020-02-25T04:43:55
| 2020-02-25T04:43:55
| 242,909,472
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,432
|
py
|
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from kaldi_helper import KaldiFeatHolder
from speech_utils import read_path_list, print_with_time, calc_rescale_c
from scipy.io import wavfile
from kaldi_fbank_extractor import log_fbank, get_fft_mel_mat
import scipy.io as sio
from multiprocessing import Pool
import librosa
from models import get_model
from enhancement_targets import get_target
import json
import argparse
def post_process(src, target):
print_with_time("Doing post process...")
os.system("cp %s/spk2utt %s/" % (src, target))
os.system("cp %s/text %s/" % (src, target))
os.system("cp %s/utt2spk %s/" % (src, target))
os.system("cp %s/wav.scp %s/" % (src, target))
os.system("cp %s/spk2gender %s/" % (src, target))
def calc_func(noisy_dir_path):
with torch.no_grad():
debug_model = args.debug_model
_method = method
model_opts = json.load(open(os.path.join("configs/%s.json" % args.model_config), 'r'))
gen_model = model_opts['gen_model_name']
calc_target = get_target(args.target_type)
device = torch.device("cuda")
print_with_time("Loading model...")
Generator, _ = get_model(gen_model, None)
model = Generator(model_opts['gen_model_opts']).to(device)
checkpoint = torch.load("Checkpoints/%s/checkpoint_%09d.pth" % (_method, args.global_step))
model.load_state_dict(checkpoint["generator"])
# model.load_state_dict(checkpoint["enhancer"])
model.eval()
melbank = get_fft_mel_mat(512, 16000, 40)
_method = "_".join([_method, str(args.global_step)])
if debug_model:
os.system('mkdir -p debug/%s' % _method)
print_with_time("Start to enhance wav file in %s with method %s\n" % (noisy_dir_path, _method))
udir_path = "%s_%s" % (noisy_dir_path, _method)
if not os.path.exists(udir_path):
os.mkdir(udir_path)
wav_scp = read_path_list(os.path.join(noisy_dir_path, "wav.scp"))
if not debug_model:
ark_file = open(os.path.join(udir_path, "feats.ark"), 'wb')
scp_file = open(os.path.join(udir_path, "feats.scp"), 'w')
key_len = wav_scp[0].find(' ')
kaldi_holder = KaldiFeatHolder(key_len, 3000, 40)
offset = key_len + 1
enhanced_number = 0
for it, (one_wav) in enumerate(wav_scp):
wav_id, wav_path = one_wav.split(' ')
sr, noisy_speech = wavfile.read(wav_path)
if len(noisy_speech.shape) > 1:
noisy_speech = np.mean(noisy_speech, 1)
early50_path = wav_path.replace('.wav', '_early50.wav')
sr, early50 = wavfile.read(early50_path)
if len(early50.shape) > 1:
early50 = np.mean(early50, 1)
# as the training dataset, use "power_norm" to normalize the waveform to match the input of model.
# c = np.sqrt(np.mean(np.square(noisy_speech)))
c = calc_rescale_c(noisy_speech, args.rescale_method)
noisy_speech = noisy_speech / c
early50 = early50 / c
noisy_fbank, noisy_mag = log_fbank(noisy_speech, False, True, True, None)
early50_fbank, early50_mag = log_fbank(early50, False, True, True, None)
noise_fbank, noise_mag = log_fbank(noisy_speech - early50, False, True, True, None)
if args.feature_domain == "mel":
feat = torch.Tensor(noisy_fbank.T).unsqueeze(0).to(device)
label = torch.Tensor(early50_fbank.T).unsqueeze(0).to(device)
noise = torch.Tensor(noise_fbank.T).unsqueeze(0).to(device)
else:
feat = torch.Tensor(np.square(noisy_mag).T).unsqueeze(0).to(device)
label = torch.Tensor(np.square(early50_mag).T).unsqueeze(0).to(device)
noise = torch.Tensor(np.square(noise_mag).T).unsqueeze(0).to(device)
if args.target_type.lower() == "mapping_mag":
predict = model.forward(feat.sqrt())
else:
predict = model.forward(torch.log(feat + opts['eps']))
results = calc_target(feat, label, noise, predict, opts)
enhanced = results["enhanced"]
predict = results["predict"]
target = results["target"]
if args.feature_domain == "mel":
enhanced_pow = 0
enhanced_fbank = enhanced[0, :, :].cpu().numpy()
else:
enhanced_pow = enhanced[0, :, :].cpu().numpy()
enhanced_fbank = np.matmul(enhanced_pow, melbank.T)
log_enhanced_fbank = np.log(enhanced_fbank * (c ** 2.) + opts['eps'])
if debug_model:
sio.savemat("debug/%s/%s_%s" % (_method, wav_id, wav_path.split('/')[-5]),
{'noisy_mag': noisy_mag, 'noisy_fbank': noisy_fbank,
'enhanced_mag': np.sqrt(enhanced_pow).T, 'enhanced_fbank': enhanced_fbank.T,
'early50_mag': early50_mag, 'early50_fbank': early50_fbank,
'predict': predict[0, :, :].cpu().numpy().T,
'target': target[0, :, :].cpu().numpy().T,
'log_enhanced_fbank': log_enhanced_fbank.T,
'log_early50_fbank': np.log(early50_fbank * (c ** 2.) + opts['eps']),
'c': c
})
if it >= 0:
return
else:
kaldi_holder.set_key(wav_id)
kaldi_holder.set_value(log_enhanced_fbank)
kaldi_holder.write_to(ark_file)
scp_file.write("%s %s/feats.ark:%d\n" % (wav_id, udir_path, offset))
offset += kaldi_holder.get_real_len()
enhanced_number += 1
if enhanced_number % 40 == 0:
print_with_time(
"Enhanced %5d(%6.2f%%) utterance" % (enhanced_number, 100. * enhanced_number / len(wav_scp)))
print_with_time("Enhanced %d utterance" % enhanced_number)
ark_file.close()
scp_file.close()
post_process(noisy_dir_path, udir_path)
print_with_time("Done %s." % _method)
if __name__ == '__main__':
opts = {}
opts['win_len'] = 400
opts['sr'] = 16000
opts['device'] = torch.device('cuda:0')
opts['mel_channels'] = 40
opts['win_type'] = 'hamming'
opts['eps'] = 1e-12
opts['clip_low'] = 0.
opts['clip_high'] = 1.
opts['log_power_offset'] = 10.
opts['compress_label'] = False
opts['log_label_min'] = -27.63
opts['log_label_max'] = 14.41
parser = argparse.ArgumentParser()
parser.add_argument('--script_note', type=str, default=None)
parser.add_argument('--feature_domain', type=str, default="mel")
parser.add_argument('--adversarial_loss', type=str, default=None)
parser.add_argument('--model_config', type=str, default='BiFreqMelCRN_DCGAN')
parser.add_argument('--target_type', type=str, default="mapping_log_pow")
parser.add_argument('--clean_type', type=str, default="early50")
parser.add_argument('--name_note', type=str, default=None)
parser.add_argument('--d_iter', type=int, default=0)
parser.add_argument('--rescale_method', type=str, default="power_norm", choices=["None", "value_norm", "power_norm",
"st_power_norm", "max_norm"])
parser.add_argument('--dist_alpha', type=float, default=0)
parser.add_argument('--data_augment', type=str, default="naive", choices=["None", "naive"])
parser.add_argument('--global_step', type=int, default=0)
parser.add_argument('--debug_model', type=bool, default=False)
parser.add_argument('--l1_alpha', type=float, default=0.)
parser.add_argument('--l2_alpha', type=float, default=0.)
parser.add_argument('--glc_alpha', type=float, default=0., help="Lipschitz continuous penalty for generator")
parser.add_argument('--feat_alpha', type=float, default=0.)
args = parser.parse_args()
if args.script_note is not None:
model_name_list = [args.script_note, args.feature_domain]
else:
model_name_list = [args.feature_domain]
# model_name_list.append("mse")
if args.adversarial_loss is not None:
model_name_list.append(args.adversarial_loss)
model_name_list.extend([args.model_config, args.target_type, args.clean_type])
if args.d_iter > 0:
model_name_list.append("D%d" % args.d_iter)
if args.name_note is not None:
model_name_list.append(args.name_note)
if args.rescale_method != "None":
model_name_list.append(args.rescale_method)
if args.l1_alpha > 0:
model_name_list.append("L1_%.6f" % args.l1_alpha)
if args.l2_alpha > 0:
model_name_list.append("L2_%.6f" % args.l2_alpha)
if args.glc_alpha > 0:
model_name_list.append("GLC_%.6f" % args.glc_alpha)
if args.dist_alpha > 0:
model_name_list.append("DIST_%.6f" % args.dist_alpha)
if args.feat_alpha > 0:
model_name_list.append("FEAT_%.6f" % args.feat_alpha)
if args.data_augment != "None":
model_name_list.append(args.data_augment)
method = "_".join(model_name_list)
print("|----------------------------------------------------------------------------|")
print("|", method.center(74), "|")
print("|----------------------------------------------------------------------------|")
print(args)
print(opts)
input("Press any key to continue.")
noisy_dir_list = [
"/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/train_si84_noisy",
"/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/dev_dt_05_noisy",
"/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/test_eval92_5k_noisy",
]
if args.debug_model:
noisy_dir_list = [
"/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/train_si84_noisy"
]
pool = Pool(len(noisy_dir_list))
pool.map(calc_func, noisy_dir_list)
pool.close()
|
[
"duzhihao.china@gmail.com"
] |
duzhihao.china@gmail.com
|
977efed259353b51d96b6ea3d218a036d809fef3
|
d456bae1077867108bc7cc3fcc34f18b0ef75a30
|
/linkedin/linkedin_top25byname_parser.py
|
b516009ad5cfacb9570a25261c707f0dd13eb0e8
|
[] |
no_license
|
vlivashkin/parsers
|
efb1cfc3f3bd909b2a8ffc92afbbb1c1154c9279
|
a0334df863b4cf94cb567f3b5bbd00aab07f4444
|
refs/heads/master
| 2021-05-30T18:09:50.482475
| 2016-03-11T19:17:04
| 2016-03-11T19:17:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
import requests
from bs4 import BeautifulSoup
from requests.exceptions import ChunkedEncodingError
class LinkedinNoAuthTop25ByNameParser:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
top25_url = 'https://www.linkedin.com/pub/dir/{}/{}/'
def get_people_by_name(self, first_name, last_name):
url = self.top25_url.format(first_name, last_name)
try:
response = requests.get(url, headers=self.headers)
except ChunkedEncodingError:
print("Incomplete read(")
return []
if response.history: # if there are only one person with this name linkedin redirects you to his page
person = {
'name': first_name + " " + last_name,
'url': response.url,
}
return [person]
else:
page = response.text
soup = BeautifulSoup(page, "html.parser")
people = []
for profile in soup.find_all('div', 'profile-card'):
content = profile.find("div", "content")
img_url = profile.find("a", "profile-img").img['src']
person = {
'name': content.h3.a.text,
'url': content.h3.a['href'],
# 'headline': content.find('p', "headline").text,
# "location": content.find("dl", "basic").findAll("dd")[0].text,
# 'industry': content.find("dl", "basic").findAll("dd")[1].text,
'img_url': img_url if "ghost" not in img_url else "ghost"
}
people.append(person)
return people
def main():
parser = LinkedinNoAuthTop25ByNameParser()
people = parser.get_people_by_name("Vladimir", "Ivashkin")
# people = parser.get_people_by_name("Taras", "Pustovoy")
for person in people:
print(person)
if __name__ == "__main__":
main()
|
[
"illusionww@gmail.com"
] |
illusionww@gmail.com
|
1b32af6d4a9df22d779af09836fddb308d08536b
|
a331345b1269d863107ebaf525fb9e06443722c6
|
/drf-intro/simple_crud/measurements/admin.py
|
987929793feb8430a7621da7c283948f38f97a89
|
[] |
no_license
|
Pfliger/dj-homeworks
|
19de12f8b2146751b24c89e59bdd307c571ff71d
|
c9f5d5070a467d4f7b35d416b8f91ad290008ab6
|
refs/heads/master
| 2023-03-09T01:09:49.251297
| 2021-03-05T16:24:22
| 2021-03-05T16:24:22
| 335,931,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
from django.contrib import admin
from measurements.models import Project, Measurement
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
pass
@admin.register(Measurement)
class MeasurementAdmin(admin.ModelAdmin):
pass
|
[
"pfliger@list.ru"
] |
pfliger@list.ru
|
d5672859a1c11baa0302a06e15050c61a8db266f
|
70ac291bcf11d8452c6b1ade5fbadd0003d9e613
|
/machine_learning_机器学习/准确率(Accuracy)、精确率(Precision)、召回率(Recall)、F值(F-Measure)等评估指标的计算.py
|
8c51fb9bad3621e3c8e70198ca29c52253849f25
|
[] |
no_license
|
SnowWhiteZ/hello-world
|
10cc1faf508340f835fffbf3c587101e3e0e78a5
|
e43793b413016eb2f52b40990a8f1b493d29c983
|
refs/heads/master
| 2022-03-12T22:10:42.163091
| 2019-12-02T09:58:15
| 2019-12-02T09:58:15
| 225,376,506
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,456
|
py
|
#!/usr/bin/python3
# coding: utf-8
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, fbeta_score, precision_score, recall_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
# ground-truth labels
y_true = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
# model prediction scores
y_test = [0.8453712241207609, 0.8365137845084419, 0.8396024690959464, 0.8690716625950063, 0.801398983655787, 0.8353417405844167, 0.8887589815396711, 0.8274617726584338, 0.8901324702288052, 0.8515827665762914, 0.8008748432690203, 0.9129143613344268, 0.8213637332093631, 0.7926672650384551, 0.8715962551942291, 0.865989576549353, 0.8487118383625984, 0.893722366823937, 0.8683798090835637, 0.8258107838161615, 0.9067962552630583, 0.8896577622207299, 0.8287242449131549, 0.862162050742874, 0.9145984088092137, 0.8195240228832353, 0.8627208683955114, 0.8667420865435141, 0.833175478131922, 0.8338735760735464, 0.8609573544733866, 0.8270040835455006, 0.8438342928159803, 0.9162216060491829, 0.8681943043237748, 0.825237777063406, 0.9309199493779501, 0.847918698600505, 0.885842165942269, 0.845606331185933, 0.8867428557974891, 0.8569372316111383, 0.8374900840504085, 0.8495098728280119, 0.8475137546498668, 0.8509974354378016, 0.8545542968912262, 0.8369359268265817, 0.8881628216627452, 0.8553054247582024, 0.8715475068300871, 0.8608489638331329, 0.7871896522021451, 0.7986180814516614, 0.8679817198115483, 0.8555312604259576, 0.8737131993516944, 0.8570307159808236, 0.86943760267903, 0.8155454038368009, 0.8284627670247386, 0.7440460226630737, 0.8383901711678877, 0.9176876584197461, 0.8867356968591616, 0.8800298236584221, 0.8534696245512979, 0.9166524864925935, 0.8205450625187547, 0.8235830983361883, 0.8610359125511253, 0.8534495672661243, 0.8343550724006359, 0.826657313239454, 0.8327557274202153, 0.8263809690050867, 0.8449533999089178, 0.7403854533869694, 0.8862881836134406, 0.80930312554624, 0.8390349727384677, 0.7812820207595776, 0.8405256568966404, 0.7208619973606759, 0.8237972236612818, 0.8652031422452744, 0.7788070757633151, 0.8795942431527423, 0.8603826742129177, 0.83330392945359, 0.8487413534443429, 0.8085704307615089, 0.8862416492592033, 0.8154708608934949, 0.8949611666064037, 0.8189329260750865, 0.8328395987596068, 0.9158502403398057, 0.8066900361300818, 0.9277331317048729]
thre = 0.874  # an arbitrarily chosen decision threshold
tp = 0  # true positives
tn = 0  # true negatives
fp = 0  # false positives
fn = 0  # false negatives
for t4, t5 in zip(y_true, y_test):
if t4 == 1 and t5 >= thre:
tp += 1
elif t4 == 1:
fn += 1
elif t4 == 0 and t5 < thre:
tn += 1
else:
fp += 1
data = {
"真正": tp,
"真负": tn,
"假正": fp,
"假负": fn
}
print("混淆矩阵数据:", data)
p = tp / (tp + fp)  # precision: of the samples predicted positive, how many are truly positive
r = tp / (tp + fn)  # recall: how many of the actual positives were predicted correctly
acc = (tp + tn) / (tp + tn + fp + fn)  # accuracy: correctly classified samples over all samples
f1 = 2 * p * r / (p + r )
beta = 2
# (1 + β × β) × P × R
# Fβ = ──────────────────────
# (β × β) × P + R
f2 = (1+beta*beta) * p * r / (beta*beta*p+r)
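# Worked check of the F-beta formula above (illustrative numbers, not taken from this data):
# with P = 0.5, R = 1.0 and beta = 2:  F2 = 5 * 0.5 * 1.0 / (4 * 0.5 + 1.0) = 2.5 / 3 ≈ 0.833,
# i.e. beta = 2 weights recall more heavily than precision.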
data2 = {
"准确率": acc,
"精确率": p,
"召回率": r,
"f1值": f1,
"f2值": f2,
}
print('通过精确率,召回率计算的结果:', data2)
# auc
auc = roc_auc_score(y_true, y_test)
# precision
p = precision_score(y_true, np.array(y_test) > thre)
# recall
r = recall_score(y_true, np.array(y_test) > thre)
# acc
acc = accuracy_score(y_true, np.array(y_test) > thre)
f1 = f1_score(y_true, np.array(y_test) > thre)
f2 = fbeta_score(y_true, np.array(y_test) > thre, beta=2)
data3 = {
"准确率": acc,
"ROC曲线下面积": auc,
"f1值": f1,
"f2值": f2,
"精确率": p,
"召回率": r,
}
print('通过sklearn计算的结果:', data3)
y_true = [0, 1, 2, 2, 2]
y_test = [0, 0, 2, 2, 1]
target_names = ['class 0', 'class 1', 'class 2']
print(classification_report(y_true, y_test, target_names=target_names))
def main():
pass
if __name__ == '__main__':
main()
|
[
"gswyhq@126.com"
] |
gswyhq@126.com
|
4c1785f655e01342cbdda1667b1a388889254f6b
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_pow3/trend_poly/cycle_7/ar_12/test_artificial_32_pow3_poly_7_12_100.py
|
7f5a2931b6b59c48d8a1216fadd94ec7826eabbc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232
| 2017-09-21T11:19:04
| 2017-09-21T11:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 7, transform = "pow3", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
942f5be0fdd8ad8c418f4b959e1310af74cb20ff
|
3c76dd3d7eda65236ff47da97c04d7b1b6eb7c15
|
/recursion/questions/pow.py
|
fa7c82cd42821e8d05266f7451e8c4c63bed16e0
|
[
"MIT"
] |
permissive
|
devclassio/200-interview-algorithm-questions
|
b672629c93ca99fcf626cb34254f0ef1b5e2731d
|
ab6a41f3399d8ae58acf0aebb285ca6de744433c
|
refs/heads/main
| 2023-02-21T01:39:03.060131
| 2021-01-25T16:11:24
| 2021-01-25T16:11:24
| 330,233,973
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
'''
Can't cache here! arr is too big! need to use this math trick :)
'''
class OmerSolution:
'''
Great example! to make this work with memoize i think arr must be of len n/2
'''
def myPow(self, x, n):
self.cache = [None] * (abs(n) + 1)
self.cache[0] = 1
def helper(x, n):
if self.cache[n]:
return self.cache[n]
if self.cache[n-1]:
return x * self.cache[n-1]
self.cache[n-1] = helper(x, n - 1)
return x * self.cache[n - 1]
return helper(x, n) if n > 0 else 1 / helper(x, -n)
class Solution:
def myPow(self, x, n):
def helper(x, n):
if n == 0:
return 1
# compute
if (n - 1) % 2 == 0:
oneBefore = helper(x * x, int(n / 2))
else:
oneBefore = x * helper(x * x, int((n - 1) / 2))
# solution
return x * oneBefore
if n < 0:
x = 1 / x
n = -n
squared = x * x
exp = n / 2 if n % 2 == 0 else (n - 1) / 2
return helper(squared, exp) if n % 2 == 0 else x * helper(squared, exp)
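# Quick sanity check (hypothetical usage, not part of the original file):
#   Solution().myPow(2.0, 10)   # -> 1024.0
#   Solution().myPow(2.0, -2)   # -> 0.25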
|
[
"arieg419@gmail.com"
] |
arieg419@gmail.com
|
cb03a855464cc9c2f80b4f406ed8aaac4d1c0f3f
|
e5acfe14534911fb42ab7715331abda164a3a93b
|
/devel/lib/python2.7/dist-packages/adhoc_communication/msg/_ExpCluster.py
|
a1eb356df41d275f3ee571397b570d141b72bb3b
|
[] |
no_license
|
mgr4dv/surf_inspec
|
964095590c58967d6183ac16e755192922bf8af4
|
ada5332edaebe622fa403dd8f5233b01b8b16559
|
refs/heads/master
| 2021-07-04T17:29:06.141297
| 2017-09-27T23:40:28
| 2017-09-27T23:40:28
| 103,476,523
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,101
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from adhoc_communication/ExpCluster.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import adhoc_communication.msg
class ExpCluster(genpy.Message):
_md5sum = "378b1f01ebed06706a22e7cc27608df5"
_type = "adhoc_communication/ExpCluster"
_has_header = False #flag to mark the presence of a Header object
_full_text = """ExpClusterElement[] ids_contained
float64 bid
================================================================================
MSG: adhoc_communication/ExpClusterElement
int64 id
string detected_by_robot_str
"""
__slots__ = ['ids_contained','bid']
_slot_types = ['adhoc_communication/ExpClusterElement[]','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
ids_contained,bid
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ExpCluster, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.ids_contained is None:
self.ids_contained = []
if self.bid is None:
self.bid = 0.
else:
self.ids_contained = []
self.bid = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.ids_contained)
buff.write(_struct_I.pack(length))
for val1 in self.ids_contained:
buff.write(_struct_q.pack(val1.id))
_x = val1.detected_by_robot_str
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_d.pack(self.bid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.ids_contained is None:
self.ids_contained = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.ids_contained = []
for i in range(0, length):
val1 = adhoc_communication.msg.ExpClusterElement()
start = end
end += 8
(val1.id,) = _struct_q.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.detected_by_robot_str = str[start:end].decode('utf-8')
else:
val1.detected_by_robot_str = str[start:end]
self.ids_contained.append(val1)
start = end
end += 8
(self.bid,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.ids_contained)
buff.write(_struct_I.pack(length))
for val1 in self.ids_contained:
buff.write(_struct_q.pack(val1.id))
_x = val1.detected_by_robot_str
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_d.pack(self.bid))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.ids_contained is None:
self.ids_contained = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.ids_contained = []
for i in range(0, length):
val1 = adhoc_communication.msg.ExpClusterElement()
start = end
end += 8
(val1.id,) = _struct_q.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.detected_by_robot_str = str[start:end].decode('utf-8')
else:
val1.detected_by_robot_str = str[start:end]
self.ids_contained.append(val1)
start = end
end += 8
(self.bid,) = _struct_d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_q = struct.Struct("<q")
_struct_d = struct.Struct("<d")
|
[
"builder@kududyn.com"
] |
builder@kududyn.com
|
9eab7ac63befed2d25ff7d06879122dcee603533
|
bf1711af678a07b2030166d98b77f1320f16b940
|
/scripts/diagnostics/gradlogpconvadv.py
|
2987d2f89a2c7b9e2b7c7ba81ac2e6c924ada93e
|
[
"MIT"
] |
permissive
|
SFPD/rlreloaded
|
81e2ee489389145092cd425f305f9f50a7fd1ec9
|
650c64ec22ad45996c8c577d85b1a4f20aa1c692
|
refs/heads/master
| 2021-01-01T17:47:55.592481
| 2015-06-16T16:33:06
| 2015-06-16T16:37:19
| 37,540,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,398
|
py
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import theano #pylint: disable=F0401
import copy
import numpy as np
from control4.misc.console_utils import Message
from control4.algs.save_load_utils import load_agent_and_mdp,get_mdp,construct_agent
from control4.algs.advantage_est import demean_timeserieses
from control4.misc.randomness import random_indices
from control4.config import floatX #pylint: disable=W0611
from control4.core.rollout import rollout
from control4.maths import symbolic
import theano.tensor as TT
# ipython --pylab -i gradlogpconvadv.py -- --num_trajs=1000 --max_steps=200 --horizon=50 --agent_module=control4.agents.nn_reactive_agent --mdp_name=mjc:3swimmer
def pairwise_correlate(x_sm, y_tn,mode='valid'):
"""
Use FFT to compute correlation between pairs of channels of x and y
"""
S,M = x_sm.shape
T,N = y_tn.shape
U = S+T-1 # if mode==valid we can use less padding
px_um = np.concatenate([x_sm,np.zeros((U-S,M))])
py_un = np.concatenate([y_tn,np.zeros((U-T,N))])
qpx_um = np.fft.fft(px_um,axis=0) #pylint: disable=E1103,E1101
qpy_un = np.fft.fft(py_un,axis=0) #pylint: disable=E1103,E1101
qconv_umn = qpx_um[:,:,None] * np.conj(qpy_un[:,None,:])
conv_umn = np.fft.ifft(qconv_umn,axis=0).real #pylint: disable=E1103,E1101
if mode == "valid":
assert T<S
return conv_umn[:S-T+1]
else:
raise NotImplementedError
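# Editor's note (sketch, not part of the original script): the function above uses the
# correlation theorem, corr(x, y) = IFFT( FFT(x) * conj(FFT(y)) ); zero-padding both
# inputs to length S+T-1 avoids circular wrap-around, and the [:S-T+1] slice keeps only
# the fully-overlapping ('valid') lags.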
def test_pairwise_correlate():
x = np.random.randn(10,3)
y = np.random.randn(8,2)
corr0 = pairwise_correlate(x,y,'valid')
corr1 = np.empty((x.shape[0] - y.shape[0] + 1, x.shape[1],y.shape[1]))
for (i,xcol) in enumerate(x.T):
for (j,ycol) in enumerate(y.T):
corr1[:,i,j] = np.correlate(xcol,ycol,mode='valid')
assert np.allclose(corr0,corr1,atol=1e-7)
def make_gradlogps(mdp,agent):
o = TT.matrix("o",mdp.output_dtype("o"))
b = TT.matrix("b",agent.output_dtype("b"))
newa = agent.ponder({"o":o})["a"]
logp_n = agent.cpd().logliks(newa, b)
def onegrad(i):
logp1 = theano.clone(logp_n, replace = {b:b[i:i+1],o:o[i:i+1]})[0]
return symbolic.flatten(TT.grad(logp1, agent.policy_vars()))
gradlogps,_ = theano.map(onegrad, TT.arange(logp_n.shape[0]))
f = theano.function([o,b],gradlogps)
return f
def main():
# test_pairwise_correlate()
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--hdf")
parser.add_argument("--agent_module")
parser.add_argument("--mdp_name")
parser.add_argument("--horizon",type=int,default=20)
parser.add_argument("--num_trajs",type=int,default=100)
parser.add_argument("--max_steps",type=int,default=500)
parser.add_argument("--load_idx",type=int,default=-1)
parser.add_argument("--plot_mode",choices=["off","save","interactive"],default="off")
parser.add_argument("--plot_save_prefix")
parser.add_argument("--gamma",type=float,default=0.99)
parser.add_argument("--outfile")
# ValueParams.add_to_parser(parser)
np.random.seed(0)
args = parser.parse_args()
assert bool(args.hdf) != (bool(args.agent_module) and bool(args.mdp_name))
if args.hdf:
agent, mdp, _hdf = load_agent_and_mdp(args.hdf,args.load_idx)
elif args.agent_module:
mdp = get_mdp(args.mdp_name)
agent = construct_agent({"agent_module":args.agent_module},mdp)
fs_gradlogps = make_gradlogps(mdp,agent)
n_params = agent.policy.size()
horizon=args.horizon
from collections import namedtuple
Path = namedtuple("Path",['o','c','b'])
save_arrs = ["o","b","c"]
with Message("Doing rollouts"):
paths = []
for i_path in xrange(args.num_trajs):
if i_path % 20 == 0:
print "%i/%i done"%(i_path,args.num_trajs)
init,traj = rollout(mdp, agent, args.max_steps,save_arrs=save_arrs)
o = np.concatenate([init["o"]]+traj["o"][:-1])
c = np.concatenate(traj["c"])
b = np.concatenate(traj["b"])
paths.append(Path(o,c,b))
# vf = LinearVF(use_m=False, use_o=True, legendre_degree = 2, use_product_features=False)
# vf.make_funcs()
# with Message("Fitting value function"):
# fit_linear_vf_single_path(vf, paths, args)
li_c_t = [path.c.sum(axis=1) for path in paths]
li_dmc_t = copy.deepcopy(li_c_t)
demean_timeserieses(li_dmc_t)
# li_delta_t = []
# for (path,c_t) in zip(paths,li_c_t):
# v_t = vf.fs_vfunc(path.prevm_tg,path.o_tf)
# li_delta_t.append( c_t + args.gamma*v_t[1:] - v_t[:-1] )
li_serieses = zip(li_dmc_t)
series_names=["demeaned costs"]
n_series = len(series_names)
li_corr = [np.zeros((horizon,n_params)) for _ in xrange(n_series)]
corr_tkz = np.zeros((horizon,n_series,n_params))
sqcorr_tkz = np.zeros((horizon,n_series,n_params))
count = 0
for (i_path,path,serieses) in zip(xrange(len(paths)),paths,li_serieses):
if i_path % 20 == 0:
print "%i/%i done"%(i_path,len(paths))
sig_tk = np.array(serieses).T
grad_tz = fs_gradlogps(path.o,path.b)
newcorr_tzk = pairwise_correlate( sig_tk, grad_tz[:-horizon+1], mode='valid')
corr_tkz += newcorr_tzk
sqcorr_tkz += newcorr_tzk**2
# for (li_series_t,corr_tz) in zip(li_li_series,li_corr):
# for z in xrange(n_params):
# corr_tkz[:,z] += scipy.signal.correlate(li_series_t[i_path], grad_tz[:-horizon+1,z],mode='valid')
# count += (grad_tz.shape[0]-horizon)
count += 1
corr_tkz /= count
sqcorr_tkz /= count
stderr_tkz = np.sqrt( (sqcorr_tkz - corr_tkz**2)/len(paths) )
# NOTE stderr is not totally legit
plot_stderr = True
zs = random_indices(n_params,30)
# plot_stderr = False
# zs = np.arange(n_params)
for (i_series,_corr_tz) in enumerate(li_corr):
plt.figure(i_series+1)
plt.clf()
plt.title(series_names[i_series])
for z in zs:
line, = plt.plot(corr_tkz[:,i_series,z])
if plot_stderr: plt.fill_between(np.arange(horizon), corr_tkz[:,i_series,z] - stderr_tkz[:,i_series,z],corr_tkz[:,i_series,z] + stderr_tkz[:,i_series,z],alpha=.1,color=line.get_color())
if args.outfile: plt.savefig(args.outfile)
if __name__ == "__main__":
main()
|
[
""
] | |
90dac013e7b8d3564d99078b2df9f789bb89833d
|
d6c86a4302debcf730516ac5bba8ad32d44faf82
|
/Final Project/sportclubs/teammanager/urls.py
|
5dbdda6101ef6e59f1e409323ee318db97b25b11
|
[] |
no_license
|
patrickmcgreevy/SportClubManager
|
7cd51c1af20d6092a210640d038b3d8075962166
|
8f067aef7b2319c329bbf6db29836fc352635263
|
refs/heads/master
| 2020-04-16T17:49:13.846890
| 2019-04-27T00:14:43
| 2019-04-27T00:14:43
| 165,790,239
| 1
| 0
| null | 2019-04-27T00:14:43
| 2019-01-15T05:22:22
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.urls import path, reverse
from . import views
urlpatterns = [
path('', views.AllClubs.as_view(), name='clubshome'),
path('myclubs/', views.UserClubs.as_view(), name='userclubs'),
path('<int:pk>/', views.ClubDetails.as_view(), name='clubdetails'),
path('<int:pk>/officer_details/', views.ClubOfficerDetails.as_view(), name='clubofficerdetails'),
path('<int:pk>/change_members/', views.ClubMemberChange.as_view(), name='changemembers'),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
a79901d7cd6230cf60535dc9cffd9a91da0145c5
|
82e0c57e5b133d27e2380c9f809c2b338b3bc52c
|
/test/aqua/operators/test_op_construction.py
|
e5419631592af242da1fd109f730d14fcc7fdf60
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ryzbaka/qiskit-aqua
|
62b3e50d60f080ed8aa1b9a484fcd508bc1139b3
|
c1375ff5a1e7cf06d6691519f3ca4feb32e1a747
|
refs/heads/master
| 2022-07-19T03:55:58.393568
| 2020-05-20T14:41:12
| 2020-05-20T14:41:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,065
|
py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import numpy as np
from qiskit import QuantumCircuit
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.circuit.library import CZGate
from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, PrimitiveOp, SummedOp, PauliOp, Minus
# pylint: disable=invalid-name
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
def test_pauli_primitives(self):
""" from to file test """
newop = X ^ Y ^ Z ^ I
self.assertEqual(newop.primitive, Pauli(label='XYZI'))
kpower_op = (Y ^ 5) ^ (I ^ 3)
self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
kpower_op2 = (Y ^ I) ^ 4
self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
# Check immutability
self.assertEqual(X.primitive, Pauli(label='X'))
self.assertEqual(Y.primitive, Pauli(label='Y'))
self.assertEqual(Z.primitive, Pauli(label='Z'))
self.assertEqual(I.primitive, Pauli(label='I'))
def test_composed_eval(self):
""" Test eval of ComposedOp """
self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
def test_evals(self):
""" evals test """
# pylint: disable=no-member
# TODO: Think about eval names
self.assertEqual(Z.eval('0').eval('0'), 1)
self.assertEqual(Z.eval('1').eval('0'), 0)
self.assertEqual(Z.eval('0').eval('1'), 0)
self.assertEqual(Z.eval('1').eval('1'), -1)
self.assertEqual(X.eval('0').eval('0'), 0)
self.assertEqual(X.eval('1').eval('0'), 1)
self.assertEqual(X.eval('0').eval('1'), 1)
self.assertEqual(X.eval('1').eval('1'), 0)
self.assertEqual(Y.eval('0').eval('0'), 0)
self.assertEqual(Y.eval('1').eval('0'), -1j)
self.assertEqual(Y.eval('0').eval('1'), 1j)
self.assertEqual(Y.eval('1').eval('1'), 0)
# Check that Pauli logic eval returns same as matrix logic
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
pauli_op = Z ^ I ^ X ^ Y
mat_op = PrimitiveOp(pauli_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
# print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
# mat_op.eval(bstr1, bstr2)))
np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
mat_op.eval(bstr1).eval(bstr2))
gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
PrimitiveOp(Operator.from_label('+r0I')),
3 * (X ^ CX ^ T)], coeff=3 + .2j)
gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
gnarly_mat_op.eval(bstr1).eval(bstr2))
def test_circuit_construction(self):
""" circuit construction test """
hadq2 = H ^ I
cz = hadq2.compose(CX).compose(hadq2)
qc = QuantumCircuit(2)
qc.append(cz.primitive, qargs=range(2))
ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
def test_io_consistency(self):
""" consistency test """
new_op = X ^ Y ^ I
label = 'XYI'
# label = new_op.primitive.to_label()
self.assertEqual(str(new_op.primitive), label)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
Operator.from_label(label).data)
self.assertEqual(new_op.primitive, Pauli(label=label))
x_mat = X.primitive.to_matrix()
y_mat = Y.primitive.to_matrix()
i_mat = np.eye(2, 2)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
np.kron(np.kron(x_mat, y_mat), i_mat))
hi = np.kron(H.to_matrix(), I.to_matrix())
hi2 = Operator.from_label('HI').data
hi3 = (H ^ I).to_matrix()
np.testing.assert_array_almost_equal(hi, hi2)
np.testing.assert_array_almost_equal(hi2, hi3)
xy = np.kron(X.to_matrix(), Y.to_matrix())
xy2 = Operator.from_label('XY').data
xy3 = (X ^ Y).to_matrix()
np.testing.assert_array_almost_equal(xy, xy2)
np.testing.assert_array_almost_equal(xy2, xy3)
# Check if numpy array instantiation is the same as from Operator
matrix_op = Operator.from_label('+r')
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# Ditto list of lists
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# TODO make sure this works once we resolve endianness mayhem
# qc = QuantumCircuit(3)
# qc.x(2)
# qc.y(1)
# from qiskit import BasicAer, QuantumCircuit, execute
# unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
# np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
def test_to_matrix(self):
"""to matrix text """
np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
op1 = Y + H
np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
op2 = op1 * .5
np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
op3 = (4 - .6j) * op2
np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
op4 = op3.tensor(X)
np.testing.assert_array_almost_equal(op4.to_matrix(),
np.kron(op3.to_matrix(), X.to_matrix()))
op5 = op4.compose(H ^ I)
np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
(H ^ I).to_matrix()))
op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
np.testing.assert_array_almost_equal(
op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
def test_adjoint(self):
""" adjoint test """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
np.testing.assert_array_almost_equal(np.conj(np.transpose(gnarly_op.to_matrix())),
gnarly_op.adjoint().to_matrix())
def test_primitive_strings(self):
""" get primitives test """
self.assertEqual(X.primitive_strings(), {'Pauli'})
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
self.assertEqual(gnarly_op.primitive_strings(), {'QuantumCircuit', 'Matrix'})
def test_to_pauli_op(self):
""" Test to_pauli_op method """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
mat_op = gnarly_op.to_matrix_op()
pauli_op = gnarly_op.to_pauli_op()
self.assertIsInstance(pauli_op, SummedOp)
for p in pauli_op:
self.assertIsInstance(p, PauliOp)
np.testing.assert_array_almost_equal(mat_op.to_matrix(), pauli_op.to_matrix())
def test_circuit_permute(self):
r""" Test the CircuitOp's .permute method """
perm = range(7)[::-1]
c_op = (((CX ^ 3) ^ X) @
(H ^ 7) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X) @
(Y ^ (CX ^ 3)) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X))
c_op_perm = c_op.permute(perm)
self.assertNotEqual(c_op, c_op_perm)
c_op_id = c_op_perm.permute(perm)
self.assertEqual(c_op, c_op_id)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9dc8e842d1c50ed74d1c5b4728ef47282db16f7c
|
cf43421567c1634abe1df885c6e185a180659708
|
/Extract/common.py
|
cd7e1ac1c4a6b11a70a4290fe71d6d2217580e77
|
[] |
no_license
|
fabio-gz/ETL_newspaper
|
4c5239892098840a730ecf3b58452054a50e914b
|
7458701eab76821a1fd65f0821356b1e7924bc97
|
refs/heads/master
| 2023-01-11T05:01:39.773346
| 2020-11-16T22:10:57
| 2020-11-16T22:10:57
| 292,719,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# cargar yaml
import yaml
#global var
__config = None
def config():
global __config
if not __config:
with open('config.yml', mode='r') as f:
__config = yaml.safe_load(f)
return __config
|
[
"fabiogomez.silva@gmail.com"
] |
fabiogomez.silva@gmail.com
|
3cbc9bfba6c7cc7ac49325cfc8ffaf1622d354b1
|
bdaed512916fcf96e5dc915538fe8598aeb2d3cf
|
/mcex/history/nphistory.py
|
f042a176b3e0f6b83b0c8e8c2c2c693ec6657ff1
|
[] |
no_license
|
jsalvatier/mcex
|
9657cc2e8083f4e4dd013baaaceba08f9a48754e
|
040f49bfd6eb467ef4d50d15de25033b1ba52c55
|
refs/heads/master
| 2021-06-18T19:02:07.055877
| 2017-01-22T01:10:01
| 2017-01-22T01:10:01
| 1,455,409
| 9
| 3
| null | 2012-06-21T18:07:36
| 2011-03-08T17:02:42
|
Python
|
UTF-8
|
Python
| false
| false
| 954
|
py
|
'''
Created on Mar 15, 2011
@author: jsalvatier
'''
import numpy as np
class NpHistory(object):
"""
encapsulates the recording of a process chain
"""
def __init__(self, max_draws):
self.max_draws = max_draws
self.samples = {}
self.nsamples = 0
def record(self, point):
"""
records the position of a chain at a certain point in time
"""
if self.nsamples < self.max_draws:
for var, value in point.iteritems():
try :
s = self.samples[var]
except:
s = np.empty((self.max_draws,) + value.shape)
self.samples[var] = s
s[self.nsamples,...] = value
self.nsamples += 1
else :
raise ValueError('out of space!')
def __getitem__(self, key):
return self.samples[key][0:self.nsamples,...]
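# Editor's usage sketch (not part of the original module; variable names are illustrative):
# history = NpHistory(max_draws=1000)
# history.record({'mu': np.array([0.1]), 'tau': np.array([2.0])})
# history['mu'] # -> the values recorded for 'mu' so far, shape (nsamples, 1)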
|
[
"jsalvatier@gmail.com"
] |
jsalvatier@gmail.com
|
159ae11c00f6321cb99a0ef0d0efc843e5b9f5ce
|
8d034478e79e5653bc3d43656925d480c2f4d5ea
|
/image_detection.py
|
0e0c0978699965c5bada9201c6c1a64edf4a5297
|
[] |
no_license
|
stavik476/last_project_stav
|
515e907d871f59dfda12797411a7eee32d25550d
|
1f61204a9bc7d6cb03807b89db6085ea085320c4
|
refs/heads/main
| 2023-04-26T02:30:28.914181
| 2021-05-25T14:23:05
| 2021-05-25T14:23:05
| 370,720,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,777
|
py
|
# Import packages
import os
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from the_utils import label_map_util
from the_utils import visualization_utils as vis_util
#Change the test image name
IMAGE_NAME = 'red_lights.jpg'
# Number of classes the object detector can identify
NUM_CLASSES = 7
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = "models/frozen_inference_graph.pb"
# Path to label map file
PATH_TO_LABELS = "models\label_map.pbtxt"
# Path to image
PATH_TO_IMAGE = os.path.join("D:\models\Research\object_detection",'Testing\images',IMAGE_NAME)
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
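# Editor's illustration (assumed values, not read from this project's label map): the
# category_index built below is simply a dict such as
# {1: {'id': 1, 'name': 'traffic light'}, 2: {'id': 2, 'name': 'stop sign'}, ...},
# so any mapping from class id to a {'id', 'name'} dict could be substituted.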
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visulaize the results')
coordinates = vis_util.coordinates_find(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.8)
print(coordinates)
name = "Traffic light"
squares = []
for cord in coordinates:
rows = []
if "1" == cord[4][0].split(":")[0]:
for i in range(4):
print(cord[i]%0.25)
if (cord[i] % 0.25) < 0.03 and (0 != int(cord[i] / 0.25) and (i != 0) and (i != 1)):
rows.append(int(cord[i]/0.25))
else:
rows.append((int(cord[i]/0.25) + 1))
#squares.append((5 - rows[0]) * 4 - rows[1] + 1)
#squares.append((5 - rows[0]) * 4 - rows[3] + 1)
#squares.append((5 - rows[2]) * 4 - rows[1] + 1)
#squares.append((5 - rows[2]) * 4 - rows[3] + 1)
for j in range(rows[2] - rows[0] + 1):
for t in range(rows[3] - rows[1] + 1):
squares.append((5 - rows[0] - j) * 4 - rows[1] - t + 1)
print(rows)
print(squares)
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.6)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
|
[
"71749280+stavik476@users.noreply.github.com"
] |
71749280+stavik476@users.noreply.github.com
|
c52d673bdcbfae703d470556fea4604762501224
|
91f2e23782b05aa1fb273f3170c50dc4185e8dc1
|
/clif/pybind11/staging/virtual_funcs_basics_test.py
|
6238c3144d5233fd2ad32b961ceef33c93be6b74
|
[
"Apache-2.0"
] |
permissive
|
anukaal/clif
|
152fd58e575b90d626a300875aac71cdf69ec6a3
|
8ff675bf93599f4d4a4865376b441d8d0551fd54
|
refs/heads/main
| 2023-08-03T19:47:00.538660
| 2021-09-14T05:50:43
| 2021-09-30T01:00:14
| 406,238,691
| 0
| 0
|
Apache-2.0
| 2021-09-14T05:39:04
| 2021-09-14T05:39:03
| null |
UTF-8
|
Python
| false
| false
| 3,058
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.pybind11.staging.virtual_funcs_basics.
This file is a copy of clif/testing/python/virtual_funcs_basics_test.py.
"""
import unittest
from clif.pybind11.staging import virtual_funcs_basics
class B(virtual_funcs_basics.B):
def __init__(self):
virtual_funcs_basics.B.__init__(self)
self.c = -1
def set_c(self, v):
self.c = v
class K(virtual_funcs_basics.K):
def inc(self, n):
self.i += n
class L(virtual_funcs_basics.Q):
def __init__(self, max_len):
virtual_funcs_basics.Q.__init__(self)
self._q = []
self._max = max_len
def data(self):
return list(self._q)
def PossiblyPush(self, data):
if len(self._q) < self._max:
self._q.append(data)
return True
return False
class AbstractClassNonDefConstImpl(
virtual_funcs_basics.AbstractClassNonDefConst):
def DoSomething(self):
return self.a * self.b
class ClassNonDefConstImpl(virtual_funcs_basics.ClassNonDefConst):
def __init__(self, a, b):
super().__init__(a, b)
self.c = [1, 2, 3] # Must have a non-trivial container to enable gc.
# Remove self.invalidated after gaining (limited) access to invalidated ptr.
self.invalidated = False
def DoSomething(self):
return -1 if self.invalidated else self.a * self.b
class VirtualFuncsTest(unittest.TestCase):
def testInitConcreteClassWithVirtualMethods(self):
b = virtual_funcs_basics.B()
b.set_c(2)
self.assertEqual(b.c, 2)
c = virtual_funcs_basics.ClassNonDefConst(1, 2)
self.assertEqual(c.DoSomething(), 3)
def testBasicCall(self):
b = B()
b.set_c(2)
self.assertEqual(b.c, 2)
virtual_funcs_basics.Bset(b, 4)
self.assertEqual(b.c, 4)
def testVirtual(self):
self.assertEqual(virtual_funcs_basics.seq(K(), 2, 6), [0, 2, 4, 6])
abc_non_def_impl = AbstractClassNonDefConstImpl(4, 5)
self.assertEqual(abc_non_def_impl.DoSomething(), 20)
self.assertEqual(virtual_funcs_basics.DoSomething1(abc_non_def_impl), 20)
non_def_impl = ClassNonDefConstImpl(4, 5)
self.assertEqual(non_def_impl.DoSomething(), 20)
self.assertEqual(virtual_funcs_basics.DoSomething2(non_def_impl), 20)
def testVirtual2(self):
q = L(3)
self.assertEqual(virtual_funcs_basics.add_seq(q, 2, 6), 3)
self.assertEqual(q.data(), [0, 2, 4])
def testVirtualProperty(self):
c = virtual_funcs_basics.D()
c.pos_c = -1
self.assertEqual(c.pos_c, 1)
if __name__ == '__main__':
unittest.main()
|
[
"rwgk@google.com"
] |
rwgk@google.com
|
ce369289555ace6e16616cbac4ee84b00b8d594e
|
9939d9357257f10074b3d0055d70d5f278e7032f
|
/entity.py
|
b3e4eaaab491ccabdaa68dce765d27934f403a48
|
[] |
no_license
|
bravequinoaa/FlappyBirdPy
|
8068042b97f2b9829bf5fb4364b84f70b05b9f65
|
6e04c07bb6566846386b2cad5e226a072092a36b
|
refs/heads/main
| 2023-02-03T23:03:38.021231
| 2020-12-16T04:27:44
| 2020-12-16T04:27:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
import pygame
from abc import ABC, abstractmethod, ABCMeta
class Entity(ABC):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, surface, clock, width, height, color):
self.X = None
self.Y = None
self.gravity = None
@abstractmethod
def update(self):
pass
|
[
"wvano97@gmail.com"
] |
wvano97@gmail.com
|
8946b98e89bdddce94a2715d79b0fbbcb3e9740e
|
e7e6e19e293c67e412355811fd9e447b3f26443d
|
/libs/functions/__init__.py
|
a5f7f6b9dfa0d84d541d00f6f4ae843dd43084ac
|
[
"MIT"
] |
permissive
|
nga-27/SecuritiesAnalysisTools
|
6c2f57929346e01433e7d6e1176747de2dbce50a
|
7c4ce3d9d6ffb62aaf86c7d46bd7f15f4c68cbb0
|
refs/heads/master
| 2023-08-09T03:54:33.587670
| 2023-07-19T23:11:27
| 2023-07-19T23:11:27
| 180,685,810
| 5
| 2
|
MIT
| 2023-07-19T23:11:28
| 2019-04-11T00:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 123
|
py
|
""" functions to be used a single operations (or eventual API functions) """
from .functions import only_functions_handler
|
[
"namell91@gmail.com"
] |
namell91@gmail.com
|
5acdd79baad3e3b1e64e2899d6958a752a4e1fbd
|
ec2490a6628ea5240ee16d7ee0ab35c4bdf3f954
|
/gaurabda/GCEarthData.py
|
6312b600eae9579137e7dd37368ff3a49ed3cacb
|
[
"MIT"
] |
permissive
|
tuksik/gaurabda-calendar-ekadasi
|
26e0f13112949ec9a8895bc1b0bccbc587544ae5
|
36f00a497bc30c041619baa1e9551e3a16021e4e
|
refs/heads/master
| 2022-12-30T09:32:51.683966
| 2020-10-16T14:35:03
| 2020-10-16T14:35:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,436
|
py
|
import math
import gaurabda.GCMath as GCMath
import gaurabda.GCAyanamsha as GCAyanamsha
import gaurabda.GCTimeZone as GCTimeZone
import gaurabda.GCStrings as GCStrings
import gaurabda.GCCoords as GCCoords
import gaurabda.GCUT as GCUT
import gaurabda.GCGregorianDate as GCGregorianDate # assumed module path; needed by GetNextAscendentStart below
def calc_epsilon_phi(date):
arg_mul = [
[ 0, 0, 0, 0, 1],
[-2, 0, 0, 2, 2],
[ 0, 0, 0, 2, 2],
[ 0, 0, 0, 0, 2],
[ 0, 1, 0, 0, 0],
[ 0, 0, 1, 0, 0],
[-2, 1, 0, 2, 2],
[ 0, 0, 0, 2, 1],
[ 0, 0, 1, 2, 2],
[-2,-1, 0, 2, 2],
[-2, 0, 1, 0, 0],
[-2, 0, 0, 2, 1],
[ 0, 0,-1, 2, 2],
[ 2, 0, 0, 0, 0],
[ 0, 0, 1, 0, 1],
[ 2, 0,-1, 2, 2],
[ 0, 0,-1, 0, 1],
[ 0, 0, 1, 2, 1],
[-2, 0, 2, 0, 0],
[ 0, 0,-2, 2, 1],
[ 2, 0, 0, 2, 2],
[ 0, 0, 2, 2, 2],
[ 0, 0, 2, 0, 0],
[-2, 0, 1, 2, 2],
[ 0, 0, 0, 2, 0],
[-2, 0, 0, 2, 0],
[ 0, 0,-1, 2, 1],
[ 0, 2, 0, 0, 0],
[ 2, 0,-1, 0, 1],
[-2, 2, 0, 2, 2],
[ 0, 1, 0, 0, 1]
]
arg_phi = [
[-171996,-1742],
[ -13187, -16],
[ -2274, -2],
[ 2062, 2],
[ 1426, -34],
[ 712, 1],
[ -517, 12],
[ -386, -4],
[ -301, 0],
[ 217, -5],
[ -158, 0],
[ 129, 1],
[ 123, 0],
[ 63, 0],
[ 63, 1],
[ -59, 0],
[ -58, -1],
[ -51, 0],
[ 48, 0],
[ 46, 0],
[ -38, 0],
[ -31, 0],
[ 29, 0],
[ 29, 0],
[ 26, 0],
[ -22, 0],
[ 21, 0],
[ 17, -1],
[ 16, 0],
[ -16, 1],
[ -15, 0]
]
arg_eps = [
[ 92025, 89],
[ 5736, -31],
[ 977, -5],
[ -895, 5],
[ 54, -1],
[ -7, 0],
[ 224, -6],
[ 200, 0],
[ 129, -1],
[ -95, 3],
[ 0, 0],
[ -70, 0],
[ -53, 0],
[ 0, 0],
[ -33, 0],
[ 26, 0],
[ 32, 0],
[ 27, 0],
[ 0, 0],
[ -24, 0],
[ 16, 0],
[ 13, 0],
[ 0, 0],
[ -12, 0],
[ 0, 0],
[ 0, 0],
[ -10, 0],
[ 0, 0],
[ -8, 0],
[ 7, 0],
[ 9, 0]
]
t = ( date -2451545.0)/36525
delta_phi = 0.0
# longitude of rising knot
omega =GCMath.putIn360(125.04452+(-1934.136261+(0.0020708+1.0/450000*t)*t)*t)
if True:
l = 280.4665+36000.7698*t
ls = 218.3165+481267.8813*t
delta_epsilon = 9.20 * GCMath.cosDeg(omega)+ 0.57* GCMath.cosDeg(2*l)+ 0.10* GCMath.cosDeg(2*ls) - 0.09*GCMath.cosDeg(2*omega)
delta_phi =(-17.20* GCMath.sinDeg(omega)- 1.32*GCMath.sinDeg(2*l)-0.23*GCMath.sinDeg(2*ls) + 0.21*GCMath.sinDeg(2*omega))/3600
else:
# mean elongation of moon to sun
d = GCMath.putIn360(297.85036+(445267.111480+(-0.0019142+t/189474)*t)*t)
# mean anomaly of the sun
m =GCMath.putIn360(357.52772+(35999.050340+(-0.0001603-t/300000)*t)*t)
# mean anomaly of the moon
ms =GCMath.putIn360(134.96298+(477198.867398+(0.0086972+t/56250)*t)*t)
# argument of the latitude of the moon
f = GCMath.putIn360(93.27191+(483202.017538+(-0.0036825+t/327270)*t)*t)
delta_phi = 0
delta_epsilon = 0
for i in range(31):
s= arg_mul[i][0]*d + arg_mul[i][1]*m + arg_mul[i][2]*ms + arg_mul[i][3]*f + arg_mul[i][4]*omega
delta_phi = delta_phi+(arg_phi[i][0]+arg_phi[i][1]*t*0.1)*GCMath.sinDeg(s)
delta_epsilon = delta_epsilon+(arg_eps[i][0] + arg_eps[i][1]*t*0.1) * GCMath.cosDeg(s)
delta_phi=delta_phi*0.0001/3600
delta_epsilon=delta_epsilon*0.0001/3600
# angle of ecliptic
epsilon_0=84381.448+(-46.8150+(-0.00059+0.001813*t)*t)*t
epsilon=(epsilon_0+delta_epsilon)/3600
return delta_phi, epsilon
def eclipticalToEquatorialCoords(ecc,date):
eqc = GCCoords.GCEquatorialCoords()
epsilon = 0.0
delta_phi = 0.0
alpha = delta = 0.0
delta_phi,epsilon = calc_epsilon_phi(date)
ecc.longitude = GCMath.putIn360(ecc.longitude + delta_phi)
eqc.rightAscension = GCMath.arcTan2Deg( GCMath.sinDeg(ecc.longitude) * GCMath.cosDeg(epsilon) - GCMath.tanDeg(ecc.latitude) * GCMath.sinDeg(epsilon), GCMath.cosDeg(ecc.longitude));
eqc.declination = GCMath.arcSinDeg( GCMath.sinDeg(ecc.latitude) * GCMath.cosDeg(epsilon) + GCMath.cosDeg(ecc.latitude) * GCMath.sinDeg(epsilon) * GCMath.sinDeg(ecc.longitude));
return eqc,ecc
def equatorialToHorizontalCoords(eqc, obs, date):
hc = GCCoords.GCHorizontalCoords()
h = GCMath.putIn360(star_time(date) - eqc.rightAscension + obs.longitude_deg)
hc.azimut = GCMath.rad2deg( math.atan2(GCMath.sinDeg(h), GCMath.cosDeg(h) * GCMath.sinDeg(obs.latitude_deg) - GCMath.tanDeg(eqc.declination) * GCMath.cosDeg(obs.latitude_deg) ))
hc.elevation = GCMath.rad2deg( math.asin(GCMath.sinDeg(obs.latitude_deg) * GCMath.sinDeg(eqc.declination) + GCMath.cosDeg(obs.latitude_deg) * GCMath.cosDeg(eqc.declination) * GCMath.cosDeg(h)));
return hc
def GetTextLatitude(d):
c0 = 'S' if d < 0.0 else 'N'
d = math.fabs(d)
a0 = int(math.floor(d))
a1 = int(math.floor((d - a0)*60 + 0.5))
return "{}{}{:02d}".format(a0, c0, a1)
def GetTextLongitude(d):
c0 = 'W' if d < 0.0 else 'E'
d = math.fabs(d)
a0 = int(math.floor(d))
a1 = int(math.floor((d - a0)*60 + 0.5))
return "{}{}{:02d}".format(a0, c0, a1)
def star_time(date):
jd = date
t =(jd-2451545.0)/36525.0
delta_phi, epsilon = calc_epsilon_phi(date)
return GCMath.putIn360(280.46061837+360.98564736629*(jd-2451545.0)+
t*t*(0.000387933-t/38710000)+
delta_phi*GCMath.cosDeg(epsilon) )
class EARTHDATA:
def __init__(self):
# observated event
# 0 - center of the sun
# 1 - civil twilight
# 2 - nautical twilight
# 3 - astronomical twilight
self.obs = 0
self.longitude_deg = 0.0
self.latitude_deg = 0.0
self.tzone = 0.0
self.dst = 0
def __str__(self):
return '{}: {} {}: {} {}: {}'.format(
GCStrings.getString(10), GetTextLatitude(self.latitude_deg),
GCStrings.getString(11), GetTextLongitude(self.longitude_deg),
GCStrings.getString(12), GCTimeZone.GetTimeZoneOffsetText(self.tzone))
def GetHorizontDegrees(self,jday):
return GCMath.putIn360(star_time(jday) - self.longitude_deg - GCAyanamsha.GetAyanamsa(jday) + 155)
def GetNextAscendentStart(self, startDate):
phi = 30.0
l1 = l2 = 0.0
jday = startDate.GetJulianComplete()
xj = 0.0
d = GCGregorianDate.GCGregorianDate(date=startDate)
xd = GCGregorianDate.GCGregorianDate()
scan_step = 0.05
prev_tit = 0
new_tit = -1
l1 = self.GetHorizontDegrees(jday)
prev_tit = int(math.floor(l1/phi))
counter = 0
while counter < 20:
xj = jday
xd.Set(d)
jday += scan_step
d.shour += scan_step
if d.shour > 1.0:
d.shour -= 1.0
d.NextDay()
l2 = self.GetHorizontDegrees(jday)
new_tit = int(math.floor(l2/phi))
if prev_tit != new_tit:
jday = xj
d.Set(xd)
scan_step *= 0.5
counter+=1
continue
else:
l1 = l2
nextDate = GCGregorianDate.GCGregorianDate(date=d)
return new_tit, nextDate
def unittests():
GCUT.info('earth data')
GCUT.val(GetTextLatitude(12.5),'12N30','text latitude')
GCUT.val(GetTextLongitude(-15.25),'15W15','text longitude')
GCUT.val(star_time(2451545.0),280.45704234942144,'start time')
dp,ep = calc_epsilon_phi(2451545.0)
GCUT.val(dp,-0.0038975991170544155,'delta phi')
GCUT.val(ep,23.437690731210242,'epsilon')
|
[
"root@gopal.home.sk"
] |
root@gopal.home.sk
|
5f8bdbabaf7e01920dfee62d7b029a3ca4a594e5
|
87376c79491df2ff693cd6046689251e409d6052
|
/cwProject/dog_account/migrations/0001_initial.py
|
9f2981d9293cac972cfe539a50e3189bdc7e15e5
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-2019-spring/django-models-cw-gkg901
|
1874b63ad859a56cc1363856696a136d47f34df2
|
32b8b7135223077c75d6bcd151652cd41d7e0397
|
refs/heads/master
| 2020-04-24T04:41:50.535817
| 2019-02-22T02:58:33
| 2019-02-22T02:58:33
| 171,713,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
# Generated by Django 2.0.6 on 2019-02-20 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=30)),
('realName', models.CharField(max_length=50)),
('accountNumber', models.IntegerField(max_length=16)),
('balance', models.DecimalField(decimal_places=2, max_digits=99999999999)),
],
),
migrations.CreateModel(
name='Dog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('breed', models.CharField(max_length=50)),
('color', models.CharField(max_length=50)),
('gender', models.CharField(max_length=6)),
],
),
]
|
[
"gerren.gregory@gmail.com"
] |
gerren.gregory@gmail.com
|
a02ca8156ec3b6dbe8f89c8ae875458caa693365
|
4683d3bfe1b6ba70a566249149f55671c7bb3826
|
/game/3mouse/mousedir.py
|
aae7cdaa7e430a6e6d2c090a1e43cc79772a6f0d
|
[] |
no_license
|
crazyj7/python
|
01105fe5b8ec23b53164a3f7f8a12690abc0bf6a
|
2d0a55c8371aa138bcebb1f65b53109599d39009
|
refs/heads/master
| 2020-05-02T18:36:00.992398
| 2020-02-07T13:07:13
| 2020-02-07T13:07:13
| 178,133,678
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
'''
Rotate a surface to face the mouse position; move it with the W/A/S/D keys.
'''
import pygame
from pygame import Surface
import os, sys
import math
bExit = False
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
BLACK=(0,0,0)
pygame.init()
pad = pygame.display.set_mode( (640,480))
pygame.display.set_caption('test')
user = Surface((100,100))
# initial image, pointing along the x axis (0 degrees)
pygame.draw.polygon(user, RED, [(0,0), (100,50), (0,100)], 3)
pygame.draw.line(user, GREEN, (100,50), (0, 50), 2)
pygame.draw.rect(user, BLUE, pygame.Rect(0, 0, 100, 100), 2)
user.set_colorkey(BLACK)
pos = user.get_rect()
# starting position
pos.centerx = 100
pos.centery = 100
# print('pos (rect)= ', pos, ' current angle=0')
def rot_center2(image, angle):
'''
Rotate only the image inside while keeping the rectangular area unchanged; parts that fall outside are clipped. The center is preserved.
:param image:
:param angle:
:return:
'''
orig_rect = image.get_rect()
# rotate the image
rot_image = pygame.transform.rotate(image, angle)
rot_rect = orig_rect.copy()
# place a rect of the original image size at the center of the rotated image
rot_rect.center = rot_image.get_rect().center
# crop back to the original image size
rot_image = rot_image.subsurface(rot_rect).copy()
return rot_image
def rot_center(image, rect, angle):
'''
Rotate about the center of the rect and also return the new (possibly larger) rect. No clipping; the center is preserved.
:param image:
:param rect:
:param angle:
:return:
'''
# rotate by the given angle
rot_image = pygame.transform.rotate(image, angle)
# re-center: compute the new rect; it may be larger, nothing is clipped
rot_rect = rot_image.get_rect(center=rect.center)
return rot_image, rot_rect
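# Editor's note (illustrative numbers, not from the original): for a 100x100 Surface
# rotated by 45 degrees, rot_center2() still returns a 100x100 image with the corners
# clipped, while rot_center() returns an image of roughly 141x141 plus a rect
# re-centered on the original center.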
clock = pygame.time.Clock()
speed = 10
while not bExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
bExit=True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
bExit=True
elif event.type == pygame.MOUSEBUTTONDOWN:
pass
elif event.type == pygame.MOUSEBUTTONUP:
pass
key = pygame.key.get_pressed()
if key[pygame.K_a]:
pos.centerx -= speed
if key[pygame.K_d]:
pos.centerx += speed
if key[pygame.K_w]:
pos.centery -= speed
if key[pygame.K_s]:
pos.centery += speed
pad.fill(BLACK)
mousepos = pygame.mouse.get_pos()
# print('mousepos=', mousepos)
angle = math.atan2(pos.centery - mousepos[1], mousepos[0] - pos.centerx)
print('angle=', angle)
# the angle is in radians, in the range 0..pi and -pi..0
# user points along the x axis at 0 degrees; rotate user CCW by angle.
# conversion to degrees is required.
# img = pygame.transform.rotate(user, angle*180/math.pi)
img, rect = rot_center(user, user.get_rect(), angle*180/math.pi)
# img = rot_center2(user, angle*180/math.pi)
# pad.blit(img, (pos.x, pos.y) )
rect.centerx += pos.x
rect.centery += pos.y
pad.blit(img, (rect.x, rect.y))
mousedown = pygame.mouse.get_pressed()
# draw a line while the mouse button is held down
if mousedown[0]:
pygame.draw.line(pad, BLUE, mousepos, rect.center)
# pad.blit(user, (pos.x, pos.y) )
pygame.display.flip()
# pygame.display.upate()
clock.tick(60)
pygame.quit()
|
[
"psychic@secuve.com"
] |
psychic@secuve.com
|
bfe44943e89a9537af13bd731fb422a50eb87f7f
|
488fb4ea9b50759c61d115fea2f830dbe1a92fb4
|
/flask_app/simple.py
|
fc09e1aef193d07567780b2eea211e3db2ae8005
|
[] |
no_license
|
jacobcui/python301
|
ea42e8664c8dd1ce1d7f5f05c592372a050a27bf
|
0f2961bdf061bf6d8b6390f19deeef1b73af96b4
|
refs/heads/master
| 2020-04-19T22:13:59.503027
| 2019-02-08T00:17:18
| 2019-02-08T00:17:18
| 168,463,713
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
"""A simple example explaining GET, POST.
Reference: http://flask_app.pocoo.org/docs/1.0/quickstart/#a-minimal-application
"""
from flask import Blueprint, render_template, request
bp = Blueprint('simple', __name__)
@bp.route('/simple', methods=['GET', 'POST'])
def simple_handler():
if request.method == 'GET':
return render_template('simple.html')
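# Editor's sketch of the missing POST branch (the 'message' form field is an assumption
# about simple.html, not something defined in this repo):
# if request.method == 'POST':
#     return render_template('simple.html', message=request.form.get('message'))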
|
[
"jacobcui123@gmail.com"
] |
jacobcui123@gmail.com
|
8ef7a014215cf45c13afb9ae120e112806e9cc33
|
0a8a3d486c170019a09fadeafc36cffb459f55db
|
/miscImu/testIMU.py
|
682b2e9007fdba02a94d619d77cd53b9402a9718
|
[
"MIT"
] |
permissive
|
Armon16/IMU
|
e240f6d30451695d94a71e5f0d2454cf0ccc7d85
|
ee4d114c7f5074a5f45bd658bf5d7f310d452f2f
|
refs/heads/master
| 2020-04-29T01:21:05.744558
| 2019-04-12T06:17:29
| 2019-04-12T06:17:29
| 153,354,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,560
|
py
|
import logging
import sys
import time
from deepstream import get, post
from Adafruit_BNO055 import BNO055
try:
obj = {}
post(obj, 'imu')
except:
print("Not connected to deepstream")
magneticDeviation = 11
bno = BNO055.BNO055(busnum=2)
confMode = True
while True:
try:
if not bno.begin():
print('The sensor is not connected')
time.sleep(1)
#raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
else:
break
except:
print('waiting for sensor...')
def magToTrue(h):
global magneticDeviation
if (h - magneticDeviation < 0):
return (h + 360 - magneticDeviation)
else:
return h - magneticDeviation
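# Worked example (editor's note): with magneticDeviation = 11, magToTrue(90) -> 79 and
# magToTrue(5) -> 354 (the heading wraps around instead of going negative).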
bno.set_mode(0x00)
print("Entering Config Mode")
fileIn = open('calibrationData.txt','r')
data = fileIn.read().splitlines()
for i in range(len(data)):
data[i] = int(data[i])
bno.set_calibration(data)
fileIn.close()
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
print('Reading BNO055 data, press Ctrl-C to quit...')
try:
while True:
heading, roll, pitch = bno.read_euler()
sys, gyro, accel, mag = bno.get_calibration_status()
heading = magToTrue(heading)
if (sys == 3 and gyro == 3 and accel == 3 and mag == 3 and confMode):
bno.set_mode(0x0C)
print("Entering Nine Degrees of Freedom Fusion Mode")
confMode = False
print('Heading={0:0.2F} Roll={1:0.2F} Pitch={2:0.2F}\tSys_cal={3} Gyro_cal={4} Accel_cal={5} Mag_cal={6}'.format(
heading, roll, pitch, sys, gyro, accel, mag))
try:
response = post({ "heading":heading, "roll":roll, "pitch":pitch, "sys":sys, "gyro":gyro, "accel":accel, "mag":mag }, 'imu')
except:
print("Cannot Post to Deepstream")
response = None
time.sleep(.03)
except:
print("Error in try catch")
|
[
"armon16@csu.fullerton.edu"
] |
armon16@csu.fullerton.edu
|
665fa4ba03e6c225b3c0e1b947ee5d50644e1b6b
|
4b660991e5c9c93c83dccccdd3ea91531201e8a3
|
/DSA/stack/balanced_parentheses.py
|
b4f1220d5f0ec9e0f22a0eb60703bc0198df83f8
|
[
"MIT"
] |
permissive
|
RohanMiraje/DSAwithPython
|
2a1515fa5f9e5cc76b08a3e6f0ce34e451fb6f4b
|
ea4884afcac9d6cc2817a93e918c829dd10cef5d
|
refs/heads/master
| 2022-09-24T08:57:04.695470
| 2021-10-21T01:06:06
| 2021-10-21T01:06:06
| 238,381,770
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
def check_balanced_parentheses(string):
stack = list()
matches = [("(", ")"), ("{", "}"), ("[", "]")]
if len(string) % 2:
"""
base condition to early check assuming string has only parentheses
"""
return False
for char in string:
if char in ['(', '{', '[']:
stack.append(char)
elif char in [')', '}', ']']:
if len(stack) == 0:
return False
last_opening = stack.pop()
if (last_opening, char) not in matches:
return False
# prev = stack.pop()
# if char == ')':
# if prev != "(":
# return False
# elif char == "}":
# if prev != "{":
# return False
# elif char == "]":
# if prev != "[":
# return False
"""
other approach for checking matches like
matches = [("(",")"),("{","}"),("[","]")]
last_opening = stack.pop()
if (last_opening, curr_char )not in matches:
return False
"""
return len(stack) == 0
if __name__ == '__main__':
exp = "([{}])"
print(check_balanced_parentheses(exp))
|
[
"rohanmiraje19@gmail.com"
] |
rohanmiraje19@gmail.com
|
c6216e017e386c6fcba6a03eb401c29dae4b42b7
|
abfa70e1da5b4ba8e465cdc046fa36e81386744a
|
/base_ml/10.1.Iris_DecisionTree.py
|
68bd1cb46b1c29c5cf1e31ca7b17b59b9c34a20c
|
[] |
no_license
|
superman666ai/crazy_project
|
f850819ff2287e345b67500111733bafa5629d1f
|
99dcba0fe246ecaf3f556f747d44731a04231921
|
refs/heads/master
| 2020-05-15T09:32:56.523875
| 2019-05-16T00:57:23
| 2019-05-16T00:57:23
| 182,179,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,473
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def iris_type(s):
it = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
return it[s]
# the four features: sepal length, sepal width, petal length, petal width
# iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
if __name__ == "__main__":
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
path = '../data/8.iris.data' # path to the data file
data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type},encoding="utf-8")
x, y = np.split(data, (4,), axis=1)
# for visualization, use only the first two feature columns
x = x[:, :2]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
#ss = StandardScaler()
#ss = ss.fit(x_train)
# decision tree parameter estimation
# min_samples_split = 10: a node is (potentially) split only if it contains more than 10 samples
# min_samples_leaf = 10: a split is kept only if every resulting child node has more than 10 samples; otherwise the node is not split
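# Editor's sketch (values are illustrative and are NOT used by the pipeline below): the
# two pre-pruning options described above would be passed to the classifier like
# DecisionTreeClassifier(criterion='entropy', max_depth=3,
#                        min_samples_split=10, min_samples_leaf=10)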
model = Pipeline([
('ss', StandardScaler()),
('DTC', DecisionTreeClassifier(criterion='entropy', max_depth=3))])
# clf = DecisionTreeClassifier(criterion='entropy', max_depth=3)
model = model.fit(x_train, y_train)
y_test_hat = model.predict(x_test) # predict on the test data
print(model.score)
# save the tree to a .dot file
# dot -Tpng -o 1.png 1.dot
f = open('.\\iris_tree.dot', 'w')
tree.export_graphviz(model.get_params('DTC')['DTC'], out_file=f)
# plotting
N, M = 100, 100 # number of sample points along each axis
x1_min, x1_max = x[:, 0].min(), x[:, 0].max() # range of column 0
x2_min, x2_max = x[:, 1].min(), x[:, 1].max() # range of column 1
t1 = np.linspace(x1_min, x1_max, N)
t2 = np.linspace(x2_min, x2_max, M)
x1, x2 = np.meshgrid(t1, t2) # build the grid of sample points
x_show = np.stack((x1.flat, x2.flat), axis=1) # query points
# # meaningless filler, only to pad out the other two dimensions
# # before uncommenting, make sure x = x[:, :2] above is commented out
# x3 = np.ones(x1.size) * np.average(x[:, 2])
# x4 = np.ones(x1.size) * np.average(x[:, 3])
# x_test = np.stack((x1.flat, x2.flat, x3, x4), axis=1) # query points
cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
y_show_hat = model.predict(x_show) # predicted labels
y_show_hat = y_show_hat.reshape(x1.shape) # reshape to match the grid shape
plt.figure(facecolor='w')
plt.pcolormesh(x1, x2, y_show_hat, cmap=cm_light) # show the predicted regions
plt.scatter(x_test[:, 0], x_test[:, 1], c=y_test.ravel(), edgecolors='k', s=100, cmap=cm_dark, marker='o') # test data
plt.scatter(x[:, 0], x[:, 1], c=y.ravel(), edgecolors='k', s=40, cmap=cm_dark) # all data
plt.xlabel(iris_feature[0], fontsize=15)
plt.ylabel(iris_feature[1], fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.grid(True)
plt.title(u'鸢尾花数据的决策树分类', fontsize=17)
plt.show()
# prediction results on the test set
y_test = y_test.reshape(-1)
# print y_test_hat
# print y_test
result = (y_test_hat == y_test) # True means the prediction is correct, False means it is wrong
acc = np.mean(result)
# print 'accuracy: %.2f%%' % (100 * acc)
# overfitting: error rate as a function of tree depth
depth = np.arange(1, 15)
err_list = []
for d in depth:
clf = DecisionTreeClassifier(criterion='entropy', max_depth=d)
clf = clf.fit(x_train, y_train)
y_test_hat = clf.predict(x_test) # test data
result = (y_test_hat == y_test) # True means the prediction is correct, False means it is wrong
err = 1 - np.mean(result)
err_list.append(err)
# print d, ' accuracy: %.2f%%' % (100 * err)
plt.figure(facecolor='w')
plt.plot(depth, err_list, 'ro-', lw=2)
plt.xlabel(u'决策树深度', fontsize=15)
plt.ylabel(u'错误率', fontsize=15)
plt.title(u'决策树深度与过拟合', fontsize=17)
plt.grid(True)
plt.show()
|
[
"keepingoner@163.com"
] |
keepingoner@163.com
|
c1cf80839a68e4e308b1e1494623900cb368e997
|
a483ec5f451f4d6a4455626d3b5e7493f2c44052
|
/sophomore/基于神经网络的人体行为姿态识别/SRTP-201909069-项目成果/3DCNN-Behavior Recognition/model_detail.py
|
01973fa68d8799c8feb8ab796528dfc78cb54d7a
|
[] |
no_license
|
wjialei/DuringColloge
|
8e62587da265e2cf512c6a90990cf41c3beccf40
|
d899cfb9954e1f8e10dd806d0e0428dfae18ad9b
|
refs/heads/master
| 2020-12-13T05:47:05.759575
| 2020-01-18T05:40:41
| 2020-01-18T05:40:41
| 234,324,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
import h5py
# open the checkpoint files
f1 = h5py.File('.\\data\\checkpoint\\inception.001-2.03.hdf5','r')
f2 = h5py.File('.\\data\\checkpoint\\inception.002-1.77.hdf5','r')
f3 = h5py.File('.\\data\\checkpoint\\inception.005-1.64.hdf5','r')
f4 = h5py.File('.\\data\\checkpoint\\inception.015-1.58.hdf5','r')
f5 = h5py.File('.\\data\\checkpoint\\inception.016-1.55.hdf5','r')
f6 = h5py.File('.\\data\\checkpoint\\inception.022-1.39.hdf5','r')
f7 = h5py.File('.\\data\\checkpoint\\inception.029-1.39.hdf5','r')
with h5py.File('.\\data\\checkpoint\\inception.001-2.03.hdf5','r') as f:
def prtname(name):
print(name)
f.visit(prtname)
subgroup = f['subgroup']
subsub = subgroup['subsub']
data1 = subgroup['data1']
data2 = subsub['data2']
# print(dset)
print("data1 name:",data1.name,"data2 name",data2.name)
print("data1 shape:",data1.shape,"data2 shape:",data2.shape)
print("data1 dtype:",data1.dtype,"data2 dtype:",data2.dtype)
print("data1:",data1[:],"data2:",data2[:])
|
[
"Jialei_w@163.com"
] |
Jialei_w@163.com
|
ed80bc16dfb22d80024c3f7ec27c4aa5882763ad
|
9a8746628978eb368da0c4aea7da9ad0818b0c75
|
/StreamLitLibrary.py
|
1f14d7959d1c59d1779fd1855080ec70de2cf10a
|
[] |
no_license
|
jayreds73/Heroku-Deployment
|
67999469165f4f9bee91252aef34a54de518b51b
|
bfa893a19004c1418e8a0ac72d1e522b03ae790f
|
refs/heads/master
| 2022-12-15T03:17:28.359818
| 2020-09-09T20:10:53
| 2020-09-09T20:10:53
| 294,200,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
#from flask import Flask,request
import pandas as pd
import pickle as pkl
import streamlit as st
# Run this - "streamlit run filename" in command prompt
# load the model at the start of the app
pickle_in = open('model.pkl','rb')
model_iris = pkl.load(pickle_in)
def get_description(int_code):
if (int_code==0):
desc = 'Setosa'
elif (int_code == 1):
desc = 'Versicolour'
else:
desc = 'Virginica'
return desc
def Welcome():
return "Hello world, Jayanth"
def iris_predict(sl,sw,pl,pw):
prediction = model_iris.predict([[sl,sw,pl,pw]])
return "The prediction is: " + get_description(int(prediction[0]))
def main():
#Gives Title
st.title("Iris Data Set Prediction")
# Creates look and feel -- see more for html
html_temp = """
<div style="background-color:tomato;padding:10px">
<h2 style="color:white;text-align:center;">Streamlit Rendered App for IRIS prediction </h2>
</div>
"""
# Executes HTML
st.markdown(html_temp, unsafe_allow_html=True)
sl = float(st.text_input('Sepal Length','1.25'))
sw = float(st.text_input('Sepal Width','2.25'))
pl = float(st.text_input('Petal Length','3.25'))
pw = float(st.text_input('Petal Width','4.8'))
prediction = ""
# create button
if st.button("Predict"):
prediction = iris_predict(sl,sw,pl,pw)
st.success(prediction)
# prediction_t = ""
# if st.button("Test"):
# prediction_t = 'Pass'
# st.success(prediction_t)
# if st.button("About"):
# st.text("Lets LEarn")
# st.text("Built with Streamlit")
if(__name__=='__main__'):
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4b6ede8c383df1a8a4b9930c4be790de6c73ee5f
|
e8811aaa2f4344de1f835b4e72b26f9dc5eb9f47
|
/http1/predict.py
|
387a14987524c7b3f34297d2b3c2f10ab83bf5f8
|
[] |
no_license
|
anandanthony/anandpyfunc
|
93bae5ce03b8638e2b11a2fcb540592578e1ba84
|
6f7d497676492c1ced88a84aae8aad60a7907f9f
|
refs/heads/master
| 2022-11-23T07:56:30.060122
| 2020-08-03T21:23:27
| 2020-08-03T21:23:27
| 284,809,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
# General libs
from datetime import datetime
import logging
import os
# Image processing libs
import tensorflow as tf
import tensorflow_addons as tfa
import cv2
from PIL import Image
# Additional libs
import numpy as np
from urllib.request import urlopen
import requests
from io import BytesIO
from numpy import genfromtxt
from scipy.spatial import distance
scriptpath = os.path.abspath(__file__)
dir = os.path.dirname(scriptpath)
image = os.path.join(dir, 'file.jpeg')
model_weights = os.path.join(dir, 'keras.h5')
dataset = os.path.join(dir, 'dataset.tsv')
classes = os.path.join(dir, 'classes.txt')
database = genfromtxt(dataset, delimiter='\t')
classes_list = genfromtxt(classes, delimiter='\n',dtype=None)
size = 480
def exctract_roi(image): # extract the object (region of interest) from an image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5,5), 0)
canny = cv2.Canny(blurred, 0,100, 3)
kernel = np.ones((5,5),np.uint8)
dilate = cv2.dilate(canny, kernel, iterations=1)
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
image_number = 0
for c in cnts:
x,y,w,h = cv2.boundingRect(c)
ROI = image[y:y+h, x:x+w]
return ROI
def url_to_image(url): # Download image from URL and open in opencv
#image = urlopen(url)
#image = np.asarray(bytearray(image.read()), dtype="uint8")
#image = cv2.imdecode(image, cv2.IMREAD_COLOR)
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = np.array(image)
return image
def image_preprocessing(image_url):
image = url_to_image(image_url)
image = exctract_roi(image)
image = np.array(image)
tensor = tf.convert_to_tensor(image)
tensor = tf.image.convert_image_dtype(tensor, tf.float32)
tensor = tf.image.resize(tensor, (size,size))
return tf.expand_dims(tensor,0)
def result_post_processing(result):
distances = []
for i in database:
dist = distance.euclidean(i,result)
distances.append(dist)
id = np.take(classes_list,np.argmin(distances))
return id.decode("utf-8")
def predict_image_from_url(image_url):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=64, kernel_size=7, strides=2, padding='valid', activation='relu', input_shape=(480,480,3)),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Conv2D(filters=192, kernel_size=3, strides=1, padding='valid', activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Conv2D(filters=192, kernel_size=3, strides=1, padding='valid', activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
#tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, padding='valid', activation='relu'),
tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='valid', activation='relu'),
#tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='valid', activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=None),
tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
])
model.load_weights(model_weights)
model.compile(loss=tfa.losses.TripletSemiHardLoss(margin = 4.0))
result = model.predict(image_preprocessing(image_url))
mongoid = result_post_processing(result)
return mongoid
|
[
"anfranci@microsoft.com"
] |
anfranci@microsoft.com
|
c7fe334fcb246d191e7db56465b77abd86f98947
|
83412c7effe6a47e423fb55541d768d1bb308de0
|
/HW1/code/src/titanic.py
|
90f303852d55635b43e5ce3301f6e9b3daf995ab
|
[] |
no_license
|
atibhav21/CSM146
|
7be041ae972ebd3a78c01e2a98075f66f875e9f4
|
ab806ec5fe23a7b36e503b304445b0efe83f12d5
|
refs/heads/master
| 2021-09-09T05:52:27.155543
| 2018-03-14T02:52:46
| 2018-03-14T02:52:46
| 117,149,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,174
|
py
|
"""
Author : Yi-Chieh Wu, Sriram Sankararaman
Description : Titanic
"""
# Use only the provided packages!
import math
import csv
from util import *
from collections import Counter
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import operator
######################################################################
# classes
######################################################################
class Classifier(object) :
"""
Classifier interface.
"""
def fit(self, X, y):
raise NotImplementedError()
def predict(self, X):
raise NotImplementedError()
class MajorityVoteClassifier(Classifier) :
def __init__(self) :
"""
A classifier that always predicts the majority class.
Attributes
--------------------
prediction_ -- majority class
"""
self.prediction_ = None
def fit(self, X, y) :
"""
Build a majority vote classifier from the training set (X, y).
Parameters
--------------------
X -- numpy array of shape (n,d), samples
y -- numpy array of shape (n,), target classes
Returns
--------------------
self -- an instance of self
"""
majority_val = Counter(y).most_common(1)[0][0]
self.prediction_ = majority_val
return self
def predict(self, X) :
"""
Predict class values.
Parameters
--------------------
X -- numpy array of shape (n,d), samples
Returns
--------------------
y -- numpy array of shape (n,), predicted classes
"""
if self.prediction_ is None :
raise Exception("Classifier not initialized. Perform a fit first.")
n,d = X.shape
y = [self.prediction_] * n
return y
class RandomClassifier(Classifier) :
def __init__(self) :
"""
A classifier that predicts according to the distribution of the classes.
Attributes
--------------------
probabilities_ -- class distribution dict (key = class, val = probability of class)
"""
self.probabilities_ = None
def fit(self, X, y) :
"""
Build a random classifier from the training set (X, y).
Parameters
--------------------
X -- numpy array of shape (n,d), samples
y -- numpy array of shape (n,), target classes
Returns
--------------------
self -- an instance of self
"""
### ========== TODO : START ========== ###
# part b: set self.probabilities_ according to the training set
classes = np.unique(y);
self.probabilities_ = {};
total_number = y.shape[0]
for i in classes:
self.probabilities_[int(i)] = len(np.where(y == i)[0])/float(total_number)
### ========== TODO : END ========== ###
return self
def predict(self, X, seed=1234) :
"""
Predict class values.
Parameters
--------------------
X -- numpy array of shape (n,d), samples
seed -- integer, random seed
Returns
--------------------
y -- numpy array of shape (n,), predicted classes
"""
if self.probabilities_ is None :
raise Exception("Classifier not initialized. Perform a fit first.")
np.random.seed(seed)
### ========== TODO : START ========== ###
# part b: predict the class for each test example
# hint: use np.random.choice (be careful of the parameters)
#print(self.probabilities_)
n = X.shape[0]
y = np.random.choice(2, n, p=[self.probabilities_[0], self.probabilities_[1]])
### ========== TODO : END ========== ###
return y
######################################################################
# functions
######################################################################
def plot_histograms(X, y, Xnames, yname) :
n,d = X.shape # n = number of examples, d = number of features
fig = plt.figure(figsize=(20,15))
nrow = 3; ncol = 3
for i in range(d) :
        fig.add_subplot(nrow, ncol, i+1)
data, bins, align, labels = plot_histogram(X[:,i], y, Xname=Xnames[i], yname=yname, show = False)
n, bins, patches = plt.hist(data, bins=bins, align=align, alpha=0.5, label=labels)
plt.xlabel(Xnames[i])
plt.ylabel('Frequency')
plt.legend() #plt.legend(loc='upper left')
plt.savefig ('histograms.pdf')
def plot_histogram(X, y, Xname, yname, show = True) :
"""
Plots histogram of values in X grouped by y.
Parameters
--------------------
X -- numpy array of shape (n,d), feature values
y -- numpy array of shape (n,), target classes
Xname -- string, name of feature
yname -- string, name of target
"""
# set up data for plotting
targets = sorted(set(y))
data = []; labels = []
for target in targets :
features = [X[i] for i in range(len(y)) if y[i] == target]
data.append(features)
labels.append('%s = %s' % (yname, target))
# set up histogram bins
features = set(X)
nfeatures = len(features)
test_range = list(range(int(math.floor(min(features))), int(math.ceil(max(features)))+1))
if nfeatures < 10 and sorted(features) == test_range:
bins = test_range + [test_range[-1] + 1] # add last bin
align = 'left'
else :
bins = 10
align = 'mid'
# plot
if show == True:
plt.figure()
n, bins, patches = plt.hist(data, bins=bins, align=align, alpha=0.5, label=labels)
plt.xlabel(Xname)
plt.ylabel('Frequency')
plt.legend() #plt.legend(loc='upper left')
#plt.show()
return data, bins, align, labels
def error(clf, X, y, ntrials=100, test_size=0.2) :
"""
Computes the classifier error over a random split of the data,
averaged over ntrials runs.
Parameters
--------------------
clf -- classifier
X -- numpy array of shape (n,d), features values
y -- numpy array of shape (n,), target classes
ntrials -- integer, number of trials
Returns
--------------------
train_error -- float, training error
test_error -- float, test error
"""
### ========== TODO : START ========== ###
# compute cross-validation error over ntrials
# hint: use train_test_split (be careful of the parameters)
train_error = 0
test_error = 0
for i in range(1, ntrials+1):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=i)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_train) # compute the training error
train_error += 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)
y_pred = clf.predict(X_test) # compute the test error
test_error += 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)
train_error = float(train_error) / ntrials
test_error = float(test_error) / ntrials # average the errors out
### ========== TODO : END ========== ###
return train_error, test_error
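# Editor's sketch (hedged): with a newer scikit-learn, a comparable averaged test error
# could be obtained from model_selection utilities instead of the manual loop, e.g.
#   from sklearn.model_selection import ShuffleSplit, cross_val_score
#   cv = ShuffleSplit(n_splits=ntrials, test_size=test_size)
#   test_error = 1 - cross_val_score(clf, X, y, cv=cv).mean()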
def write_predictions(y_pred, filename, yname=None) :
"""Write out predictions to csv file."""
out = open(filename, 'wb')
f = csv.writer(out)
if yname :
f.writerow([yname])
f.writerows(list(zip(y_pred)))
out.close()
######################################################################
# main
######################################################################
def main():
# load Titanic dataset
titanic = load_data("titanic_train.csv", header=1, predict_col=0)
X = titanic.X; Xnames = titanic.Xnames
y = titanic.y; yname = titanic.yname
n,d = X.shape # n = number of examples, d = number of features
#========================================
# part a: plot histograms of each feature
print('Plotting...')
for i in range(d) :
plot_histogram(X[:,i], y, Xname=Xnames[i], yname=yname)
plt.close('all')
#========================================
# train Majority Vote classifier on data
print('Classifying using Majority Vote...')
clf = MajorityVoteClassifier() # create MajorityVote classifier, which includes all model parameters
clf.fit(X, y) # fit training data using the classifier
y_pred = clf.predict(X) # take the classifier and run it on the training data
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
majority_vote_error = train_error
### ========== TODO : START ========== ###
# part b: evaluate training error of Random classifier
print('Classifying using Random...')
rand_clf = RandomClassifier()
rand_clf.fit(X, y)
y_pred = rand_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
random_clf_error = train_error;
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part c: evaluate training error of Decision Tree classifier
# use criterion of "entropy" for Information gain
print('Classifying using Decision Tree...')
decision_tree_clf = DecisionTreeClassifier(criterion="entropy")
decision_tree_clf.fit(X, y)
y_pred = decision_tree_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error: %.3f' % train_error)
### ========== TODO : END ========== ###
# note: uncomment out the following lines to output the Decision Tree graph
"""
# save the classifier -- requires GraphViz and pydot
import StringIO, pydot
from sklearn import tree
dot_data = StringIO.StringIO()
tree.export_graphviz(clf, out_file=dot_data,
feature_names=Xnames)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("dtree.pdf")
"""
### ========== TODO : START ========== ###
# part d: evaluate training error of k-Nearest Neighbors classifier
# use k = 3, 5, 7 for n_neighbors
print('Classifying using k-Nearest Neighbors...')
for k in (3, 5, 7):
k_Nearest_clf = KNeighborsClassifier(n_neighbors=k)
k_Nearest_clf.fit(X, y)
y_pred = k_Nearest_clf.predict(X)
train_error = 1 - metrics.accuracy_score(y, y_pred, normalize=True)
print('\t-- training error (k = %d): %.3f' % (k, train_error))
# Redeclare it for part e
k_Nearest_clf = KNeighborsClassifier(n_neighbors=5)
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part e: use cross-validation to compute average training and test error of classifiers
print('Investigating various classifiers...')
for classifier in (clf, rand_clf, decision_tree_clf, k_Nearest_clf):
train_error, test_error = error(classifier, X, y)
        print('\t-- Train Error: %.3f  Test Error: %.3f  Classifier: %s' % (train_error, test_error, classifier.__class__.__name__))
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part f: use 10-fold cross-validation to find the best value of k for k-Nearest Neighbors classifier
print('Finding the best k for KNeighbors classifier...')
x_points = []
y_points = []
for k in range(1, 50, 2):
x_points.append(k)
k_Nearest_clf = KNeighborsClassifier(n_neighbors=k)
k_y = 1 - cross_val_score(k_Nearest_clf, X, y, scoring='accuracy', cv=10)
y_points.append(sum(k_y) / len(k_y))
plt.plot(x_points, y_points)
plt.xlabel('Number of neighbors')
plt.ylabel('Average Error')
plt.show()
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part g: investigate decision tree classifier with various depths
print('Investigating depths...')
x_points = []
y_test_points = []
y_train_points = []
for k in range(1, 21):
decision_tree_clf = DecisionTreeClassifier(criterion='entropy', max_depth=k)
train_error, test_error = error(decision_tree_clf, X, y)
x_points.append(k)
y_test_points.append(test_error)
y_train_points.append(train_error)
plt.plot(x_points, y_train_points, label='Training Error')
plt.plot(x_points, y_test_points, label='Test Error')
plt.plot(x_points, [majority_vote_error] * len(x_points), label='Majority Vote Classifier error')
plt.plot(x_points, [random_clf_error] * len(x_points), label='Random Classifier error')
plt.legend(loc='upper right')
plt.xlabel('Depth')
plt.ylabel('Average Error')
plt.ylim(ymax=0.7)
plt.show()
#plt.close('all')
### ========== TODO : END ========== ###
### ========== TODO : START ========== ###
# part h: investigate Decision Tree and k-Nearest Neighbors classifier with various training set sizes
print('Investigating training set sizes...')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, train_size=0.9, random_state=42)
fraction_percentages = [x / 10.0 for x in range(1, 11)]
fraction_indices = [int(i * X_train.shape[0]) for i in fraction_percentages]
k_Nearest_clf = KNeighborsClassifier(n_neighbors=7)
decision_tree_clf = DecisionTreeClassifier(criterion='entropy', max_depth=6)
x_points = fraction_percentages[:]
y_points_decision_train = []
y_points_knn_train = []
y_points_decision_test =[]
y_points_knn_test = []
for end_index in fraction_indices:
X_train_set = X_train[:end_index+1]
y_train_set = y_train[:end_index+1]
k_Nearest_clf.fit(X_train_set, y_train_set)
decision_tree_clf.fit(X_train_set, y_train_set)
y_pred_knn_train = k_Nearest_clf.predict(X_train_set)
y_pred_decision_train = decision_tree_clf.predict(X_train_set)
y_pred_knn_test = k_Nearest_clf.predict(X_test)
y_pred_decision_test = decision_tree_clf.predict(X_test)
train_error_knn = 1 - metrics.accuracy_score(y_train_set, y_pred_knn_train, normalize=True)
test_error_knn = 1 - metrics.accuracy_score(y_test, y_pred_knn_test, normalize=True)
train_error_decision = 1 - metrics.accuracy_score(y_train_set, y_pred_decision_train, normalize=True)
test_error_decision = 1 - metrics.accuracy_score(y_test, y_pred_decision_test, normalize=True)
y_points_decision_train.append(train_error_decision)
y_points_decision_test.append(test_error_decision)
y_points_knn_train.append(train_error_knn)
y_points_knn_test.append(test_error_knn)
plt.plot(x_points, y_points_decision_train, label="Decision Tree Training Error")
plt.plot(x_points, y_points_decision_test, label="Decision Tree Test Error")
plt.plot(x_points, y_points_knn_train, label="KNearest Training Error")
plt.plot(x_points, y_points_knn_test, label="KNearest Test Error")
plt.plot(x_points, [majority_vote_error] * len(x_points), label='Majority Vote Classifier error')
plt.plot(x_points, [random_clf_error] * len(x_points), label='Random Classifier error')
plt.ylim(ymax=0.8)
plt.legend(loc='upper right')
plt.xlabel('Fraction of Training Data')
plt.ylabel('Error')
plt.show()
### ========== TODO : END ========== ###
print('Done')
if __name__ == "__main__":
main()
|
[
"atibhav.mittal6@gmail.com"
] |
atibhav.mittal6@gmail.com
|
30dd3f1c4df8cb5dbb131dfd4d1780d86003bd26
|
c1d4f80fbf94fc1cb075d04284cc726f354cc586
|
/diffOfSquares.py
|
de5cfad77b93706c58f2823a072b50e608d6464d
|
[
"MIT"
] |
permissive
|
azizamukhamedova/Python-Thunder
|
2af6ec8d9be07d03d51a53430449c41ec6f21a0d
|
820b943c4884dad4a247c7480b86c057a1508509
|
refs/heads/master
| 2022-12-24T10:05:06.879056
| 2020-10-01T10:04:30
| 2020-10-01T10:04:30
| 300,230,819
| 2
| 0
|
MIT
| 2020-10-01T09:59:00
| 2020-10-01T09:58:59
| null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
'''
Probem Task : This program returns the difference in areas of two squares.
Problem Link: https://edabit.com/challenge/NNhkGocuPMcryW7GP
'''
def square_areas_difference(r):
return (2*r)**2-(r**2)*2
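# Worked check (editor's addition, hedged): the expression simplifies to
# (2*r)**2 - 2*r**2 = 2*r**2, so for r = 3 the function should return 18.
assert square_areas_difference(3) == 18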
|
[
"noreply@github.com"
] |
noreply@github.com
|
d3980370454d25fd98274030292d5c8ed674a8f7
|
4116790ee11de30eade92cabd5cddcb0978eb2c9
|
/employeerest/company/company/views.py
|
bce69d005ffe90441d1cc9375a9ca66db31e094a
|
[] |
no_license
|
Joel-hanson/djangomytutorial
|
4e8aadbccea831bb8f7e4cf0de3d35e4bfeaadc0
|
93d2925ae1a8d5f5dcec03e0c85b3ff0e492d125
|
refs/heads/master
| 2021-08-30T10:48:42.207229
| 2017-12-17T14:43:34
| 2017-12-17T14:43:34
| 108,539,027
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
from django.views.generic import TemplateView
class TestPage(TemplateView):
template_name = 'firstapp/test.html'
class ThanksPage(TemplateView):
template_name = 'firstapp/thanks.html'
class HomePage(TemplateView):
template_name = 'firstapp/index.html'
|
[
"joelhanson025@gmail.com"
] |
joelhanson025@gmail.com
|
b75e556ea1b40b23295bd4f418cd7787509b3aab
|
a1af08e61db95281579497b2a3f05535c60b0c84
|
/Algorithms and data stractures in python/lesson2/les_2_task_4.py
|
64d41bb9219825990ead01dc96a40b2c5a4aa986
|
[] |
no_license
|
kargamant/education
|
4c6d4bd419094eb175a73bb3888b8638b7d42af4
|
21e346a3eedf342efaae3636f24385b97713c06d
|
refs/heads/master
| 2020-07-03T10:50:32.152899
| 2020-06-19T10:33:36
| 2020-06-19T10:33:36
| 201,883,903
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
# 4. Find the sum of the first n elements of the series: 1, -0.5, 0.25, -0.125, ...
# The number of elements (n) is read from the keyboard.
n = int(input('Enter the number of elements>>>'))
b1 = 1
q = -0.5
s = 0
for i in range(n):
    s += b1 * pow(q, i)
print(f'Sum of {n} elements: {s}')
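# Editor's check (hedged): this is a geometric series, so the loop agrees with the
# closed form b1 * (1 - q**n) / (1 - q); e.g. for n = 3 both give 1 - 0.5 + 0.25 = 0.75.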
|
[
"noreply@github.com"
] |
noreply@github.com
|
14349834269be1eb71541b0b9ba7c9447bd65661
|
6f9a5717fed38b0a79c399f7e5da55c6a461de6d
|
/Baekjoon/TreeDiameter.py
|
403cdb3ebca8db3488b4692be26727c85cc6920a
|
[] |
no_license
|
Alfred-Walker/pythonps
|
d4d3b0f7fe93c138d02651e05ca5165825676a5e
|
81ef8c712c36aa83d1c53aa50886eb845378d035
|
refs/heads/master
| 2022-04-16T21:34:39.316565
| 2020-04-10T07:50:46
| 2020-04-10T07:50:46
| 254,570,527
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# The diameter of a tree is the longest of the distances between any two vertices in the tree.
# Write a program that computes the diameter of a tree.
#
# Input
# A tree is given as input.
# The first line contains the number of vertices V of the tree (2 <= V <= 100,000),
# and the next V lines describe the edges as follows
# (vertices are assumed to be numbered from 1 to V).
#
# Each line starts with a vertex number, followed by pairs of integers describing its edges:
# the first of each pair is a neighbouring vertex number, the second is the distance to that vertex.
# For example, the fourth line shows that vertex 3 is connected to vertex 1 by an edge of length 2
# and to vertex 4 by an edge of length 3.
# Each line ends with -1. All given distances are natural numbers of at most 10,000.
#
# Output
# Print the diameter of the tree on the first line.
import sys
sys.setrecursionlimit(10**6)
V = int(sys.stdin.readline().rstrip())
connected = [[]for _ in range(V + 1)]
visited = [False for _ in range(V + 1)]
# parse the input
for i in range(1, V + 1):
edges = list(map(int, sys.stdin.readline().rstrip().split()))
for j in range(1, len(edges)-1, 2):
connected[edges[0]].append((edges[j], edges[j + 1]))
        # beware of a wrong index here, e.g. connected[i].append((edges[j], edges[j + 1]))
# recurse over the vertices connected to v that have not been visited yet.
# dist tracks the accumulated distance
def dfs(v, dist):
ret = (v, dist)
visited[v] = True
for v_d in connected[v]:
if visited[v_d[0]]:
continue
next_search = dfs(v_d[0], dist + v_d[1])
if ret[1] < next_search[1]:
ret = next_search
return ret
# first DFS: find the farthest vertex (and its distance) from an arbitrary vertex (1)
first_dfs = dfs(1, 0)
far_v = first_dfs[0]
# reset visited so DFS can be run again
visited = [False for _ in range(V + 1)]
# second DFS: from the farthest vertex found above, find the farthest vertex again
second_dfs = dfs(far_v, 0)
# second_dfs[1] is the accumulated distance, i.e. the diameter
diameter = second_dfs[1]
print(diameter)
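# Editor's note (hedged): the two DFS passes rely on a standard tree fact, namely that the
# vertex farthest from any starting vertex is an endpoint of some diameter, so the longest
# distance found by the second DFS is the diameter itself.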
|
[
"studio.alfred.walker@gmail.com"
] |
studio.alfred.walker@gmail.com
|
921140b83f3882c30e59c2c40f58e83ac495e3d1
|
6d8817b7a81c1f65c10ada235edde0f2f37f2f01
|
/test/123.py
|
f08af05041c3702e27f66562116cd8474a131365
|
[] |
no_license
|
angel681166/LawTech
|
63ca49aa90a53ee3b70bcf3e4ae761dd53e8d19b
|
539ef05ed6a32f3c2b551301b51608ec8b340fc3
|
refs/heads/main
| 2023-01-08T08:38:58.220828
| 2020-11-08T17:24:49
| 2020-11-08T17:24:49
| 304,250,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15
|
py
|
hello law tech
|
[
"noreply@github.com"
] |
noreply@github.com
|
58e85669af64469dd275db9980940709213e68dc
|
2b0bbe4977893f5368eab4eb540c451863bf3e24
|
/tools/get_rates.py
|
859067050df1ba382330b78a4b3c31dbd3c0b1d1
|
[
"MIT"
] |
permissive
|
hildogjr/KiCost
|
a1f0b91ec1c8ba6b8a86f2108a38742c26ff112f
|
197a61d90a24ab21049824ad1e5638ac9c4420ac
|
refs/heads/master
| 2023-06-25T16:51:23.137235
| 2023-06-22T16:24:05
| 2023-06-22T16:24:05
| 38,410,608
| 143
| 20
|
MIT
| 2023-04-10T13:38:40
| 2015-07-02T04:09:13
|
Python
|
UTF-8
|
Python
| false
| false
| 846
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Salvador E. Tropea
# Copyright (c) 2021 Instituto Nacional de Tecnología Industrial
# License: Apache 2.0
# Project: KiCost
"""
Tool to generate the default exchange rates.
Should be used before each release.
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from kicost.currency_converter.download_rates import download_rates # noqa: E402
date, rates = download_rates()
assert date
print('#!/usr/bin/python3')
print('# -*- coding: utf-8 -*-')
print("default_date = '{}'".format(date))
first = True
for cur, rate in rates.items():
cont = "'"+cur+"': "+str(rate)+","
if first:
first = False
print("default_rates = {"+cont)
else:
print(' '+cont)
print(" }")
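# Editor's illustration (hedged; the date and rates below are placeholder values):
# the script prints a small module along the lines of
#   default_date = '2021-06-01'
#   default_rates = {'EUR': 1.0,
#                    'USD': 1.18,
#                    }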
|
[
"salvador@inti.gob.ar"
] |
salvador@inti.gob.ar
|
b0af44c71bc504fdf4b8d8d7454978a75e49f783
|
532989f8d1efeed25c954e801802ecaa2038ce52
|
/movies_genre_model.py
|
e69f92c1675cac0c640013a8ce0741cd722a74b1
|
[] |
no_license
|
NaHut/Project3
|
465016ab8204abd47e419b229f20b0977e3c323e
|
586fe26c98117b6b3cc183a3a22e51663c3300cc
|
refs/heads/master
| 2020-03-23T10:13:57.741738
| 2018-07-19T01:57:45
| 2018-07-19T01:57:45
| 141,432,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
import os
import time
import keras
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.models import Model, Input, Sequential
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation, Average, Dropout
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import Adam
from keras.datasets import cifar10
def build(ratio, epochs,batch_size,
x_train=None, y_train=None, x_validation=None, y_validation=None):
print(' x_train shape: ', x_train.shape)
print(' y_train shape: ', y_train.shape)
print(x_train.shape[0], 'train samples')
print(x_validation.shape[0], 'validation samples')
    print(' x_validation shape: ', x_validation.shape)
print(' y_validation shape: ', y_validation.shape)
#build model
num_classes = len(y_train[0])
model = Sequential([
Conv2D(24, (5, 5), padding='same', input_shape=x_train.shape[1:], activation='relu'),
Conv2D(24, (5, 5), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Conv2D(48, (5, 5), padding='same', activation='relu'),
Conv2D(48, (5, 5), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='sigmoid')
])
model2 = Sequential([
Conv2D(24, (3, 3), input_shape=x_train.shape[1:], activation='relu', padding='same'),
Conv2D(24, (3, 3), activation='relu', padding='same'),
Conv2D(24, (3, 3), activation='relu', padding='same'),
MaxPooling2D(pool_size=(3, 3), strides=2),
Conv2D(48, (3, 3), activation='relu', padding='same'),
Conv2D(48, (3, 3), activation='relu', padding='same'),
Conv2D(48, (3, 3), activation='relu', padding='same'),
MaxPooling2D(pool_size=(3, 3), strides=2),
Conv2D(48, (3, 3), activation='relu', padding='same'),
Conv2D(48, (3, 3), activation='relu'),
Conv2D(48, (3, 3)),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='sigmoid')
])
# lr = 1e-4 -> 0.024
opt = keras.optimizers.rmsprop(lr=1e-4, decay=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
print(model.summary())
# create save_dir
save_dir = os.path.join(os.getcwd(), 'saved_models')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_validation, y_validation))
model_file_name = 'genres' + '_tmp.h5'
model_path = os.path.join(save_dir, model_file_name)
keras.callbacks.ModelCheckpoint(model_path,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1)
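    # Editor's note: as written, the ModelCheckpoint above is never passed to
    # model.fit(..., callbacks=[...]), so it has no effect; only model.save() below
    # persists the trained model.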
model.save(model_path)
|
[
"whow1111@naver.com"
] |
whow1111@naver.com
|
8f1edffb45c18305e72ee6fa487d900f2792d2a0
|
850493f3c5c9bf3805a04547e5fe6131f9b2f906
|
/teashop_server/app_models/user_type_model.py
|
f92d2c67a6fafe6f95723a87d5641e3f9d5ced12
|
[] |
no_license
|
blazej700/ibd
|
558f6952d41966fe0e40d74356bcc9f2483add2c
|
9e7a5d84f05b3be0a0cbdc247867e179962db02a
|
refs/heads/main
| 2023-02-01T01:19:10.364462
| 2020-12-12T09:46:04
| 2020-12-12T09:46:04
| 311,062,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from app import db
from flask_restful_swagger_3 import Schema
class UserTypeModel(db.Model):
__tablename__ = 'user_type'
id = db.Column(db.Integer, primary_key=True)
user_type_name = db.Column(db.String(20))
def serialize(self):
return {
'id': self.id,
'user_type_name' : self.user_type_name,
}
|
[
"blazej700@gmail.com"
] |
blazej700@gmail.com
|
46efb78a2acbcd58008ea1bc5f50998401a4a474
|
557c9d016edc56c72dac6a0b3093d195b10b1a6c
|
/examples/ex33_common.py
|
cd535acf255a5de421d64d16d452c5e8f5303ca4
|
[
"BSD-3-Clause"
] |
permissive
|
mfem/PyMFEM
|
dae79fb8aa9ca983780c789b2b6e70e894644573
|
37084af6798ea8bb8f03b2648863befabee829bb
|
refs/heads/master
| 2023-08-31T07:04:28.522945
| 2023-07-18T02:21:57
| 2023-07-18T02:21:57
| 83,574,932
| 154
| 54
|
BSD-3-Clause
| 2023-08-15T01:40:47
| 2017-03-01T16:16:48
|
SWIG
|
UTF-8
|
Python
| false
| false
| 8,356
|
py
|
'''
Ex33_common.py
This is a translation of MFEM ex33.hpp. LAPACK call, mfem.Vector,
mfem.DenseMatrix are replaced using numpy
(Implementation of the AAA algorithm)
Here, we implement the triple-A algorithm [1] for the rational approximation
of complex-valued functions,
p(z)/q(z) ≈ f(z).
In this file, we always assume f(z) = z^{-α}. The triple-A algorithm
provides a robust, accurate approximation in rational barycentric form.
This representation must be transformed into a partial fraction
representation in order to be used to solve a spectral FPDE.
More specifically, we first expand the numerator in terms of the zeros of
the rational approximation,
p(z) ∝ Π_i (z - z_i),
and expand the denominator in terms of the poles of the rational
approximation,
q(z) ∝ Π_i (z - p_i).
We then use these zeros and poles to derive the partial fraction expansion
f(z) ≈ p(z)/q(z) = Σ_i c_i / (z - p_i).
[1] Nakatsukasa, Y., Sète, O., & Trefethen, L. N. (2018). The AAA algorithm
for rational approximation. SIAM Journal on Scientific Computing, 40(3),
A1494-A1522.
'''
import numpy as np
import scipy
from scipy.linalg import eig
import mfem
if mfem.mfem_mode == 'parallel':
import mfem.par as mfem
from mfem.par import intArray, doubleArray
else:
import mfem.ser as mfem
from mfem.ser import intArray, doubleArray
from sys import float_info
eps = float_info.min
def RationalApproximation_AAA(val, pt, tol, max_order):
'''
RationalApproximation_AAA: compute the rational approximation (RA) of data
val at the set of points pt
in:
val Vector of data values
pt Vector of sample points
tol Relative tolerance
max_order Maximum number of terms (order) of the RA
out:
z Support points of the RA in rational barycentric form
f Data values at support points at z
w Weights of the RA in rational barycentric form
See pg. A1501 of Nakatsukasa et al. [1].
'''
# number of sample points
size = len(val) # .Size()
assert len(pt) == size, "size mismatch"
# Initializations
J = list(range(size))
c_i = []
# mean of the value vector
mean_val = np.mean(val)
R = np.array([mean_val]*size)
z = []
f = []
w = []
for k in range(max_order):
# select next support point
idx = 0
tmp_max = 0
idx = np.argmax(np.abs(val-R))
# Append support points and data values
z.append(pt[idx])
f.append(val[idx])
# Update index vector
J.remove(idx)
# next column in Cauchy matrix
C_tmp = [(1.0/(pp-pt[idx]) if pp != pt[idx] else np.inf) for pp in pt]
c_i = np.hstack((c_i, C_tmp))
h_C = len(C_tmp)
w_C = k+1
        # note: the transpose is necessary because of the difference between
        # column-major and row-major matrix layout
C = c_i.reshape(w_C, h_C).transpose()
Ctemp = C.copy()
Ctemp = Ctemp*(np.atleast_2d(1/val)).transpose() # InvLeftScaling
        Ctemp = Ctemp*f  # RightScaling
A = C - Ctemp
A = A*(np.atleast_2d(val)).transpose() # LeftScaling
h_Am = len(J)
w_Am = A.shape[1]
Am = np.zeros((h_Am, w_Am))
for i in range(h_Am):
ii = J[i]
for j in range(w_Am):
Am[i, j] = A[ii, j]
u, s, vh = np.linalg.svd(Am)
w = vh[k, :]
N = C.dot(w*np.array(f))
D = C.dot(w)
R = val.copy()
for i, ii in enumerate(J):
R[ii] = N[ii]/D[ii]
verr = val - R
if np.max(verr) <= tol*max(val):
break
return z, f, w
def ComputePolesAndZeros(z, f, w):
'''
ComputePolesAndZeros: compute the poles and zeros of the
rational function f(z) = C p(z)/q(z) from its ration barycentric form.
in:
z Support points in rational barycentric form
f Data values at support points @a z
w Weights in rational barycentric form
out:
poles Array of poles (roots of p(z))
zeros Array of zeros (roots of q(z))
scale Scaling constant in f(z) = C p(z)/q(z)
See pg. A1501 of Nakatsukasa et al. [1].
'''
# Initialization
poles = []
zeros = []
# Compute the poles
m = len(w)
B = np.zeros((m+1, m+1))
E = np.zeros((m+1, m+1))
for i in range(m+1):
if i == 0:
continue
B[i, i] = 1.
E[0, i] = w[i-1]
E[i, 0] = 1.
E[i, i] = z[i-1]
# real part of eigen value
evalues = eig(E, B, left=False, right=False).real
new_poles = evalues[np.isfinite(evalues)]
poles.extend(new_poles)
B = np.zeros((m+1, m+1))
E = np.zeros((m+1, m+1))
for i in range(m+1):
if i == 0:
continue
B[i, i] = 1.
E[0, i] = w[i-1] * f[i-1]
E[i, 0] = 1.
E[i, i] = z[i-1]
# real part of eigen value
evalues = eig(E, B, left=False, right=False).real
new_zeros = evalues[np.isfinite(evalues)]
zeros.extend(new_zeros)
scale = np.dot(w, f)/np.sum(w)
return poles, zeros, scale
def PartialFractionExpansion(scale, poles, zeros):
'''
PartialFractionExpansion: compute the partial fraction expansion of the
rational function f(z) = Σ_i c_i / (z - p_i) from its poles and zeros
@a zeros [in].
in:
poles Array of poles (same as p_i above)
zeros Array of zeros
scale Scaling constant
out:
coeffs Coefficients c_i
'''
# Note: C p(z)/q(z) = Σ_i c_i / (z - p_i) results in an system of equations
# where the N unknowns are the coefficients c_i. After multiplying the
# system with q(z), the coefficients c_i can be computed analytically by
# choosing N values for z. Choosing z_j = = p_j diagonalizes the system and
# one can obtain an analytic form for the c_i coefficients. The result is
# implemented in the code block below.
psize = len(poles)
zsize = len(zeros)
coeffs = [scale] * psize
for i in range(psize):
tmp_numer = 1.0
for j in range(zsize):
tmp_numer *= poles[i]-zeros[j]
tmp_denom = 1.0
for k in range(psize):
if k != i:
tmp_denom *= poles[i]-poles[k]
coeffs[i] *= tmp_numer / tmp_denom
return coeffs
def ComputePartialFractionApproximation(alpha,
lmax=1000.,
tol=1e-10,
npoints=1000,
max_order=100):
'''
ComputePartialFractionApproximation: compute a rational approximation (RA)
in partial fraction form, e.g., f(z) ≈ Σ_i c_i / (z - p_i), from sampled
values of the function f(z) = z^{-a}, 0 < a < 1.
in:
alpha Exponent a in f(z) = z^-a
lmax,npoints f(z) is uniformly sampled @a npoints times in the
interval [ 0, @a lmax ]
tol Relative tolerance
max_order Maximum number of terms (order) of the RA
out:
coeffs Coefficients c_i
poles Poles p_i
'''
assert alpha < 1., "alpha must be less than 1"
assert alpha > 0., "alpha must be greater than 0"
assert npoints > 2, "npoints must be greater than 2"
assert lmax > 0, "lmin must be greater than 0"
assert tol > 0, "tol must be greater than 0"
dx = lmax / (npoints-1)
x = np.arange(npoints)*dx
val = x**(1-alpha)
# Apply triple-A algorithm to f(x) = x^{1-a}
z, f, w = RationalApproximation_AAA(val, # mfem.Vector(val),
x, # mfem.Vector(x),
tol, max_order)
# Compute poles and zeros for RA of f(x) = x^{1-a}
poles, zeros, scale = ComputePolesAndZeros(z, f, w)
# Remove the zero at x=0, thus, delivering a RA for f(x) = x^{-a}
zeros.remove(0.0)
# Compute partial fraction approximation of f(x) = x^{-a}
coeffs = PartialFractionExpansion(scale, poles, zeros)
return poles, coeffs
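# Editor's usage sketch (hedged, not part of the original example): evaluating the
# partial fraction form f(z) ≈ Σ_i c_i / (z - p_i) returned above for f(z) = z**-0.5.
#
#   poles, coeffs = ComputePartialFractionApproximation(alpha=0.5)
#   z = 2.0
#   approx = sum(c / (z - p) for c, p in zip(coeffs, poles))
#   # approx should be close to z**(-0.5)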
|
[
"shiraiwa@princeton.edu"
] |
shiraiwa@princeton.edu
|
966693712e3410280164b684654420510e60bfac
|
07cc188b2e10f204cd0191aa3c28ca058b863973
|
/film_crawler/film_crawler/film_crawler/graph_constructor.py
|
0be10c6e74fbbf8cce417100cccac2e37d73d3fa
|
[] |
no_license
|
WriteAfterReed/web_scraper_wiki_film_proj
|
b80bfc18a14832c2bf01100e9eee56375c9f6ac6
|
1a55e570b54700ef55bb6d73cf76d47456952f3e
|
refs/heads/master
| 2022-12-28T17:21:46.354362
| 2020-10-06T03:18:06
| 2020-10-06T03:18:06
| 301,601,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,098
|
py
|
import json
class Actor:
    def __init__(self, name="", year=1900, films=None, income=0):
        self.status = "actor"
        self.name = name
        self.year = year
        # default to a fresh list so instances do not share one mutable default
        self.films = films if films is not None else []
        self.income = income
def __str__(self):
return self.name
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_year(self):
return self.year
def set_year(self, year):
self.year = year
def get_age(self):
return 2019 - self.year
def get_films(self):
return self.films
def add_film(self, film):
self.films.append(film)
def get_income(self):
return self.income
def add_income(self, income):
self.income += income
def get_status(self):
return self.status
class Film:
    def __init__(self, name="", year=1900, cast=None, income=0):
        self.status = "film"
        self.name = name
        self.year = year
        # default to a fresh list so instances do not share one mutable default
        self.cast = cast if cast is not None else []
        self.income = income
def __str__(self):
return self.name
def get_status(self):
return self.status
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_year(self):
return self.year
def set_year(self, year):
self.year = year
def get_cast(self):
return self.cast
def add_cast(self, actor):
self.cast.append(actor)
def get_income(self):
return self.income
def set_income(self, income):
self.income = income
class Graph:
def __init__(self):
self.datum = {}
    def new_vert(self, node, entry=None):
        # default to a fresh list so vertices do not share one mutable default
        self.datum[node] = entry if entry is not None else []
def update_vert(self, node, entry):
obj = self.datum[node]
if entry not in obj:
obj.append(entry)
self.datum[node] = obj
def get_verts(self):
return self.datum.keys()
def get_edges(self):
return self.datum.values()
def write_to_file(self):
with open('result.json', 'w') as fp:
json.dump(self.datum, fp)
def read_from_json(self, path):
dataset = None
with open(path, 'r') as json_file:
temp = json.load(json_file)
self.datum = temp[0]
count = 0
print(type(self.datum))
print(self.datum.keys())
for each in self.datum:
if count > 100:
break
print("")
print(each)
count += 1
def query_one(target, mapper):
obj = mapper[target]
gross = obj.get_income()
print("Exec query 1...")
print("For film: " + target + " the gross was: " + str(gross))
print("Fin query 1 \n")
def query_two(target, mapper):
obj = mapper[target]
projects = obj.get_films()
print("Exec query 2...")
print("For Actor: " + target + " they have worked on: " + str(projects))
print("Fin query 2 \n")
def query_three(target, mapper):
obj = mapper[target]
team = obj.get_cast()
print("Exec query 3...")
print("For film: " + target + " the cast was: " + str(team))
print("Fin query 3 \n")
def query_four(actor_map):
payload = []
for name in actor_map.keys():
obj = actor_map[name]
worth = obj.get_income()
payload.append((name, worth))
sorted_by_second = sorted(payload, key=lambda tup: tup[1])
sorted_by_second.reverse()
print("Exec query 4...")
print("The top grossing actors are: ")
for i in range(0, 5):
entry = sorted_by_second[i]
print(str(entry[0]) + " is worth " + str(entry[1]))
print("Fin query 4 \n")
def query_five(actor_map):
payload = []
for name in actor_map.keys():
obj = actor_map[name]
age = obj.get_age()
payload.append((name, age))
sorted_by_second = sorted(payload, key=lambda tup: tup[1])
sorted_by_second.reverse()
print("Exec query 5...")
print("The top oldest actors are: ")
for i in range(0, 5):
entry = sorted_by_second[i]
print(str(entry[0]) + " is age " + str(entry[1]))
print("Fin query 5 \n")
def query_six(film_map, target_year):
payload = []
print("Exec query 6...")
print("For the year " + str(target_year) + " films are...")
for movie in film_map.keys():
obj = film_map[movie]
film_year = obj.get_year()
if film_year == target_year:
            print("Film: " + movie)
print("Fin query 6 \n")
def query_seven(actor_map, target_year):
payload = []
print("Exec query 7...")
print("For the year " + str(target_year) + " actors born are...")
for person in actor_map.keys():
obj = actor_map[person]
birth_year = obj.get_year()
if birth_year == target_year:
print("Actor: " + person)
print("Fin query 7 \n")
actor_list = []
actor_dict = {}
film_list = []
film_dict = {}
graph = Graph()
graph.read_from_json("data.json")
def test_first_week():
# dataset = None
# with open('../out.json') as json_file:
# dataset = json.load(json_file)
graph.read_from_json("data.json")
#
# for each in dataset:
# # This parses current Json for Actors
# if each['page_type'] == 'actor':
# year = each['actor_year']
# name = each['name']
# films = []
# income = 0
# if (2019 - year) > 100:
# continue
# if name not in actor_list:
# actor_list.append(name)
# new_actor = Actor(name, year, films, income)
# actor_dict[name] = new_actor
#
# for each in dataset:
#
# # This parses current Json for films
# if each['page_type'] == "film":
# year = each['film_year']
# film_name = each['name']
# cast = each['film_cast']
# income = each['film_value']
# if film_name not in film_list:
# film_list.append(film_name)
# new_film = Film(film_name, year, cast, income)
# for person in cast:
# if person in actor_dict.keys():
# income = income // 2
# actor_obj = actor_dict[person]
# actor_obj.add_income(income)
# actor_obj.add_film(film_name)
#
# film_dict[film_name] = new_film
#
# for each in actor_list:
# entry = actor_dict[each]
# film_edges = entry.get_films()
# graph.new_vert(each, film_edges)
#
# for each in film_list:
# entry = film_dict[each]
# actor_edges = entry.get_cast()
# graph.new_vert(each, actor_edges)
#
# query_one("Drive (2011 film)", film_dict)
# query_two("Michael Caine", actor_dict)
# query_three("Drive (2011 film)", film_dict)
# query_four(actor_dict)
# query_five(actor_dict)
# query_six(film_dict, 2012)
# query_seven(actor_dict, 1964)
#
# graph.write_to_file()
|
[
"mloviska15@gmail.com"
] |
mloviska15@gmail.com
|
c4e3d2c0198df15fcb9635b190ff937c0a238289
|
5c35be01a7f659bb080544c5e62faa22307f01da
|
/pr412-my-charity-change-backend-python/migrations/versions/28f74cb10e35_add_send_tax_reciept_flasg_to_customer.py
|
383f316a63eb39b17dddf20b2043b814522640bd
|
[] |
no_license
|
dragonmaster-alpha/Charity-App
|
3b3932c0a05cc21b9d36bd2952673028cc56a11a
|
b66e2bc74fc15ca2a9c70f5261d05f5b9d17b451
|
refs/heads/master
| 2023-08-24T02:21:15.406784
| 2021-10-29T06:18:20
| 2021-10-29T06:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
"""Add send_tax_reciept flasg to customer
Revision ID: 28f74cb10e35
Revises: f3bdf790db9b
Create Date: 2020-08-10 14:53:05.732193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '28f74cb10e35'
down_revision = 'f3bdf790db9b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Customer', sa.Column('send_tax_reciept', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('Customer', 'send_tax_reciept')
# ### end Alembic commands ###
|
[
"78602151+dragonmaster-alpha@users.noreply.github.com"
] |
78602151+dragonmaster-alpha@users.noreply.github.com
|
eca0f1c99ec492e8b3c6a27b02d6557f8aa3ae1b
|
84c2fa4aed9094b5ec3cc612d28980afe5d42d34
|
/leetcode/day11_24.py
|
a6e997a5b64e66b53c1eb8fab9ec554c45fcd371
|
[] |
no_license
|
cyg2695249540/generatewework
|
186831a1b5c788e9b99e90d1a08bf6a8638131ce
|
cd01b0fc4a69cc2f2ed4c109afdf8771bee3bffd
|
refs/heads/master
| 2023-01-20T17:13:13.186034
| 2020-12-01T12:05:01
| 2020-12-01T12:05:01
| 310,201,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
# !/usr/bin/env Python3
# -*- coding: utf-8 -*-
# @FILE : day11_24.py
# @Author : Pluto.
# @Time : 2020/11/24 16:06
"""
Exercise 66: Plus One
Given a non-empty array of integers representing a non-negative integer, add one to the integer.
The most significant digit is stored at the head of the array, and each element holds a single digit.
You may assume the integer does not contain any leading zero, except for the number 0 itself.
Example 1:
Input: digits = [1,2,3]
Output: [1,2,4]
Explanation: the input array represents the number 123.
Example 2:
Input: digits = [4,3,2,1]
Output: [4,3,2,2]
Explanation: the input array represents the number 4321.
Example 3:
Input: digits = [0]
Output: [1]
Constraints:
1 <= digits.length <= 100
0 <= digits[i] <= 9
"""
def plusOne():
s="".join(str(x) for x in digits)
ss=str(int(s)+1)
r=[int(x) for x in ss]
return [0]*(len(digits)-len(r))+r
if __name__ == '__main__':
digits = [0, 0, 0]
print(plusOne())
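# Editor's note (hedged sketch): an alternative that avoids the str/int round trip is to
# carry from the right: walk the digits backwards, turning 9 into 0 and continuing,
# otherwise adding 1 and stopping; if every digit was 9, prepend a 1 (e.g. [9, 9] -> [1, 0, 0]).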
|
[
"2695249540@qq.com"
] |
2695249540@qq.com
|
5605956e0c0bed78aa6a229a16b89113b010781d
|
6a5c92bf039d768ab2e455e4d0652c2bd847a5ca
|
/backend/backend/settings.py
|
53e3b1db47ed90d2cc34db359f977b74796f5081
|
[] |
no_license
|
sahil143/feedback-form
|
a2bf39162f165b7ca7b11f0a793a3dd21f8c5a98
|
933ab90fee267b0ca88cee9d3363529e5e67992a
|
refs/heads/master
| 2023-05-07T16:48:15.743272
| 2021-05-31T06:56:57
| 2021-05-31T06:56:57
| 372,308,186
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,455
|
py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-3n4io1wg%c@6qpda_z*gfzxbc=_w)92h$zj5t(nq4_r@!)d*hn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'feedback_form'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
CORS_ORIGIN_WHITELIST = [
'http://localhost:3000'
]
|
[
"sahilbudhwar143@gmail.com"
] |
sahilbudhwar143@gmail.com
|
4b06fc12b6a9688a025351ad8cbe58b26b21f7c5
|
754b824173f3e5b1cef69890deaea71c2ad0b84d
|
/Linear Regression.py
|
3d28b9e8ff31b546708213c7417bde49d549cbcf
|
[] |
no_license
|
yanivmm/python
|
5a8e5c8a59c27a280a92cc7b8617e73b9d1c9a6e
|
e27f4d0716525e9c4ee6c8541cdb048670f5db22
|
refs/heads/master
| 2023-02-03T11:49:35.837406
| 2020-12-24T10:53:10
| 2020-12-24T10:53:10
| 257,415,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
#import
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
# read file
path = r'C:\Users\97250\Desktop\studied\R ,python\ניתוח מידע\Ecommerce Customers.csv'
cust = pd.read_csv(path)
# explore data
# A try to search the most affecting column on the Yearly Amount Spent and other
sns.jointplot(x='Time on Website',y='Yearly Amount Spent',data=cust)
sns.jointplot(x='Time on App',y='Yearly Amount Spent',data=cust)
sns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data = cust)
#pairplot
sns.pairplot(cust)
### Training and Testing Data
from sklearn.model_selection import train_test_split
X=cust[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']]
y=cust['Yearly Amount Spent']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# model
from sklearn.linear_model import LinearRegression
lm =LinearRegression().fit(X_train,y_train)
lm.coef_
prediction = lm.predict(X_test)
# visual plot of the differences between y_test and prediction
sns.scatterplot(x = y_test,y = prediction, hue =(abs(prediction-y_test)))
# numerical evaluation
MAE = np.mean(abs(prediction-y_test))
MSE = np.mean((prediction-y_test)**2)
RMSE= np.sqrt(np.mean((prediction-y_test)**2))
print('\n')
print('MAE: '+str(MAE),'MSE: '+str(MSE),'RMSE: '+str(RMSE),sep = '\n')
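# Editor's note (hedged): the same quantities are also available from scikit-learn, e.g.
#   from sklearn import metrics
#   metrics.mean_absolute_error(y_test, prediction)            # MAE
#   metrics.mean_squared_error(y_test, prediction)             # MSE
#   np.sqrt(metrics.mean_squared_error(y_test, prediction))    # RMSE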
# plot of the residuals of the y_test and prediction
residuals = (y_test-prediction)
plt.figure(figsize=(12,8))
sns.distplot(residuals,bins = 60,color='red')
# the residuals look normally distributed, so the model fits reasonably well
#creating a dataframe of the coefficients and its values
coefficient = lm.coef_
col = ['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']
coefficient_data = pd.DataFrame(coefficient,col,columns = ['coefficient'])
coefficient_data = coefficient_data.sort_values('coefficient',ascending=False)
# visualise each coefficient's effect
coefficient_data.plot(kind ='bar',figsize=(12,8),color='gold',fontsize = 18)
plt.title('\n Coefficients and its values\n',fontsize=34)
# only two most affecting coefficients
print('\n')
for i in range(2):
print(coefficient_data.index[i])
|
[
"noreply@github.com"
] |
noreply@github.com
|