blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
dce628934a13bb8243e2323ffafa2f00f3564e81
f318601d474d11eea375e24e8da825544ed91637
/prefija.py
230d46b526ca30ad83ef7086e579cf57a78e2630
[]
no_license
josefm09/triplos
484f1f428af3b4ad2b4a2cf90a9ee5087a0754e8
cb5c65cb9373938dcee729e1abf3a6b64455fa03
refs/heads/master
2020-03-15T19:27:47.105932
2018-05-06T05:22:32
2018-05-06T05:22:32
132,309,212
0
0
null
null
null
null
UTF-8
Python
false
false
2,222
py
# -*- coding: utf-8 -*- import sys import re #example (6+4)*8(7+4) print('Declare una expresion') #ex = str(input()) #se almacena la expresion ex = '(6+4)*8(7+4)' valid = False #se inicializa una variable en falso, luego se utilizara pila = [] #se inicializa la pila resultado = [] #se inicializa el arreglo de resultados orderRes = '' #se inicializa el string del resultado ordenado def prefija(value): #se declara la funcion prefija que lleva por parametro un valor for i in value: #se inicia un ciclo con el tamaño de la expresion if re.match('[a-zA-Z0-9-/?+?*?(?)]',i): #se pregunta si es una expresion correcta con expresiones regjulares valid = True #se cambia el valor a verdadero para seguir else: #si no termina el programa print('ingrese una expresion valida') sys.exit() if valid:#si es valido continua la funcion value = value[::-1] #se invierte el orden de la expresion for j in value: #se inicia un ciclo para recorrer la expresion if j in '+-*/)': #se buscan los operadores en la expresion pila.append(j) #si se encuentran se agregan a la pila if re.match('[a-zA-Z0-9]',j): #se buscan los numeros en la expresionm y las letras resultado.append(j) #se agregan al resultado if j == '(': #se busca en cierre de parentesis while pila: #se inicia un ciclo que mientras haya elementos en la pila resultado.append(pila.pop()) #se añadiran los elementos ultimos de la pila al resultado for n in resultado: #se recorre el resultado para eliminar los parentesis de cierre if n == ')':#si se encuentra resultado.remove(n)#se elimina prefija(ex)# se ejecuta la funcion for n in reversed(resultado): #se recorre el resultado invertido orderRes += n#para añadir los valores a un string triplo(orderRes)# se ejecuta la función para imprimir los triplos def triplo(value): #se declara la función que regresa las variables intermedias print('Notacion infija ', ex)#se imprime la expresion original print('Notacion prefija ',orderRes)#se imprime la expresion en prefija
[ "josecarlosfloresmoran@hotmail.com" ]
josecarlosfloresmoran@hotmail.com
0a1a637d989435418064ced834271773be9a47dc
943c9b8ee8414a467c6c20cdb517b3aaeb365fbc
/old_data/code/RunTest2.py
b9cd98009207fed3cb7a1cb01f7a7360eabf27f2
[]
no_license
Aralas/cross-reference_network
b735d9e7abf162cd36b8169c24726be25baa8ccf
04ed853895445548ddaa5b49bec2123ec43cc4fd
refs/heads/master
2020-04-08T12:26:03.225760
2019-03-08T08:20:56
2019-03-08T08:20:56
159,347,229
0
1
null
null
null
null
UTF-8
Python
false
false
4,750
py
# -*- coding:utf-8 -*- """ @author:Xu Jingyi @file:RunTest2.py.py @time:2018/12/2804:06 """ import tensorflow as tf import numpy as np import random import FactoryClass dataset = 'MNIST' model_type = 'CNN' seed = 10 # initialization = 'xavier' model_architecture = [[32, 5, 5], [64, 5, 5], [1500]] noise_level = 1 augmentation = True dropout = 0.5 learning_rate = 0.001 batch_size = 128 section_num = 20 epochs_for_binary = 5 epochs_for_multiple = 1 data_size = 1000 # first_merged_section = 5 # first_update_section = 50 # first_value_to_label_section = 10 # update_threshold = [0.8, 0.6] def randomly_sample_binary_data(x, y, data_size, label): indeces_positive = list(np.where(y[:, label] == 1)[0]) indeces_negative = set(range(len(y))) - set(indeces_positive) index_train = random.sample(indeces_positive, data_size) + random.sample(indeces_negative, data_size) x_small = x[index_train] y_small = np.array([1] * data_size + [0] * data_size).reshape(2 * data_size, 1) shuffle_index = np.arange(len(x_small)) random.shuffle(shuffle_index) x_small = x_small[shuffle_index] y_small = y_small[shuffle_index] return x_small, y_small def multi_label_to_binary_label(y, label): y_hat = np.zeros((len(y), 1)) indeces_positive = list(np.where(y[:, label] == 1)[0]) y_hat[indeces_positive] = 1 return y_hat def generate_probability_matrix(x, binary_classifier_list): num_classes = len(binary_classifier_list) num_sample = len(x) result = np.zeros((num_sample, num_classes)) for label in range(num_classes): classifier = binary_classifier_list[label] prediction = classifier.prediction(x).reshape((num_sample,)) result[:, label] = prediction with tf.Session() as sess: probability_matrix = sess.run(tf.nn.softmax(result)) return probability_matrix def run_cross_reference(): data_chooser = FactoryClass.ChooseDataset(dataset, seed, noise_level, augmentation) data_object = data_chooser.data_object x_train, y_train, y_train_orig, x_test, y_test = data_object.x_train, data_object.y_train, \ 
data_object.y_train_orig, data_object.x_test, data_object.y_test num_classes = data_object.num_classes input_size = data_object.input_size binary_classifier_list = [] binary_model_object = FactoryClass.ChooseNetworkCreator(model_type, model_architecture, input_size, learning_rate, dropout, 2) multi_model_object = FactoryClass.ChooseNetworkCreator(model_type, model_architecture, input_size, learning_rate, dropout, num_classes) record_file = 'test2_1/' + dataset + '.txt' record = open(record_file, 'a+') record.write('model architecture: ' + str(model_architecture) + '\n') record.write('noise level: ' + str(noise_level) + '\n') record.write('augmentation: ' + str(augmentation) + '\n') record.write('learning rate: ' + str(learning_rate) + '\n') record.write('batch size: ' + str(batch_size) + '\n') record.write('epoch for binary classifiers: ' + str(epochs_for_binary) + ', multi-classifier: ' + str( epochs_for_multiple) + '\n') record.write('data size: ' + str(data_size) + '\n') record.write('section: ' + str(section_num) + '\n') for label in range(num_classes): binary_classifier_list.append(binary_model_object.choose_network_creator()) multi_classifier = multi_model_object.choose_network_creator() for section in range(section_num): for label in range(num_classes): classifier = binary_classifier_list[label] x, y = randomly_sample_binary_data(x_train, y_train, data_size, label) classifier.train_model(x, y, batch_size, epochs_for_binary) loss_train, accuracy_train = classifier.evaluate_model(x, y) record.write(str(section) + '-th section, ' + str(label) + '-th classifier, loss: ' + str(loss_train) + ', train accuracy: ' + str(accuracy_train) + '\n') record.flush() for epoch in range(epochs_for_multiple): new_y = generate_probability_matrix(x_train, binary_classifier_list) multi_classifier.train_model(x_train, new_y, batch_size, epochs=1) loss_test, accuracy_test = multi_classifier.evaluate_model(x_test, y_test) record.write(str(section) + '-th section, ' + str(epoch) + 
'-th epoch, test accuracy:' + str( accuracy_test) + '\n') record.flush() record.write('*' * 30 + '\n') record.close() for noise_level in [0.5]: run_cross_reference()
[ "14300180119@fudan.edu.cn" ]
14300180119@fudan.edu.cn
aaf684914f88ee47e002fe6283aad1328b10f3ad
6cce023315d4083c7df0fcdeea2a037b00818878
/py-files/data_setup.py
3a1efa85492db400854022be0137e9d4defafa58
[]
no_license
Limmen/Distributed_ML
e02e865a123e552d3795c76a4a0846f2da6f3a55
d5b65a0bcb89182e3ac773b0a3cec46625dabccb
refs/heads/master
2021-05-09T03:52:41.530823
2018-01-28T12:21:47
2018-01-28T12:21:47
119,255,519
5
1
null
null
null
null
UTF-8
Python
false
false
2,735
py
import argparse import pyspark from pyspark.sql.functions import udf from pyspark.sql.types import * import tensorflow as tf import pandas as pd import numpy as np SEQ_LABELS_TRAIN = "data/y_train.csv" SEQ_FEATURES_TRAIN = "data/x_train.csv" SEQ_LABELS_TEST = "data/y_test.csv" SEQ_FEATURES_TEST = "data/x_test.csv" conf = pyspark.SparkConf() conf = conf.setAppName("har_data_setup").set("spark.hadoop.validateOutputSpecs", "false") sc = pyspark.SparkContext(conf=conf) sql = pyspark.SQLContext(sc) CLEANED_DATA_PATH = "./cleaned_data" def read_raw_data(sql): seq_features_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TRAIN) seq_labels_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TRAIN) seq_features_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TEST) seq_labels_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TEST) return seq_features_train_raw,seq_labels_train_raw, seq_features_test_raw, seq_labels_test_raw seq_features_train_raw, seq_labels_train_raw,seq_features_test_raw,seq_labels_test_raw = read_raw_data(sql) features_train_size = seq_features_train_raw.count() labels_train_size = seq_labels_train_raw.count() features_test_size = seq_features_test_raw.count() labels_test_size = seq_labels_test_raw.count() print("train feat size: {0}, train label size: {1}, test feat size {2}, test label size {3}".format(features_train_size, labels_train_size, features_test_size, labels_test_size)) seq_labels_test_raw.printSchema classes = seq_labels_train_raw.unionAll(seq_labels_test_raw).select("_c0").distinct().rdd.map(lambda row: row._c0).zipWithIndex().collectAsMap() seq_labels_train_clean = seq_labels_train_raw.select("_c0").rdd.map(lambda row: classes[row._c0]) seq_labels_test_clean = seq_labels_test_raw.select("_c0").rdd.map(lambda row: classes[row._c0]) labels_train_np = 
seq_labels_train_clean.collect() labels_test_np = seq_labels_test_clean.collect() np.savetxt(CLEANED_DATA_PATH + "/train/labels/y_train.csv", np.array(labels_train_np).astype(int), fmt='%i', delimiter=",") np.savetxt(CLEANED_DATA_PATH + "/test/labels/y_test.csv", np.array(labels_test_np).astype(int), fmt='%i', delimiter=",") np.savetxt(CLEANED_DATA_PATH + "/classes/classes.csv", np.array([[k,v] for k,v in classes.items()]),fmt="%s", delimiter=",") np.savetxt(CLEANED_DATA_PATH + "/size/sizes.csv", np.array([["features_train_size", features_train_size], ["labels_train_size", labels_train_size], ["features_test_size", features_test_size], ["labels_test_size", labels_test_size]]), fmt="%s", delimiter=",")
[ "kimham@kth.se" ]
kimham@kth.se
6f37bf00f359e16e59af581cd68fe789c123ffa9
fbc9f9088f636d71119fca87c2120f533e10668d
/Book_store/settings/base.py
eed8356f89434a96c8696d7152bf2bb7955b548e
[]
no_license
mastercsay2511/BookStoreV1
e726c18499482ef1bcd12acf4575856b68a536a6
5ee23b35a18d0ab4d7810bacf482e7f9f7a21768
refs/heads/master
2023-05-31T04:58:36.545403
2020-07-18T05:33:39
2020-07-18T05:33:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,214
py
import os from decouple import config # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname( os.path.dirname (os.path.abspath(__file__)))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY') # SECURITY WARNING: don't run with ³ug turned on in production! # DEBUG = True # ALLOWED_HOSTS = [] # INTERNAL_IPS = [ # '127.0.0.1', # ] # Application definition INSTALLED_APPS = [ # local apps "library.apps.LibraryConfig", "books.apps.BooksConfig", "pages.apps.PagesConfig", "users.apps.UsersConfig", 'core', # Third party apps 'django_countries', 'crispy_forms', # 'allauth', # 'allauth.account', # default 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # added # 'django.contrib.sites', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'Book_store.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'Book_store.wsgi.application' # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases # Password validation # 
https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static_in_env'), ] VENV_PATH = os.path.dirname(BASE_DIR) STATIC_ROOT = os.path.join(BASE_DIR, 'static_root') # optional STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ] MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') AUTH_USER_MODEL = "users.PersoUser" LOGIN_URL = "login" LOGIN_REDIRECT_URL = "home" LOGOUT_REDIRECT_URL = "home" # Gmail mailing method EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" # if smtp not confugured yet EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" EMAIL_HOST = "smtp.gmail.com" EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = os.environ.get("GMAIL_U") EMAIL_HOST_PASSWORD = os.environ.get("GMAIL_P") CRISPY_TEMPLATE_PACK = 'bootstrap4' # django allauth config # SITE_ID = 1 # AUTHENTICATIONS_BACKENDS = [ # 'django.contrib.auth.backends.ModelBackend', # 'allauth.account.auth_backends.AuthenticationBackend', # ] # ACCOUNT_FORMS = {'signup': 'Book_store.forms.PersoUserRegisterForm'}
[ "ffoot9898@protonmail.com" ]
ffoot9898@protonmail.com
7a8252f05c1ee87e900b5ed853a3cabc43688b96
74081581575e80b2b0f6b75ba912d58ea4f37ac6
/maskrcnn_benchmark/modeling/detector/detectors.py
35064d2f9992fd2c2e08d4a29ad31d1e5a229f8f
[]
no_license
youngfly11/LCMCG-PyTorch
5f6b9f231613b86ac7b250ca0f34229402e1615e
e95299b9a9f1b13e21750ef0dcde0941d703d009
refs/heads/master
2021-10-25T19:29:12.967318
2021-10-25T03:35:14
2021-10-25T03:35:14
221,908,808
56
12
null
null
null
null
UTF-8
Python
false
false
419
py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .generalized_rcnn import GeneralizedRCNN from .generalized_rcnn_det import GeneralizedRCNNDet _DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN, "GeneralizedRCNNDet": GeneralizedRCNNDet} def build_detection_model(cfg): meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE] return meta_arch(cfg)
[ "liuyf3@shanghaitech.edu.cn" ]
liuyf3@shanghaitech.edu.cn
67360e7af6f9cc8a5f230d48a2d706a49749ca35
fc9175c91bf99ce697e3bed46a7cc0db3343283e
/11Documentos/exemplo02.py
2418a4d8567d9e2c793a8faa0d452a2f60fe5918
[]
no_license
cerchiariluiza/CodigosScrapsCompiladosPython
27758df20243146d29bcdc612d58082af450d4d1
4b1b7429068fe3072e7bed3f720e8519f9d60629
refs/heads/main
2023-04-01T10:33:47.642816
2021-04-10T15:07:57
2021-04-10T15:07:57
356,616,553
0
0
null
null
null
null
UTF-8
Python
false
false
1,175
py
from urllib.request import urlopen from bs4 import BeautifulSoup import re import string def limpar_texto(texto): texto = texto.strip() texto_limpo = [] # Trocando um ou mais caracteres de nova linha (enter) por um espaço. texto = re.sub("\n+", " ", texto) # Trocando um ou mais espaços por um espaço. texto = re.sub(" +", " ", texto) # Remover os caracteres de controle texto = texto.replace(u'\xa0', u'') # Remover números entre colchetes (citações Wikipedia) texto = re.sub("\[[0-9]*\]", "", texto) texto = texto.split(" ") for item in texto: item = item.strip() # string.punctuation == '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' # Removendo caracteres de pontuação antes e depois da string item = item.strip(string.punctuation) if len(item) > 1 or (item.lower() == 'a' or item.lower() == 'e' or item.lower() == 'o'): texto_limpo.append(item) return texto_limpo html = urlopen("https://pt.wikipedia.org/wiki/Python") bsObj = BeautifulSoup(html) conteudo = bsObj.find("div", {"id":"mw-content-text"}).get_text() conteudo = limpar_texto(conteudo) print(conteudo)
[ "Lab@NOT-000-008.nscyber.local" ]
Lab@NOT-000-008.nscyber.local
e1e458abdbc5777af32bf1194e4add3db39fd867
36b9fa9f2d8ff655546a33cb47ddacd009bc00c9
/autogalaxy/profiles/light/linear/__init__.py
bb275334df92a6bf4b34d18f21e6aa123ae5dc62
[ "MIT" ]
permissive
Jammy2211/PyAutoGalaxy
67b76968b8516309b2ebdbff7affd5c1923cf0b1
d1a2e400b7ac984a21d972f54e419d8783342454
refs/heads/main
2023-08-19T01:00:22.320073
2023-08-17T15:39:46
2023-08-17T15:39:46
216,190,501
27
9
MIT
2023-09-13T14:07:43
2019-10-19T10:45:44
Python
UTF-8
Python
false
false
401
py
from .abstract import LightProfile, LightProfileLinear, LightProfileLinearObjFuncList from .gaussian import Gaussian, GaussianSph from .moffat import Moffat from .sersic import Sersic, SersicSph from .exponential import Exponential, ExponentialSph from .dev_vaucouleurs import DevVaucouleurs, DevVaucouleursSph from .sersic_core import SersicCore from .exponential_core import ExponentialCore
[ "james.w.nightingale@durham.ac.uk" ]
james.w.nightingale@durham.ac.uk
8e8f9d73ce6f9472f45639a730ffebcd0b78cfa8
3d078ad4b348fb3b967adcb819cd39410c00960a
/88lcm.py
12a493281bf2e2baf786d1aeccd360ec177ab889
[]
no_license
Dhandapani299/python
7275af2cf3a112d8cf930748c932ad61cb3dcbfb
ed8672a83f3810241138375eafd7420a24603734
refs/heads/master
2020-05-22T05:09:15.924855
2019-05-12T13:40:40
2019-05-12T13:40:40
186,230,941
0
0
null
null
null
null
UTF-8
Python
false
false
268
py
def lcm(m,n): if m>n: grater = m else: greater = n while(True): if((greater % m == 0) and (greater % n == 0)): lcm = greater break greater += 1 return lcm num 1 = int(input()) num 2 = int(input()) print(lcm(num,num2))
[ "noreply@github.com" ]
noreply@github.com
a3abadf3e5c58d3196f6ac02b55a07ff35093bd4
16c77266859989d156fe3f4d0ce3a37a1898ad38
/ml/sk/__init__.py
2e9e9ccc69e32e7f29ce91893b4883b61084bc5d
[ "MIT" ]
permissive
SRHerzog/ut
92620e66be2ea9707d9cd3cf390179326ed2eefe
894bd5607eb76676aaea7a37ed8a91b5fb5e805e
refs/heads/master
2021-06-30T19:15:46.131299
2017-09-15T20:47:35
2017-09-15T20:47:35
103,696,926
0
0
null
2017-09-15T20:08:10
2017-09-15T20:08:10
null
UTF-8
Python
false
false
54
py
from __future__ import division __author__ = 'thor'
[ "thorwhalen1@gmail.com" ]
thorwhalen1@gmail.com
df7e6169ccdea122ba78000885ec1008e3579ebd
bc539788b876773e294383863252c1637de9eb7f
/Pscrapy/PycharmProjects/Reptile/Practise/practise4.py
a3249cb05379978d945554dfcee685df198bd50c
[]
no_license
umsung/scrapy
4eb56bf74f3e617e49dcdec61cf77010eb912f4f
deacd9f289159c5af114b0dd3110448ad7eb43e8
refs/heads/master
2020-05-31T14:11:46.530793
2019-10-16T01:32:25
2019-10-16T01:32:25
190,321,772
3
0
null
null
null
null
UTF-8
Python
false
false
120
py
num = input('输入数字:').strip() num = int(num) for i in range(1, num): print(' '*(num-i),'* '*i)
[ "545699233@qq.com" ]
545699233@qq.com
b4ed712c26a2002dc0070864602ef4332b6c1617
a76803a441b76595372329a54e84e2b2d8fd5c6b
/clang/ubsan/__init__.py
e11a24908fdba31ce0eb9d93480cf990564256ff
[]
no_license
securesystemslab/sanitizing-for-security-benchmarks
ac66b1e6bd67a954a88e48751df8ea98b2b400b9
c2bf9d922ec8564208a7f926dce56e3a2dfc8355
refs/heads/master
2020-03-19T07:34:23.463598
2018-06-05T06:04:35
2018-10-04T04:28:34
136,126,254
20
4
null
null
null
null
UTF-8
Python
false
false
20
py
from ubsan import *
[ "dokyung.song@gmail.com" ]
dokyung.song@gmail.com
d26bfad3144de16da67ac8ac8a3c28948acff915
a98e18285e3ba5c5fde601bae933820cbaff3d7a
/mysite/blog/migrations/0003_auto_20180424_1228.py
302e9afd88f96af0041d1fcd411067ddc7d7d1db
[]
no_license
huanyiyuxin/mysql_mysite
bb72a1ee7207e8a88d1cd2c37e31058e3f81e7d8
55c94fad7a3ead45dbc57efda5dd10a6384705d5
refs/heads/master
2020-03-16T17:59:01.752362
2018-05-10T06:00:54
2018-05-10T06:00:54
132,854,767
0
0
null
null
null
null
UTF-8
Python
false
false
707
py
# Generated by Django 2.0.4 on 2018-04-24 04:28 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('blog', '0002_auto_20180424_1206'), ] operations = [ migrations.RemoveField( model_name='post', name='author', ), migrations.AlterField( model_name='blogpost', name='author', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL), ), migrations.DeleteModel( name='Post', ), ]
[ "954441@qq.com" ]
954441@qq.com
8cf423d1f9f0257fa371e065ae2d57628aeedaf2
ee4db47ccecd23559b3b6f3fce1822c9e5982a56
/Analyse Data/NumpPy.py
d2bf919d8ad04f330f143dfea2c477d7387bd3ee
[]
no_license
meoclark/Data-Science-DropBox
d51e5da75569626affc89fdcca1975bed15422fd
5f365cedc8d0a780abeb4e595cd0d90113a75d9d
refs/heads/master
2022-10-30T08:43:22.502408
2020-06-16T19:45:05
2020-06-16T19:45:05
265,558,242
0
1
null
null
null
null
UTF-8
Python
false
false
4,021
py
# Introduction to NumPy: Numerical Python #NumPy is great at storing and manipulating numerical data in arrays. import numpy as np # NumPy Arrays #A NumPy array is a special type of list. It’s a data structure that organizes multiple items. Each item can be of any type (strings, numbers, or even other arrays). test_1 = np.array([92, 94, 88, 91, 87]) # test_1 is now a numpy array #Creating an Array from a CSV # Note the delimiter can be in other formats such as semi colon and tabs. test_2 = np.genfromtxt('test_2.csv', delimiter=',') # Operations with NumPy Arrays # Let’s compare how to add a number to each value in a python list versus a NumPy array: # With a list A = [1, 2, 3, 4, 5,6] A_plus_3 = [] for i in range(len(A)): l_plus_3.append(A[i] + 3) # With an array a = np.array(l) a_plus_3 = a + 3 #Squaring each value: a ** 2 #array([ 1, 4, 9, 16, 25, 36]) #Taking the square root of each value: np.sqrt(a) #array([ 1, 1.41421356, 1.73205081, 2, 2.23606798, 2.44948974]) # Add extra 2 points to test_3 test_3 = np.array([87, 85, 72, 90, 92]) test_3_fixed = test_3 + 2 # Operations with NumPy Arrays II # Arrays can also be added to or subtracted from each other in NumPy, # assuming the arrays have the same number of elements. a = np.array([1, 2, 3, 4, 5]) b = np.array([6, 7, 8, 9, 10]) a + b #array([ 7, 9, 11, 13, 15]) test_1 = np.array([92, 94, 88, 91, 87]) test_2 = np.array([79, 100, 86, 93, 91]) test_3 = np.array([87, 85, 72, 90, 92]) test_3_fixed = test_3 + 2 total_grade = test_1 + test_2 + test_3_fixed # average score final_grade = total_grade / 3 print(final_grade) # Two-Dimensional Arrays # in NumPy we can create an array of arrays. # If the arrays that make up our bigger array are all the same size, then it has a special name: a two-dimensional array. 
#we could have also stored all of this data in a single, two-dimensional array: Notice the double square brackets syntax [[]] np.array([[92, 94, 88, 91, 87], [79, 100, 86, 93, 91], [87, 85, 72, 90, 92]]) coin_toss_again = np.array([[1,0,0,1,0],[0,0,1,1,1]]) # Selecting Elements from a 1-D Array # This uses normal indexing test_1 = np.array([92, 94, 88, 91, 87]) test_2 = np.array([79, 100, 86, 93, 91]) test_3 = np.array([87, 85, 72, 90, 92]) jeremy_test_2 = test_2[-2] #grabs 93 manual_adwoa_test_1 = test_1[1:3] # grabs 94 88 # Selecting Elements from a 2-D Array # The syntax for selecting from a 2-d array is a[row,column] where a is the array. a = np.array([[32, 15, 6, 9, 14], [12, 10, 5, 23, 1], [2, 16, 13, 40, 37]]) a[2,1] #16 # selects the first column a[:,0] #array([32, 12, 2]) a[1,:] #array([12, 10, 5, 23, 1]) # selects the first three elements of the first row a[0,0:3] #array([32, 15, 6]) student_scores = np.array([[92, 94, 88, 91, 87], [79, 100, 86, 93, 91], [87, 85, 72, 90, 92]]) tanya_test_3 = student_scores[2,0] cody_test_scores = student_scores[:,4] # Logical Operations with Arrays # < > == != | & porridge = np.array([79, 65, 50, 63, 56, 90, 85, 98, 79, 51]) cold = porridge[porridge < 60] hot = porridge[porridge > 80] just_right = porridge[(porridge >= 60) & (porridge <= 80)] print(cold, hot, just_right ) # Review import numpy as np temperatures = np.genfromtxt('temperature_data.csv',delimiter=',') print(temperatures) temperatures_fixed = temperatures + 3.0 monday_temperatures = temperatures_fixed[0,:] thursday_friday_morning = temperatures_fixed[3:,1] temperature_extremes = temperatures_fixed[(temperatures_fixed < 50) | (temperatures_fixed > 60)] # Project Bettys Bakery import numpy as np cupcakes = np.array([2,0.75,2,1,0.5]) recipes = np.genfromtxt('recipes.csv',delimiter=',') print(recipes) eggs = recipes[:,2] print(eggs) #egg = recipes[recipes[:,2] == 1] cookies = recipes[2,:] print(cookies) double_batch = cupcakes * 2 print(double_batch) grocery_list 
= cookies + double_batch print(grocery_list)
[ "oluchukwuegbo@gmail.com" ]
oluchukwuegbo@gmail.com
d1974e0e287d5e7372b79c80651ea675983be9e0
1c5cb22e6d0faab6ac11e7b8f4aadd2308a5b75f
/spark/persist_transactions.py
38d77568c2748a5a7cec5115a45942648b135657
[]
no_license
alexanderbatanov/safepay
c758b790334dae7f95a47d311d2ff2fb30b52f5c
02836bec67561d6a3ae6d1f8b0788837f2f9f0b3
refs/heads/master
2021-05-01T07:24:54.744465
2018-02-27T04:41:47
2018-02-27T04:41:47
121,155,217
1
1
null
null
null
null
UTF-8
Python
false
false
1,535
py
# persist_transactions - SafePay spark streaming job (initial data load) from pyspark import SparkContext from pyspark.streaming import StreamingContext from pyspark.streaming.kafka import KafkaUtils from pyspark.sql import SparkSession from pyspark.sql.types import * from pyspark.sql.functions import * from cassandra.cluster import Cluster from datetime import datetime import uuid # spark sc = SparkContext(appName="transactions1") sc.setLogLevel('ERROR') ssc = StreamingContext(sc, 2) spark = SparkSession(sc) def processRDD(rdd): if rdd.isEmpty(): return rdd2 = rdd.map(lambda x: str(uuid.uuid4()) + ";" + x) rdd3 = rdd2.map(lambda x: x.split(";")) # persist transactions in tx table for further proessing by a batch job df = rdd3.toDF(['tx_id', 'source', 'sent_dt', 'tx_dt', 'from_party_id', 'from_party_name', 'to_party_id', 'to_party_name']) df = df.drop('source')\ .drop('sent_dt')\ .withColumn('denied', lit(False))\ .withColumn('reason', lit('passthru')) df.write\ .format("org.apache.spark.sql.cassandra")\ .mode('append')\ .options(table="tx", keyspace="p2p")\ .save() # kafka topic = "transactions1" brokers_dns_str = "0.0.0.0:9092" kvs=KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers_dns_str}) # read stream batch=kvs.map(lambda x: x[1]) batch.count().map(lambda x:'Messages in this batch: %s' % x).pprint() batch.foreachRDD(processRDD) ssc.start() ssc.awaitTermination()
[ "alexb@MACAROSHA.local" ]
alexb@MACAROSHA.local
637c084fe5757dc015d5c194b45ffe7749980d41
ed5d9407a3321b079b4cf7d67f09c2e32a82aef6
/ensemble_predicted.py
cfe09e7b801495d806d569ba819b36dc55b4cb45
[]
no_license
laurinpaech/segme-net
45b087e015d48116d151a5e8b3f196f145ff0831
df19792336696dddbf419aa5e7a18eea66f66869
refs/heads/master
2022-07-17T14:20:13.074387
2019-11-02T17:25:08
2019-11-02T17:25:08
175,799,081
0
0
null
2022-06-21T22:15:48
2019-03-15T10:27:13
Python
UTF-8
Python
false
false
1,725
py
from data_loader.data import * import argparse import os """ Use this to combine pre_ensemble images to fully ensembled output submission images Images are in data/submit_output/XXX/pre_ensembled/ """ parser = argparse.ArgumentParser() parser.add_argument('--desc', type=str, default='stacked_unet_default', help='How to name this run, defines folder in logs dir, only use "a-z,A-Z,1-9,_" pls') parser.add_argument('--cutoff', type=float, default=0.5, help='Cutoff value for ensembling data.') parser.add_argument('--nr', type=float, default=0.5, help='Cutoff value for ensembling data.') parser.add_argument('--nr_of_stacks', type=int, default=2, help='number of the stack to ensemble and get final results on') args = parser.parse_args() # Set parameters cutoff = args.cutoff nr = args.nr # Create paths test_output_path = "data/submit_output" output_path = os.path.join(test_output_path, args.desc) test_predict_path = "data/test" temp_path = "data/temp" temp_path_2 = "data/temp_2" # Create dirs os.makedirs(temp_path, exist_ok=True) os.makedirs(temp_path_2, exist_ok=True) # Should be full with images output_path_pre_ensembled = os.path.join(output_path, "pre_ensembled") output_path_ensembled = os.path.join(output_path, "ensembled_{0}".format(nr)) os.makedirs(output_path_ensembled, exist_ok=True) # create ensemble predictions ensemble_predictions(test_predict_path, output_path_pre_ensembled, temp_path_2, nr_of_stacks=args.nr_of_stacks, alpha=cutoff) # save into output_path_ensembled because need resize # resize ensembled predictions and save them saveSubmitResizeEnsemble(temp_path, temp_path_2, output_path_ensembled)
[ "paech.laurin@gmail.com" ]
paech.laurin@gmail.com
9e66cd2e75a516c445ee081fad2339a4088a1f5e
d53605afd805311cc6a37b669afa2162965a5020
/sentimental_analysis/classifier.py
8920ec62cc45c2d3fc80797c992c8271bc57bb85
[]
no_license
ahmad-elbatanouni/sentimental-analyzer
659085c14d80ef3d20c8052e1ff58a04cedee2ad
b75b6871e2cc562b9d56cbab5d21cd6aba2caedb
refs/heads/master
2021-01-18T03:50:03.745465
2015-07-29T06:08:29
2015-07-29T06:08:29
39,876,505
1
0
null
null
null
null
UTF-8
Python
false
false
2,872
py
import helpers


class Classifier:
    """Word-frequency (naive-Bayes style) sentiment classifier.

    NOTE(review): this module uses Python 2 ``print`` statements.

    Combines per-word probabilities derived from a positive and a negative
    corpus into a single sentiment verdict for a piece of text.
    """

    def __init__(self, positive_corpus, negative_corpus):
        # Corpora are expected to expose token_count(word) and a
        # total_tokens attribute -- TODO confirm against the corpus class.
        self.positive_corpus = positive_corpus
        self.negative_corpus = negative_corpus
        # Running products of p and of (1 - p) over the words seen so far;
        # 0 doubles as the "nothing recorded yet" marker.
        self.total_probability = 0
        self.inverse_total_probability = 0
        # NOTE(review): tolerance is never read anywhere in this class.
        self.tolerance = 0.05

    def classify(self, text):
        """Classify text; returns {"sentiment": ..., "probability": ...}."""
        stop_words = helpers.get_stop_words()
        # Naive whitespace tokenisation; stop words are skipped entirely.
        for word in text.split(" "):
            if word.lower() in stop_words:
                continue
            positive_matches = self.positive_corpus.token_count(word)
            negative_matches = self.negative_corpus.token_count(word)
            positive_total = self.positive_corpus.total_tokens
            negative_total = self.negative_corpus.total_tokens
            # print word
            # print "========="
            probability = self.calculate_probability(positive_matches, positive_total, negative_matches, negative_total)
            self.record_probability(probability)
        final_probability = self.combine_probabilities()
        return {"sentiment": self.compute_sentiment(final_probability), "probability": final_probability}

    def calculate_probability(self, positive_matches, positive_total, negative_matches, negative_total):
        """Estimate P(positive | word) with add-one style smoothing.

        The raw ratio of positive to total relative frequency is shrunk
        towards 0.5 by the ((total*p)+1)/(total+2) correction, so rare
        words cannot dominate the verdict.
        """
        # print "pos_mat ", positive_matches, ", pos_tot: ", positive_total, ", neg_mat: ", negative_matches, "neg_tot: ", negative_total
        total = positive_matches + negative_matches
        positive_ratio = positive_matches / float(positive_total)
        negative_ratio = negative_matches / float(negative_total)
        # Unseen words (both ratios zero) fall back to probability 0.
        probability = positive_ratio / (positive_ratio + negative_ratio) if positive_ratio + negative_ratio != 0 else 0
        print "total: ", total, ", positive_ratio: ", positive_ratio, ", negative_ratio: ", negative_ratio, ", prob: ", probability
        # print ((unknown_word_strength * unknown_word_probability) + (total * probability)) / (unknown_word_strength + total)
        # print "========================================================================================"
        return ((total * probability) + 1) / (total + 2)

    def record_probability(self, probability):
        """Fold one word's probability into the running products."""
        if probability is None:
            return
        # First recorded value replaces the 0 sentinel; afterwards multiply.
        self.total_probability = probability if self.total_probability == 0 else self.total_probability * probability
        self.inverse_total_probability = (1 - probability) if self.inverse_total_probability == 0 else self.inverse_total_probability * (1 - probability)

    def combine_probabilities(self):
        """Combine recorded word probabilities (Fisher-style combination)."""
        # No evidence recorded at all -> neutral 0.5.
        if self.total_probability == 0:
            return 0.5
        return self.total_probability / (self.total_probability + self.inverse_total_probability)

    def compute_sentiment(self, probability):
        """Map a combined probability to a sentiment label."""
        if probability < (0.5):
            return "Negative"
        if probability > (0.5):
            return "Positive"
        return "Neutral"
[ "ahmad@Ahmad.(none)" ]
ahmad@Ahmad.(none)
1b1a8f0378daac92ecbb890eb38b46894b17e9a1
15a833cfb3deec9278ca4a458d55aa7132ab63d1
/kakao_codingTest_winter_Internship/test2.py
f74ac217ea6f61186801b083c03be4857341bda1
[]
no_license
jaehui327/pythonAlgorithm
9db6bfb3446ab3049b39d575170d751a912a4646
3a60fb1694fa17c8e5dc7516d3ed2bb07a91c69b
refs/heads/master
2020-07-22T07:53:38.977926
2019-12-23T14:12:52
2019-12-23T14:12:52
207,121,744
0
0
null
null
null
null
UTF-8
Python
false
false
1,024
py
def solution(s):
    """Recover the original order of a tuple from its prefix-set string.

    ``s`` looks like ``"{{2},{2,1},{2,1,3}}"``: a set of sets where each
    inner set is a snapshot of a growing tuple's prefix.  Sorting the
    snapshots by size and taking the element that newly appears at each
    step reproduces the tuple order.

    Returns:
        list[int]: the elements in their original tuple order.
    """
    # Split on ",{" and strip remaining braces to isolate each inner group.
    groups = [chunk.replace("{", "").replace("}", "") for chunk in s.split(",{")]
    # Shorter strings correspond to smaller (earlier) snapshots; the sort
    # is stable so equal lengths keep their input order, as before.
    groups.sort(key=len)
    # Parse each snapshot into a list of ints.
    remaining = [[int(token) for token in group.split(",")] for group in groups]

    answer = []
    while remaining:
        # The head of the smallest unprocessed snapshot is the next element.
        num = remaining.pop(0)[0]
        answer.append(num)
        # Remove that element (first occurrence) from every later snapshot.
        for snapshot in remaining:
            if num in snapshot:
                snapshot.remove(num)
    return answer


if __name__ == "__main__":
    # s = "{{2},{2,1},{2,1,3},{2,1,3,4}}"
    # s = "{{20,111},{111}}"
    s = "{{4,2,3},{3},{2,3,4,1},{2,3}}"
    print(solution(s))
[ "jaehui327@naver.com" ]
jaehui327@naver.com
b85ffbbc411490b508f4ad212c32852d48891acc
8e559df1b6f34a09d32b4e854efa0dd6e4b62fe0
/cs5png3.py
2a28a0c248d3a21729f14d2d4a4b8c5fa009fca9
[]
no_license
norahpack/carbonEmissions
5108dbd64a9fd5181d9a496db57f551aa8f2bc2f
9b877996039c57d64e2236122fda6b15e9289f19
refs/heads/main
2023-08-29T16:08:28.642472
2021-11-11T22:23:20
2021-11-11T22:23:20
427,158,503
0
0
null
null
null
null
UTF-8
Python
false
false
3,190
py
import os
import sys
from PIL import Image
import time


def saveRGB(boxed_pixels, filename="out.png"):
    """Save the given pixel array in the chosen file as an image."""
    print(f'Starting to save {filename}...', end='')
    w, h = getWH(boxed_pixels)
    im = Image.new("RGB", (w, h), "black")
    px = im.load()
    for r in range(h):
        for c in range(w):
            # Pixel values may arrive as lists; PIL wants tuples.
            px[c, r] = tuple(boxed_pixels[r][c])
    im.save(filename)
    time.sleep(0.5)  # brief pause so the saved-message is visible
    print(filename, "saved.")


def getRGB(filename="in.png"):
    """Read a png image file and return it as a list of lists of pixels
    (i.e., an array of pixels)."""
    original = Image.open(filename)
    print(f"{filename} contains a {original.size[0]}x{original.size[1]}"
          f" {original.format} image with mode {original.mode}.")
    WIDTH, HEIGHT = original.size
    px = original.load()
    PIXEL_LIST = []
    for r in range(HEIGHT):
        row = []
        if original.mode == 'RGB':
            for c in range(WIDTH):
                row.append(px[c, r][:3])
        else:
            # Grayscale (or other single-band) image: replicate the value
            # into an (r, g, b) triple.
            for c in range(WIDTH):
                pixel = px[c, r]
                row.append((pixel, pixel, pixel))
        PIXEL_LIST.append(row)
    return PIXEL_LIST


def getWH(px):
    """Given a pixel array, return its width and height as a pair."""
    h = len(px)
    w = len(px[0])
    return w, h


def binaryIm(s, cols, rows):
    """Given a binary image s of size rows x cols, represented as a single
    string of 1's and 0's, write a file named "binary.png", which contains
    an equivalent black-and-white image.

    BUGFIX: the original clobbered the row loop index with the row list
    (``row = []`` inside ``for row in ...``) and clobbered the pixel
    accumulator, raising TypeError on any call; variables are now distinct.
    """
    pixels = []
    for r in range(rows):
        row = []
        for c in range(cols):
            v = int(s[r * cols + c]) * 255  # '1' -> white, '0' -> black
            row.append([v, v, v])
        pixels.append(row)
    saveRGB(pixels, 'binary.png')


class PNGImage:
    """Class to support simple manipulations on PNG images."""

    def __init__(self, width, height):
        """Construct a PNGImage of the given dimensions (all white)."""
        self.width = width
        self.height = height
        self.image_data = [[(255, 255, 255) for col in range(width)]
                           for row in range(height)]

    def plotPoint(self, col, row, rgb=(0, 0, 0)):
        """Plot a single RGB point at the given location in a PNGImage."""
        # Ensure that rgb is a three-tuple (warn but continue, as before).
        if not isinstance(rgb, (list, tuple)) or len(rgb) != 3:
            print(f"In plotPoint, the color {rgb} was not"
                  f" in a recognized format.", file=sys.stderr)
        # Check if we're in bounds.
        if 0 <= col < self.width and \
           0 <= row < self.height:
            self.image_data[row][col] = rgb
        else:
            print(f"In plotPoint, column {col} or row {row} is out of bounds.",
                  file=sys.stderr)
        return

    def saveFile(self, filename="test.png"):
        """Save the object's data to a file."""
        # We reverse the rows so that the y direction
        # increases upwards...
        saveRGB(self.image_data[::-1], filename)
[ "noreply@github.com" ]
noreply@github.com
bc441a7d134649701613b6e1325e68ee43471d29
b70be26411a98b6ef3f7598fce4f621193ff281d
/hack3/test_videocap.py
77e4cd2ea33d6cc6ff3b52bf049d480097fab3e2
[]
no_license
comcomet/first-repository
b914a1b9ac57296645b4ebf377a184ba26b63c25
6eadb7d916e69478f12e8740aebba264d2f93027
refs/heads/master
2023-02-01T15:55:11.943969
2020-12-20T02:34:10
2020-12-20T02:34:10
282,796,457
1
0
null
null
null
null
UTF-8
Python
false
false
7,047
py
#!/usr/bin/python
# S2VT-style video captioning, inference/test script.
# NOTE(review): Python 2 + TensorFlow 1.x graph-mode code.
import numpy as np
import tensorflow as tf
from utils import *   # provides word2id, id2word, get_bias_vector, fetch_data_batch_val, convert_caption, print_in_english, playVideo -- TODO confirm
import sys

#GLOBAL VARIABLE INITIALIZATIONS TO BUILD MODEL
n_steps = 80        # caption/video sequence length (timesteps)
hidden_dim = 500    # LSTM hidden-state size
frame_dim = 4096    # per-frame feature vector size
batch_size = 1
vocab_size = len(word2id)
bias_init_vector = get_bias_vector()

def build_model():
    """Build the two-layer (video + caption) LSTM captioning graph.

    Creates the weight matrices that transform
        * frames to caption dimension,
        * hidden state to vocabulary dimension,
    plus the word embedding matrix, then wires the video LSTM's outputs
    into the caption LSTM and computes a mask-weighted cross-entropy loss.

    Returns (video, caption, caption_mask, output_logits, loss, dropout_prob).
    """
    print "Network config: \nN_Steps: {}\nHidden_dim:{}\nFrame_dim:{}\nBatch_size:{}\nVocab_size:{}\n".format(n_steps, hidden_dim, frame_dim, batch_size, vocab_size)

    #Create placeholders for holding a batch of videos, captions and caption masks
    video = tf.placeholder(tf.float32,shape=[batch_size,n_steps,frame_dim],name='Input_Video')
    caption = tf.placeholder(tf.int32,shape=[batch_size,n_steps],name='GT_Caption')
    caption_mask = tf.placeholder(tf.float32,shape=[batch_size,n_steps],name='Caption_Mask')
    dropout_prob = tf.placeholder(tf.float32,name='Dropout_Keep_Probability')

    # Frame features -> hidden dimension projection.
    with tf.variable_scope('Im2Cap') as scope:
        W_im2cap = tf.get_variable(name='W_im2cap',shape=[frame_dim, hidden_dim],
                                   initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))
        b_im2cap = tf.get_variable(name='b_im2cap',shape=[hidden_dim],
                                   initializer=tf.constant_initializer(0.0))
    # Hidden state -> vocabulary logits projection; bias seeded from the
    # corpus word-frequency vector.
    with tf.variable_scope('Hid2Vocab') as scope:
        W_H2vocab = tf.get_variable(name='W_H2vocab',shape=[hidden_dim,vocab_size],
                                    initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))
        b_H2vocab = tf.Variable(name='b_H2vocab',initial_value=bias_init_vector.astype(np.float32))
    # Word embedding lookup table.
    with tf.variable_scope('Word_Vectors') as scope:
        word_emb = tf.get_variable(name='Word_embedding',shape=[vocab_size,hidden_dim],
                                   initializer=tf.random_uniform_initializer(minval=-0.08,maxval=0.08))
    print "Created weights"

    #Build two LSTMs, one for processing the video and another for generating the caption
    with tf.variable_scope('LSTM_Video',reuse=None) as scope:
        lstm_vid = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)
        lstm_vid = tf.nn.rnn_cell.DropoutWrapper(lstm_vid,output_keep_prob=dropout_prob)
    with tf.variable_scope('LSTM_Caption',reuse=None) as scope:
        lstm_cap = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)
        lstm_cap = tf.nn.rnn_cell.DropoutWrapper(lstm_cap,output_keep_prob=dropout_prob)

    #Prepare input for lstm_video
    # Project every frame, then pad with n_steps-1 zero timesteps so both
    # LSTMs run for the same (2*n_steps-1) timesteps.
    video_rshp = tf.reshape(video,[-1,frame_dim])
    video_rshp = tf.nn.dropout(video_rshp,keep_prob=dropout_prob)
    video_emb = tf.nn.xw_plus_b(video_rshp,W_im2cap,b_im2cap)
    video_emb = tf.reshape(video_emb,[batch_size,n_steps,hidden_dim])
    padding = tf.zeros([batch_size,n_steps-1,hidden_dim])
    video_input = tf.concat([video_emb,padding],1)
    print "Video_input: {}".format(video_input.get_shape())
    #Run lstm_vid for 2*n_steps-1 timesteps
    with tf.variable_scope('LSTM_Video') as scope:
        out_vid,state_vid = tf.nn.dynamic_rnn(lstm_vid,video_input,dtype=tf.float32)
    print "Video_output: {}".format(out_vid.get_shape())

    #Prepare input for lstm_cap
    # Zero-pad the first n_steps (the "watching" phase), then feed the
    # embedded ground-truth words; concat with the video LSTM's outputs.
    padding = tf.zeros([batch_size,n_steps,hidden_dim])
    caption_vectors = tf.nn.embedding_lookup(word_emb,caption[:,0:n_steps-1])
    caption_vectors = tf.nn.dropout(caption_vectors,keep_prob=dropout_prob)
    caption_2n = tf.concat([padding,caption_vectors],1)
    caption_input = tf.concat([caption_2n,out_vid],2)
    print "Caption_input: {}".format(caption_input.get_shape())
    #Run lstm_cap for 2*n_steps-1 timesteps
    with tf.variable_scope('LSTM_Caption') as scope:
        out_cap,state_cap = tf.nn.dynamic_rnn(lstm_cap,caption_input,dtype=tf.float32)
    print "Caption_output: {}".format(out_cap.get_shape())

    #Compute masked loss
    # Only the caption-generation half of the outputs is scored; padding
    # positions are zeroed out via caption_mask.
    output_captions = out_cap[:,n_steps:,:]
    output_logits = tf.reshape(output_captions,[-1,hidden_dim])
    output_logits = tf.nn.dropout(output_logits,keep_prob=dropout_prob)
    output_logits = tf.nn.xw_plus_b(output_logits,W_H2vocab,b_H2vocab)
    output_labels = tf.reshape(caption[:,1:],[-1])
    caption_mask_out = tf.reshape(caption_mask[:,1:],[-1])
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output_logits,labels=output_labels)
    masked_loss = loss*caption_mask_out
    loss = tf.reduce_sum(masked_loss)/tf.reduce_sum(caption_mask_out)
    return video,caption,caption_mask,output_logits,loss,dropout_prob

if __name__=="__main__":
    with tf.Graph().as_default():
        learning_rate = 0.00001
        video,caption,caption_mask,output_logits,loss,dropout_prob = build_model()
        optim = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)
        # Checkpoint restored from a previous training run.
        ckpt_file = 'S2VT_Dyn_10_0.0001_300_46000.ckpt.meta'
        saver = tf.train.Saver()
        with tf.Session() as sess:
            if ckpt_file:
                saver_ = tf.train.import_meta_graph(ckpt_file)
                saver_.restore(sess,'./S2VT_Dyn_10_0.0001_300_46000.ckpt')
                print "Restored model"
            else:
                sess.run(tf.initialize_all_variables())
            # Interactive evaluation loop: greedy word-by-word decoding.
            while(1):
                vid,caption_GT,_,video_urls = fetch_data_batch_val(1)
                # Start decoding from a <BOS>-only caption.
                caps,caps_mask = convert_caption(['<BOS>'],word2id,80)
                for i in range(n_steps):
                    o_l = sess.run(output_logits,feed_dict={video:vid,
                                                            caption:caps,
                                                            caption_mask:caps_mask,
                                                            dropout_prob:1.0})
                    out_logits = o_l.reshape([batch_size,n_steps-1,vocab_size])
                    # Greedy argmax decode; feed the chosen word back in.
                    output_captions = np.argmax(out_logits,2)
                    caps[0][i+1] = output_captions[0][i]
                    print_in_english(caps)
                    if id2word[output_captions[0][i]] == '<EOS>':
                        break
                print '............................\nGT Caption:\n'
                print_in_english(caption_GT)
                play_video = raw_input('Should I play the video? ')
                if play_video.lower() == 'y':
                    playVideo(video_urls)
                test_again = raw_input('Want another test run? ')
                if test_again.lower() == 'n':
                    break
[ "yongsungs@gmail.com" ]
yongsungs@gmail.com
cd12fc3d74b637e459f67a399298b68bce6112f1
2840fd738487060aa7604f60e70eb623f15a019e
/Lecture71_Somkiat_N.py
1660e7133d4e910222ab70747880801d5539a917
[]
no_license
knutz32/CP3-Somkiat-Nattawut
421f58a6953ede7326d2ac0ad6d972a52aaa2047
911feb4732b34dbcd62d19edae392dd0b0f800fa
refs/heads/master
2020-06-04T06:59:14.345898
2019-06-29T14:10:45
2019-06-29T14:10:45
191,915,041
0
0
null
null
null
null
UTF-8
Python
false
false
499
py
# Collected order: names and prices share an index.
menuList = []
priceList = []

# Read menu items from the user until they type "exit".
while True:
    menuName = input("Enter Menu : ")
    if menuName.lower() == "exit":
        break
    menuPrice = int(input("Enter Price : "))
    menuList.append(menuName)
    priceList.append(menuPrice)


def showBill():
    """Print a simple receipt for everything ordered so far."""
    print((" Food Shop ").center(30, "-"))
    for name, price in zip(menuList, priceList):
        print("%s \t\t\t %s THB" % (name, price))
    print("-" * 30)
    print("Total Price \t %s THB" % (sum(priceList)))


showBill()
[ "noreply@github.com" ]
noreply@github.com
3f88cac88fa1c16cd8a12b930fb31e2d30401da9
3ec0d6535dca5e5da5fc28b6f8cb800cf9b676cc
/venv/lib/python3.6/keyword.py
e94df0be026f611988df825e5dad1d42bc6078d6
[]
no_license
omarHazim/DB.github.io
6eecaeb1643ee7c41e245e2ad2847377f377e943
2b9dc94966c710083c0b0a3a82acfdd3911e4b35
refs/heads/master
2021-09-24T23:40:15.769464
2018-10-15T17:55:55
2018-10-15T17:55:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
46
py
/Users/omar/anaconda3/lib/python3.6/keyword.py
[ "omar@omars-MacBook.local" ]
omar@omars-MacBook.local
a05385930991319e2dc5ebf3029f337f10410b3a
ffba5c4a64a87214160c5904b220be8a6e88cd58
/python-packages/maizy_f/r.py
5e83d68deeca516eed184231752129e90e707f19
[]
no_license
maizy/dev-setup
6e8ae5bc9b56dd85f7612b453e92e31043816189
5eb8473cf9c66c66ff8fd9e8c72cecf931f62494
refs/heads/master
2022-11-13T22:08:00.184435
2022-11-13T08:33:25
2022-11-13T08:33:25
7,286,016
2
0
null
2017-12-22T11:57:00
2012-12-22T13:20:57
Python
UTF-8
Python
false
false
361
py
# coding: utf-8
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013

from __future__ import print_function, absolute_import, unicode_literals

from fabric.api import task, run


@task
def info():
    """Show basic information about the remote host."""
    for command in ('uname -a', 'hostname'):
        run(command)
    # lsb_release only exists on Debian-family distributions.
    lsb_rel = run('which lsb_release')
    if lsb_rel == '':
        return
    print('Debian like os found')
    run('lsb_release -a')
[ "nikita@maizy.ru" ]
nikita@maizy.ru
5fda4f9c3fff122d9c335a769425599b16276069
06ee55a195eca38e8cb81222704eb216f873c080
/persona/persona/settings.py
7a6480b3f17d5c78031a13cb42d073fcbd7f59a8
[]
no_license
TonyWhiteb/PersonaProject
bb0385e93d4ec610fd3ada0bcb2b7f3fa65b7d8a
5f14b21a7d83e6d7b8da188dbb8dcd0344681c45
refs/heads/master
2020-03-19T15:25:41.925361
2018-06-17T01:41:39
2018-06-17T01:41:39
135,847,701
0
0
null
null
null
null
UTF-8
Python
false
false
3,268
py
""" Django settings for persona project. Generated by 'django-admin startproject' using Django 2.0.6. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'q=sk%wmn4y)=$7l743!c&(pg@g#dlg*lmzmrxk)+agg1y_cx-h' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'LoginSession', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'persona.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR,'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'persona.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 
'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR,'statics') ] AUTH_USER_MODEL = 'LoginSession.MyUser'
[ "tonybai2016@gmail.com" ]
tonybai2016@gmail.com
cbf3083dd8ea5ae4718b4b154ac624468f4e7c15
68b23f776fddb77de735419cbf30f33a49e9def2
/backend/terminus/home/urls.py
501046c917470aac71074c89c9f1d1a75f5cceac
[]
no_license
vash512/terminus
cbd00f74a600a13fd52aa2206c3eb1e7b5301ec7
4eb86d853bc76c22cd1af3c86fed1bc10d457c88
refs/heads/master
2016-09-05T14:49:42.655635
2015-07-09T03:34:38
2015-07-09T03:34:38
32,414,141
0
0
null
null
null
null
UTF-8
Python
false
false
1,155
py
# -*- coding: utf-8 -*- from django.conf.urls import patterns, include, url from django.views.generic import TemplateView urlpatterns=patterns('home.views', url(r'^$', 'index_view', name='index'), url(r'^humans.txt$', TemplateView.as_view(template_name='statics/humans.txt', content_type='text/plain; charset=utf-8')), url(r'^robots.txt$', TemplateView.as_view(template_name='statics/robots.txt', content_type='text/plain; charset=utf-8')), url(r'^sitemap.xml$', TemplateView.as_view(template_name='statics/sitemap.xml', content_type='application/xml; charset=utf-8')), url(r'^contacto/', 'contacto' ), url(r'^acercade/', 'acercade'), url(r'^corpuscontable', 'corpus'), url(r'^ayuda', 'ayuda'), #terminos urls de prueba url(r'^terminos', 'terminos'), url(r'^terminos/termino', 'termino_detalle'), url(r'^q/$', 'busqueda'), url(r'^q/termino', 'busqueda_list'), url(r'^docs/doc', 'doc_detalle'), url(r'^docs/$', 'docs'), #estas direcciones las debe administrar terminos.urls y terminos.views url(r'^login/', 'log_in'), url(r'^registro/', 'registro'), url(r'^logout/', 'log_out'), )
[ "xtornasol512@gmail.com" ]
xtornasol512@gmail.com
2a6d4814c6479677185a3e416cdb8f34af10eb47
777186acb39efa3fe39c340f4f0a5e10558eb020
/cmake-build-debug/catkin_generated/generate_cached_setup.py
b7dc571a5d7de75e2cf5290e785b4359b7ebc710
[]
no_license
UGVProject/PointGrey_CaptureROS
887b2f232b8abad8b98274569b69b4773715e07f
5ebc1f18f7ea1b72f31c128226f453f82a2cdec6
refs/heads/master
2020-05-21T23:28:10.685788
2018-04-08T02:44:36
2018-04-08T02:44:36
63,830,992
1
1
null
null
null
null
UTF-8
Python
false
false
1,371
py
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import os import stat import sys # find the import for catkin's python package - either from source space or from an installed underlay if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')): sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python')) try: from catkin.environment_cache import generate_environment_script except ImportError: # search for catkin package in all workspaces and prepend to path for workspace in "/home/zh/catkin_ws/devel;/opt/ros/kinetic".split(';'): python_path = os.path.join(workspace, 'lib/python2.7/dist-packages') if os.path.isdir(os.path.join(python_path, 'catkin')): sys.path.insert(0, python_path) break from catkin.environment_cache import generate_environment_script code = generate_environment_script('/home/zh/catkin_ws/src/PointGrey_CaptureROS/cmake-build-debug/devel/env.sh') output_filename = '/home/zh/catkin_ws/src/PointGrey_CaptureROS/cmake-build-debug/catkin_generated/setup_cached.sh' with open(output_filename, 'w') as f: #print('Generate script for cached setup "%s"' % output_filename) f.write('\n'.join(code)) mode = os.stat(output_filename).st_mode os.chmod(output_filename, mode | stat.S_IXUSR)
[ "hzhang032@e.ntu.edu.sg" ]
hzhang032@e.ntu.edu.sg
096140cdef3ff3e96fcd9caedf641f52e4453269
cc32863d3ed260b36aed4f6e61a1a525b5f06221
/fyndiq_assignment/urlshortener/migrations/0002_pairedurl.py
b058cc1dd00348c840370b1c1be35ce61de86458
[]
no_license
orcunozyurt/fyndiq_assignment
492204654c03bd57fc275b8f251125cdcbd77ffa
f2893724a639778cb42880bbf9a3c709e8342144
refs/heads/master
2020-03-30T05:49:53.940943
2015-09-13T15:02:22
2015-09-13T15:02:22
42,361,444
0
0
null
null
null
null
UTF-8
Python
false
false
677
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.core.validators class Migration(migrations.Migration): dependencies = [ ('urlshortener', '0001_initial'), ] operations = [ migrations.CreateModel( name='PairedUrl', fields=[ ('key_generated', models.OneToOneField(primary_key=True, serialize=False, to='urlshortener.WordList')), ('url', models.URLField(unique=True, validators=[django.core.validators.URLValidator()])), ('cdate', models.DateTimeField(auto_now_add=True)), ], ), ]
[ "orcunozyurt@hotmail.com" ]
orcunozyurt@hotmail.com
d4b38cd8aed1979f665275f13320cb82891a952a
56d8027b7af2fc160a63b546e6ea2f8a7e42e512
/Algorithms/peak_finding/python/1D_Peak_Finding.py
948a961875b1a83ff09cad39b098f53bbe341d70
[]
no_license
juicemia/DataStructures
fb8f278ebfc2e529c232912e0e6cc7ba3dcd1235
ffe87a03a8fc3d76e1be4e09f7f5218c897baf3a
refs/heads/master
2020-04-09T00:09:02.908752
2016-09-22T16:08:34
2016-09-22T16:08:34
13,316,162
1
1
null
null
null
null
UTF-8
Python
false
false
1,670
py
#!/usr/bin/python

# Please read 1D_peak_finding.pdf for explanation what a peak is, proofs,
# and asymptotic complexity analysis of the algorithms.
# There will be two algorithms. A simple iteration through all the items in
# the array until finding a peak, and a divide and conquer approach.
# Keep in mind, both of the algorithms find a peak, not all the peaks.


def find_peak_greedy(array):
    """Return a peak of ``array`` by linear scan (O(n)), or None.

    A peak is an element >= both neighbours (boundary elements only need
    to dominate their single neighbour).  Returns None for an empty input
    or for a non-sequence, as the original docstring promised.

    BUGFIX: the original crashed with IndexError on an empty sequence
    (the bare ``except`` only guarded the ``len`` call); empty input is
    now handled explicitly and only TypeError is swallowed.
    """
    try:
        size = len(array)
    except TypeError:
        # Not a sized sequence at all.
        return None
    if size == 0:
        return None
    if size == 1 or array[0] >= array[1]:
        # Single element, or the first element is already a peak.
        return array[0]
    if array[size - 1] >= array[size - 2]:
        # Last element is a peak.
        return array[size - 1]
    # Both ends failed, so some interior element must be a peak.
    for i in range(1, size - 1):
        if array[i] >= array[i - 1] and array[i] >= array[i + 1]:
            return array[i]


def find_peak_recursive(array, low, high):
    """Return a peak of array[low:high] by divide and conquer (O(log n)).

    Looks at the middle element; if a larger neighbour exists, recurse
    into that half (a peak is guaranteed there), otherwise the middle
    element itself is a peak since array[n-1] <= array[n] >= array[n+1].
    """
    n = (low + high) // 2
    if n > 0 and array[n] < array[n - 1]:
        # Look at the left side of the array.
        return find_peak_recursive(array, low, n)
    elif n < len(array) - 1 and array[n] < array[n + 1]:
        # Look at the right side of the array.
        return find_peak_recursive(array, n, high)
    else:
        return array[n]


#array = (1, 10, 15, 120, -5, -100, 150, 879, 10, 77)
# Answers might differ from the two functions because they approach the
# problem differently. However, both answers will be peaks.
#print "%d" % find_peak_greedy(array)
#print "%d" % find_peak_recursive(array, 0, len(array))
[ "lukas_dmx@yahoo.com" ]
lukas_dmx@yahoo.com
7c9eb9b5c4fc24e20e09662ff61f97569dacd082
99e9b8cf7401a729b129f87690b8ac93916fa4e8
/graphme.py
5b5e0e1f75b8bb3c0d07480c4ed6155bf90c4b2e
[ "MIT" ]
permissive
Herringway/graphme
06981b0435b8e87b5e62d3961dbd4c716ab1815f
5d45f35749c3a1e48e38fc08719243d3def4c43f
refs/heads/master
2021-01-12T02:21:56.133039
2014-10-11T02:08:29
2014-10-11T02:08:29
78,505,399
0
0
null
null
null
null
UTF-8
Python
false
false
4,011
py
#!/usr/bin/python2
#Written by Cameron Ross
import operator


def graphme(numgroups, losestates, groupweights, verify = lambda v,w,x,y,z: True, maxval = 40, victoryverify = lambda a: len(a) == len(list(filter(None, a))), numbermoving = 2):
    """Programmer-friendly interface to the graph searching algorithm.

    Args:
        numgroups: number of items to move (must match len(groupweights)).
        losestates: list of forbidden states (lists of booleans).
        groupweights: per-item crossing cost.
        verify: predicate(old_state, new_state, i, j, k) accepting a move.
        maxval: maximum allowed total cost.
        victoryverify: predicate deciding whether a state is a win
            (default: every item has crossed).
        numbermoving: how many items may move at once (2 or 3).
    """
    assert numgroups == len(groupweights)
    return graphmeup([False]*numgroups, losestates, groupweights, verify, maxval, victoryverify, numbermoving)


def graphmeup(state, losestates, groupweights, verify, maxval, victoryverify, numbermoving):
    """Searches for and returns a solution required to move all items across the river.

    Breadth-first search over states; returns
    (total_weight, path, backtracks, searched_states) or None.
    """
    searchedstates = 0
    pathqueue = []
    pathqueue.append((0, [state]))
    while pathqueue:
        (totalweight, path) = pathqueue.pop(0)
        node = path[-1] #the current state will be at the tail
        searchedstates = searchedstates + 1
        if victoryverify(node) and totalweight <= maxval:
            #backtrack count is just number of searched states minus length of path
            return (totalweight, path, searchedstates-len(path), searchedstates)
        for (permutation, weight) in permute(verify, list(node), groupweights, numbermoving):
            #skip permutations we've already seen, are losers, or whose weight exceeds the limit
            if (permutation in path or totalweight > maxval
                    or permutation in losestates
                    or invertgroup(list(permutation)) in losestates):
                continue
            newpath = list(path)
            newpath.append(list(permutation))
            pathqueue.append((totalweight+weight, newpath))


def invertgroup(group):
    """Flip all the values in the specified group and return them as a list.

    BUGFIX: wrap map() in list() -- under Python 3 map() returns an
    iterator, which never compares equal to the lose-state lists, so the
    mirrored-state check in graphmeup silently stopped working.  Under
    Python 2 this is identical (map already returned a list).
    """
    return list(map(operator.not_, group))


def permute(verify, state, weights, changes = 2):
    """Generates possible next states for river crossing problems.

    Yields (new_state, move_weight) pairs for flipping one, two, or
    (when changes == 3) three positions, filtered through verify().
    NOTE: the yielded list is mutated after the generator resumes, so
    consumers must copy it before advancing (graphmeup does).
    """
    for i in range(len(state)):
        statecopy = list(state)
        statecopy[i] = not(state[i])
        for j in range(i+1, len(state)):
            statecopy[j] = not(state[j])
            if changes == 3:
                #I hate hardcoding this, but...
                for k in range(j+1, len(state)):
                    statecopy[k] = not(state[k])
                    if verify(state, statecopy, i, j, k):
                        yield (statecopy, max(weights[i], weights[j]))
                    statecopy[k] = state[k]
            if verify(state, statecopy, i, j, i):
                yield (statecopy, max(weights[i], weights[j]))
            statecopy[j] = state[j]
        if verify(state, statecopy, i, i, i):
            yield (statecopy, weights[i])
        statecopy[i] = state[i]


def prettyprint(solution):
    """Prints out detailed information on the solution found."""
    if solution is None:
        print("No solution found\n--------------------------------")
        return
    (weight, path, backtracks, searchedstates) = solution
    print('''Path taken: %s
Cost: %s
Path length: %s
Backtrack count: %s
States searched: %s
--------------------------------''' % (path, weight, len(path), backtracks, searchedstates))


def main():
    #The huge blocks of true/false statements represent lose states.
    prettyprint(graphme(4, [
        [True, True, False, False],
        [False, True, True, False],
        [True, True, True, False]],
        [1,1,1,1],
        verify = lambda x, y, w, z, v: (x[-1] != y[-1]) and (y[w] == y[z] == y[-1])))
    prettyprint(graphme(6, [
        [True, True, True, False, False, False],
        [True, True, True, True, False, False],
        [True, True, True, True, True, False],
        [True, True, True, False, True, False],
        [True, True, True, False, False, True],
        [True, True, True, False, True, True],
        [True, True, False, True, False, False],
        [True, True, False, False, True, False],
        [True, True, False, False, False, True],
        [True, False, True, True, False, False],
        [True, False, True, False, True, False],
        [True, False, True, False, False, True],
        [False, True, True, True, False, False],
        [False, True, True, False, True, False],
        [False, True, True, False, False, True]],
        [1,1,1,1,1,1]))
    prettyprint(graphme(5, [], [1,2,5,8,1], maxval = 15,
        verify = lambda x, y, w, z, v: (x[-1] != y[-1]) and (y[v] == y[w] == y[z] == y[-1]) and ((w != z) or (w != v) or (z != v)),
        numbermoving=3))


if __name__ == "__main__":
    main()
[ "elpenguino@gmail.com" ]
elpenguino@gmail.com
1b8b194ca550a0ed437116814126fd3470715067
049c5392123d9e2e3865f75464a8e1788836242c
/venv/lib/python3.9/site-packages/dimod/utilities.py
a900713b2ddd8b5363edff00c1731ffafa9fc7cf
[]
no_license
KiaKafaei1/MasterProject
ec97d801d1d7f0872d3ff5bbdf7e644a2bddc963
1020c6c66d4f35b6f9affb302d31722de5e34979
refs/heads/master
2023-08-28T12:15:11.579203
2021-11-13T10:05:28
2021-11-13T10:05:28
289,909,672
0
0
null
2020-09-11T09:12:06
2020-08-24T11:31:36
Python
UTF-8
Python
false
false
19,977
py
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""Utility functions: Ising/QUBO energy evaluation and conversion, variable
relabeling helpers, structured-sampler discovery, and array-casting helpers."""
import copy
import os
import itertools

from functools import reduce

import numpy as np

from dimod.decorators import lockable_method

__all__ = ['ising_energy', 'qubo_energy',
           'ising_to_qubo', 'qubo_to_ising',
           'child_structure_dfs',
           'get_include',
           ]


# NOTE: docstrings containing LaTeX are raw strings (r""") so that sequences
# such as \sum and \mathbf are not treated as (invalid) escape sequences,
# which emit SyntaxWarning on Python >= 3.12.
def ising_energy(sample, h, J, offset=0.0):
    r"""Calculate the energy for the specified sample of an Ising model.

    Energy of a sample for a binary quadratic model is defined as a sum,
    offset by the constant energy offset associated with the model, of the
    sample multipled by the linear bias of the variable and all its
    interactions. For an Ising model,

    .. math::

        E(\mathbf{s}) = \sum_v h_v s_v + \sum_{u,v} J_{u,v} s_u s_v + c

    where :math:`s_v` is the sample, :math:`h_v` is the linear bias,
    :math:`J_{u,v}` the quadratic bias (interactions), and :math:`c` the
    energy offset.

    Args:
        sample (dict[variable, spin]):
            Sample for a binary quadratic model as a dict of form
            {v: spin, ...}, where keys are variables of the model and values
            are spins (either -1 or 1).

        h (dict[variable, bias]):
            Linear biases as a dict of the form {v: bias, ...}, where keys
            are variables of the model and values are biases.

        J (dict[(variable, variable), bias]):
            Quadratic biases as a dict of the form {(u, v): bias, ...}, where
            keys are 2-tuples of variables of the model and values are
            quadratic biases associated with the pair of variables (the
            interaction).

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        float: The induced energy.

    Notes:
        No input checking is performed.

    Examples:
        This example calculates the energy of a sample representing two down
        spins for an Ising model of two variables that have positive biases
        of value 1 and are positively coupled with an interaction of value 1.

        >>> sample = {1: -1, 2: -1}
        >>> h = {1: 1, 2: 1}
        >>> J = {(1, 2): 1}
        >>> dimod.ising_energy(sample, h, J, 0.5)
        -0.5

    References
    ----------
    `Ising model on Wikipedia <https://en.wikipedia.org/wiki/Ising_model>`_

    """
    # add the contribution from the linear biases
    for v in h:
        offset += h[v] * sample[v]

    # add the contribution from the quadratic biases
    for v0, v1 in J:
        offset += J[(v0, v1)] * sample[v0] * sample[v1]

    return offset


def qubo_energy(sample, Q, offset=0.0):
    r"""Calculate the energy for the specified sample of a QUBO model.

    Energy of a sample for a binary quadratic model is defined as a sum,
    offset by the constant energy offset associated with the model, of the
    sample multipled by the linear bias of the variable and all its
    interactions. For a quadratic unconstrained binary optimization (QUBO)
    model,

    .. math::

        E(\mathbf{x}) = \sum_{u,v} Q_{u,v} x_u x_v + c

    where :math:`x_v` is the sample, :math:`Q_{u,v}` a matrix of biases, and
    :math:`c` the energy offset.

    Args:
        sample (dict[variable, spin]):
            Sample for a binary quadratic model as a dict of form
            {v: bin, ...}, where keys are variables of the model and values
            are binary (either 0 or 1).

        Q (dict[(variable, variable), coefficient]):
            QUBO coefficients in a dict of form {(u, v): coefficient, ...},
            where keys are 2-tuples of variables of the model and values
            are biases associated with the pair of variables. Tuples (u, v)
            represent interactions and (v, v) linear biases.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        float: The induced energy.

    Notes:
        No input checking is performed.

    Examples:
        This example calculates the energy of a sample representing two zeros
        for a QUBO model of two variables that have positive biases of value 1
        and are positively coupled with an interaction of value 1.

        >>> sample = {1: 0, 2: 0}
        >>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
        >>> dimod.qubo_energy(sample, Q, 0.5)
        0.5

    References
    ----------
    `QUBO model on Wikipedia
    <https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization>`_

    """
    # linear terms are the (v, v) entries, so one loop covers everything
    for v0, v1 in Q:
        offset += sample[v0] * sample[v1] * Q[(v0, v1)]

    return offset


def ising_to_qubo(h, J, offset=0.0):
    """Convert an Ising problem to a QUBO problem.

    Map an Ising model defined on spins (variables with {-1, +1} values) to a
    quadratic unconstrained binary optimization (QUBO) formulation
    :math:`x' Q x` defined over binary variables (0 or 1 values), where the
    linear term is contained along the diagonal of Q. Return matrix Q that
    defines the model as well as the offset in energy between the two problem
    formulations:

    .. math::

         s' J s + h' s = offset + x' Q x

    See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.

    Args:
        h (dict[variable, bias]):
            Linear biases as a dict of the form {v: bias, ...}, where keys
            are variables of the model and values are biases.

        J (dict[(variable, variable), bias]):
            Quadratic biases as a dict of the form {(u, v): bias, ...}, where
            keys are 2-tuples of variables of the model and values are
            quadratic biases associated with the pair of variables (the
            interaction).

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        (dict, float): A 2-tuple containing:

            dict: QUBO coefficients.

            float: New energy offset.

    Examples:
        This example converts an Ising problem of two variables that have
        positive biases of value 1 and are positively coupled with an
        interaction of value 1 to a QUBO problem and prints the resulting
        energy offset.

        >>> h = {1: 1, 2: 1}
        >>> J = {(1, 2): 1}
        >>> dimod.ising_to_qubo(h, J, 0.5)[1]
        -0.5

    """
    # the linear biases are the easiest: s = 2x - 1 maps h*s to 2h*x - h
    q = {(v, v): 2. * bias for v, bias in h.items()}

    # next the quadratic biases
    for (u, v), bias in J.items():
        if bias == 0.0:
            continue
        q[(u, v)] = 4. * bias
        q[(u, u)] = q.setdefault((u, u), 0) - 2. * bias
        q[(v, v)] = q.setdefault((v, v), 0) - 2. * bias

    # finally calculate the offset
    offset += sum(J.values()) - sum(h.values())

    return q, offset


def qubo_to_ising(Q, offset=0.0):
    """Convert a QUBO problem to an Ising problem.

    Map a quadratic unconstrained binary optimization (QUBO) problem
    :math:`x' Q x` defined over binary variables (0 or 1 values), where the
    linear term is contained along the diagonal of Q, to an Ising model
    defined on spins (variables with {-1, +1} values). Return h and J that
    define the Ising model as well as the offset in energy between the two
    problem formulations:

    .. math::

         x' Q x = offset + s' J s + h' s

    See :meth:`~dimod.utilities.ising_to_qubo` for the inverse function.

    Args:
        Q (dict[(variable, variable), coefficient]):
            QUBO coefficients in a dict of form {(u, v): coefficient, ...},
            where keys are 2-tuples of variables of the model and values
            are biases associated with the pair of variables. Tuples (u, v)
            represent interactions and (v, v) linear biases.

        offset (numeric, optional, default=0):
            Constant offset to be applied to the energy. Default 0.

    Returns:
        (dict, dict, float): A 3-tuple containing:

            dict: Linear coefficients of the Ising problem.

            dict: Quadratic coefficients of the Ising problem.

            float: New energy offset.

    Examples:
        This example converts a QUBO problem of two variables that have
        positive biases of value 1 and are positively coupled with an
        interaction of value 1 to an Ising problem, and shows the new energy
        offset.

        >>> Q = {(1, 1): 1, (2, 2): 1, (1, 2): 1}
        >>> dimod.qubo_to_ising(Q, 0.5)[2]
        1.75

    """
    h = {}
    J = {}
    linear_offset = 0.0
    quadratic_offset = 0.0

    # x = (s + 1)/2 maps each diagonal term to .5 bias and each off-diagonal
    # term to .25 contributions on the coupler and on both linear terms
    for (u, v), bias in Q.items():
        if u == v:
            if u in h:
                h[u] += .5 * bias
            else:
                h[u] = .5 * bias
            linear_offset += bias

        else:
            if bias != 0.0:
                J[(u, v)] = .25 * bias

            if u in h:
                h[u] += .25 * bias
            else:
                h[u] = .25 * bias

            if v in h:
                h[v] += .25 * bias
            else:
                h[v] = .25 * bias

            quadratic_offset += bias

    offset += .5 * linear_offset + .25 * quadratic_offset

    return h, J, offset


def resolve_label_conflict(mapping, old_labels=None, new_labels=None):
    """Resolve a self-labeling conflict by creating an intermediate labeling.

    Args:
        mapping (dict):
            A dict mapping the current variable labels to new ones.

        old_labels (set, optional, default=None):
            The keys of mapping. Can be passed in for performance reasons.
            These are not checked.

        new_labels (set, optional, default=None):
            The values of mapping. Can be passed in for performance reasons.
            These are not checked.

    Returns:
        tuple: A 2-tuple containing:

            dict: A map from the keys of mapping to an intermediate labeling

            dict: A map from the intermediate labeling to the values of
            mapping.

    """
    if old_labels is None:
        old_labels = set(mapping)
    if new_labels is None:
        new_labels = set(mapping.values())

    # counter will be used to generate the intermediate labels, as an easy
    # optimization we start the counter with a high number because often
    # variables are labeled by integers starting from 0
    counter = itertools.count(2 * len(mapping))

    old_to_intermediate = {}
    intermediate_to_new = {}

    for old, new in mapping.items():
        if old == new:
            # we can remove self-labels
            continue

        if old in new_labels or new in old_labels:

            # try to get a new unique label
            lbl = next(counter)
            while lbl in new_labels or lbl in old_labels:
                lbl = next(counter)

            # add it to the mapping
            old_to_intermediate[old] = lbl
            intermediate_to_new[lbl] = new

        else:
            old_to_intermediate[old] = new
            # don't need to add it to intermediate_to_new because it is a
            # self-label

    return old_to_intermediate, intermediate_to_new


def iter_safe_relabels(mapping, existing):
    """Iterator over "safe" intermediate relabelings.

    Args:
        mapping (dict):
            A map from old labels to new.

        existing (set):
            A container of existing labels.

    Yields:
        dict: A "safe" relabelling.

    """
    # put the new labels into a set for fast lookup, also ensures that the
    # values are valid labels
    try:
        new_labels = set(mapping.values())
    except TypeError:
        raise ValueError("mapping targets must be hashable objects")

    old_labels = mapping.keys()

    for v in new_labels:
        if v in existing and v not in old_labels:
            msg = ("A variable cannot be relabeled {!r} without also "
                   "relabeling the existing variable of the same name")
            raise ValueError(msg.format(v))

    if any(v in new_labels for v in old_labels):
        # the mapping is tangled with itself; yield the two-step relabeling
        yield from resolve_label_conflict(mapping, old_labels, new_labels)
    else:
        yield mapping


def child_structure_dfs(sampler, seen=None):
    """Return the structure of a composed sampler using a depth-first search
    on its children.

    Args:
        sampler (:obj:`.Sampler`):
            :class:`.Structured` or composed sampler with at least
            one structured child.

        seen (set, optional, default=None):
            IDs of already checked child samplers.

    Returns:
        :class:`~collections.namedtuple`: A named tuple of the form
        `Structure(nodelist, edgelist, adjacency)`, where the 3-tuple values
        are the :attr:`.Structured.nodelist`, :attr:`.Structured.edgelist`
        and :attr:`.Structured.adjacency` attributes of the first structured
        sampler found.

    Raises:
        ValueError: If no structured sampler is found.

    Examples:

        >>> sampler = dimod.TrackingComposite(
        ...                 dimod.StructureComposite(
        ...                     dimod.ExactSolver(), [0, 1], [(0, 1)]))
        >>> print(dimod.child_structure_dfs(sampler).nodelist)
        [0, 1]

    """
    seen = set() if seen is None else seen

    if sampler not in seen:
        try:
            return sampler.structure
        except AttributeError:
            # hasattr just tries to access anyway...
            pass

    seen.add(sampler)

    for child in getattr(sampler, 'children', ()):  # getattr handles samplers
        if child in seen:
            continue

        try:
            return child_structure_dfs(child, seen=seen)
        except ValueError:
            # tree has no child samplers
            pass

    raise ValueError("no structured sampler found")


class LockableDict(dict):
    """A dict that can turn writeability on and off."""
    # methods like update, clear etc are not wrappers for __setitem__,
    # __delitem__ so they need to be overwritten

    @property
    def is_writeable(self):
        # default to writeable until explicitly locked
        return getattr(self, '_writeable', True)

    @is_writeable.setter
    def is_writeable(self, b):
        self._writeable = bool(b)

    @lockable_method
    def __setitem__(self, key, value):
        return super(LockableDict, self).__setitem__(key, value)

    @lockable_method
    def __delitem__(self, key):
        return super(LockableDict, self).__delitem__(key)

    def __deepcopy__(self, memo):
        # copy contents first so the lock state cannot block the copy
        new = type(self)()
        memo[id(self)] = new
        new.update((copy.deepcopy(key, memo), copy.deepcopy(value, memo))
                   for key, value in self.items())
        new.is_writeable = self.is_writeable
        return new

    @lockable_method
    def clear(self):
        return super(LockableDict, self).clear()

    @lockable_method
    def pop(self, *args, **kwargs):
        return super(LockableDict, self).pop(*args, **kwargs)

    @lockable_method
    def popitem(self):
        return super(LockableDict, self).popitem()

    @lockable_method
    def setdefault(self, *args, **kwargs):
        return super(LockableDict, self).setdefault(*args, **kwargs)

    @lockable_method
    def update(self, *args, **kwargs):
        return super(LockableDict, self).update(*args, **kwargs)


def get_include():
    """Return the directory with dimod's header files."""
    return os.path.join(os.path.dirname(__file__), 'include')


def _astypearrays(arrays, requirements, min_itemsize, allowed_types):
    # allowed types can only be numeric for now, see comment below
    # todo: allow unsafe with warning controlled by kwarg?

    # We need to get the dtype, and as far as I can tell the only way to do
    # it for array-like is to actually cast to a numpy array
    arrays = [np.asarray(arr) for arr in arrays]

    # get the dtype we can promote to
    dtype = reduce(np.promote_types, (arr.dtype for arr in arrays))

    if not any(np.issubdtype(dtype, type_) for type_ in allowed_types):
        # put together an appropriate error message
        descriptors = []
        if np.floating in allowed_types:
            descriptors.append('floating')
        if np.integer in allowed_types:
            descriptors.append('integer')
        elif np.unsignedinteger in allowed_types:
            if np.signedinteger in allowed_types:
                descriptors.append('integer')
            else:
                descriptors.append('unsigned integer')
        elif np.signedinteger in allowed_types:
            descriptors.append('signed integer')

        raise TypeError(
            "Cannot safely cast arrays to {} (given {})".format(
                ', '.join(descriptors),
                ', '.join(arr.dtype.name for arr in arrays)))

    if min_itemsize is not None:
        # round the requested itemsize (bytes) up to the next power of two
        if min_itemsize >= 1:
            size = str(2**int(np.ceil(np.log2(min_itemsize))))
        else:
            size = '1'

        if np.issubdtype(dtype, np.unsignedinteger):
            kind = 'u'
        elif np.issubdtype(dtype, np.signedinteger):
            kind = 'i'
        elif np.issubdtype(dtype, np.floating):
            kind = 'f'
        else:
            # we could instead read this from the type string, but it's kind
            # of pandora's box, because there's also structured arrays,
            # complex, etc. For now, let's just restrict to numeric.
            raise RuntimeError("unexpected dtype")

        dtype = np.promote_types(dtype, kind+size)

    arrays = tuple(np.require(arr, dtype=dtype, requirements=requirements)
                   for arr in arrays)

    if len(arrays) > 1:
        return arrays
    else:
        return arrays[0]


# Not a public function (yet)
def asintegerarrays(*arrays, requirements=None, min_itemsize=None):
    """Cast the given array(s) to the same integer type.

    Not a public function. This is useful when calling cython functions.

    Args:
        *arrays (array-like): At least one array-like.

        requirements (str/list[str], optional): See :func:`numpy.require`.

        min_itemsize (int, optional):
            The minimum itemsize (in bytes) for the output arrays.

    Returns:
        Numpy array(s) satisfying the above requirements. They will all have
        the same dtype.

    """
    # empty arrays are a problem because numpy defaults them to float, so
    # let's do a tiny bit of prechecking
    arrays = [arr if len(arr) else np.asarray(arr, dtype=np.int8)
              for arr in arrays]

    if not arrays:
        raise TypeError('asintegerarrays() takes at least 1 array (0 given)')

    return _astypearrays(arrays, requirements, min_itemsize, [np.integer])


# Not a public function (yet)
def asnumericarrays(*arrays, requirements=None, min_itemsize=None):
    """Cast the given array(s) to the same floating type.

    Not a public function. This is useful when calling cython functions.

    Args:
        *arrays (array-like): At least one array-like.

        requirements (str/list[str], optional): See :func:`numpy.require`.

        min_itemsize (int, optional):
            The minimum itemsize (in bytes) for the output arrays.

    Returns:
        Numpy array(s) satisfying the above requirements. They will all have
        the same dtype.

    """
    if not arrays:
        raise TypeError('asnumericarrays() takes at least 1 array (0 given)')

    return _astypearrays(arrays, requirements, min_itemsize,
                         [np.integer, np.floating])
[ "kia0896@yahoo.dk" ]
kia0896@yahoo.dk
a4c1a9242bba0717fb4e57280314b144d76d42b9
e96e89ff40324fe099cd5aab73563308b66e6d52
/part4/recognize_face.py
74777cdb8d96106f65cf8a52ef799679e41af96b
[]
no_license
chunxuwu/opencv3_python
15898b4d7ee6978778df3e06ef4dfadec1190bbb
dfb6034f94abd4f65676d88516c1ff62cd12f73a
refs/heads/master
2020-06-09T22:03:07.634080
2019-07-02T15:48:44
2019-07-02T15:48:44
193,514,974
0
0
null
null
null
null
UTF-8
Python
false
false
4,325
py
import cv2
import os
import numpy as np

# Known identities: the recognizer's integer prediction indexes into
# label_name to recover the display name drawn on the frame.
label_num = [0]
label_name = ["chunxu"]
images = []
labels = []


def read_images(path):
    """Recursively collect .pgm training images under *path*.

    Images are loaded as single-channel grayscale and resized to 200x200 so
    the training samples have the same shape as the grayscale ROI passed to
    ``predict()`` at recognition time (EigenFaceRecognizer flattens its
    input, so train/predict shapes must match).

    A numeric label is assigned from the position of the matching directory
    name in ``label_name``; images whose path matches no known name are
    dropped so ``images`` and ``labels`` stay aligned.

    Returns:
        tuple: the module-level ``images`` and ``labels`` lists.
    """
    for dir_item in os.listdir(path):
        # absolute, normalized path of the entry
        path_abs = os.path.abspath(os.path.join(path, dir_item))
        try:
            if path_abs.endswith('.pgm'):
                # Load directly as grayscale (the original kept 3-channel
                # BGR here, which cannot be matched against the grayscale
                # ROI at predict time).
                img = cv2.imread(path_abs, cv2.IMREAD_GRAYSCALE)
                if img is None:
                    continue  # unreadable / corrupt file
                img = cv2.resize(img, (200, 200))
                images.append(np.asarray(img, dtype=np.uint8))

                # Assign the label from the directory name.  Iterating over
                # label_name replaces the original hard-coded
                # label_name[1]/label_name[2] chain, which raised IndexError
                # with a one-entry list and desynchronized images/labels.
                # os.sep instead of '\\' keeps this portable.
                path_piece = path_abs.split(os.sep)
                for idx, person in enumerate(label_name):
                    if person in path_piece:
                        labels.append(idx)
                        break
                else:
                    # no matching label: discard the image we just added
                    images.pop()
            elif os.path.isdir(path_abs):
                # recurse into subdirectories
                read_images(path_abs)
            else:
                continue
        except Exception as e:
            print("REASON:", e)

    print('labels:', labels)
    print("images:", images)
    return images, labels


def face_model():
    """Train an Eigenfaces recognizer on the images found under the cwd.

    Returns:
        The trained ``cv2.face.EigenFaceRecognizer`` model.
    """
    path = os.getcwd()
    imgs, lbls = read_images(path)
    model = cv2.face.EigenFaceRecognizer_create()
    model.train(np.asarray(imgs), np.asarray(lbls))
    return model


def face_rec():
    """Run the webcam loop: detect faces, predict and draw the identity.

    Press 'q' to quit.  The capture device is always released on exit
    (the original leaked it).
    """
    face_model_trained = face_model()
    camera = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier(
        './haarcascades/haarcascade_frontalface_default.xml')
    try:
        while True:
            read, img = camera.read()
            if not read:
                break  # camera unavailable or stream ended
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                img = cv2.rectangle(img, (x, y), (x + w, y + h),
                                    (255, 0, 0), 2)
                # NumPy indexing is [row, col] == [y, x]; the original
                # gray[x:x + w, y:y + h] cropped the wrong region.
                roi = gray[y:y + h, x:x + w]
                try:
                    roi = cv2.resize(roi, (200, 200),
                                     interpolation=cv2.INTER_LINEAR)
                    # predict() returns (label, confidence)
                    params = face_model_trained.predict(roi)
                    print("Label: %s, confidence: %0.2f" %
                          (label_name[params[0]], params[1]))
                    cv2.putText(img, label_name[params[0]], (x, y - 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 0), 2)
                except Exception as e:
                    print("face_rec_REASON:", e)
            cv2.imshow('camera', img)
            if cv2.waitKey(10) & 0xff == ord('q'):
                break
    finally:
        camera.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    face_rec()
[ "chunxuwu_zjut@163.com" ]
chunxuwu_zjut@163.com
1d64866e18bbd874d1be4840170e198249a642dc
3f97b3718cf3db745235d3e46abc775ac9fc0ea7
/app/y2020/d10_adapter_array.py
74bafc570e9a0b7317c55d7e2266ab070f0d29e9
[]
no_license
bolmstedt/advent-of-code-python
ce48f7c90fe3861bc7947e3bc918d49166233fe8
b1e7f12318c5bd642dfe29f862680f51c0f66bb5
refs/heads/master
2023-01-27T14:26:00.365229
2020-12-12T00:43:04
2020-12-12T00:43:04
317,562,248
0
0
null
null
null
null
UTF-8
Python
false
false
2,251
py
"""Solution for day 10, 2020.""" import collections from typing import Dict, List, Set, Union from app.base_solver import BaseSolver class Solver(BaseSolver): """Solver for day 10, 2020.""" day = '10' year = '2020' name = r"""Adapter Array""" def part_one(self, data: str) -> Union[int, str]: """Solve part one.""" jumps: Dict[int, int] = collections.defaultdict(int) adapters = self._parse_input(data) sequence = collections.deque(adapters[:1], 2) for adapter in adapters[1:]: sequence.append(adapter) jumps[sequence[1] - sequence[0]] += 1 return jumps[1] * jumps[3] def part_two(self, data: str) -> Union[int, str]: """Solve part two.""" return self._solve_part_two( self._get_branches(self._parse_input(data)), {}, ) @staticmethod def _get_branches(adapters: List[int]) -> Dict[int, Set[int]]: branches = collections.defaultdict(set) for index, adapter in enumerate(adapters): max_jump = adapter + 3 for upcoming in adapters[index + 2:index + 4]: if upcoming <= max_jump: branches[adapter].add(upcoming) return branches @classmethod def _solve_part_two( cls, branches: Dict[int, Set[int]], solved: Dict[Union[int, str], int], start: int = 0, ) -> int: if start not in solved: arrangements = 1 for adapter, jumps in branches.items(): if adapter < start: continue for jump in jumps: branch = f'{adapter}:{jump}' if branch not in solved: solved[branch] = cls._solve_part_two( branches, solved, jump, ) arrangements += solved[branch] solved[start] = arrangements return solved[start] @classmethod def _parse_input(cls, data: str) -> List[int]: adapters = cls._parse_int_lines(data) return sorted(adapters + [0, max(adapters) + 3])
[ "marcus@bolmstedt.com" ]
marcus@bolmstedt.com
9a79bc2d121ab5020b5787e81900966444fc6e38
76fefdb20c453e830b8db86f32a1b42d79108cdd
/venv/bin/django-admin.py
a02fc84b3fe69bf065a0bd2537f44fe524e8d4d0
[]
no_license
Belie06Loryn/chaty-final-master
35f6762e9bf45e1086db6280cd0bd7dc0828fb96
86e839c069eb54dad3390e84c6b7534d165a3942
refs/heads/master
2022-12-09T14:31:19.478090
2019-12-08T16:00:45
2019-12-08T16:00:45
226,365,156
0
0
null
2022-12-08T06:59:44
2019-12-06T16:08:51
Python
UTF-8
Python
false
false
166
py
#!/home/alexie/Documents/chaty-final-master/venv/bin/python from django.core import management if __name__ == "__main__": management.execute_from_command_line()
[ "maniralexie@gmail.com" ]
maniralexie@gmail.com
73b8eea0e247cc2fb5986af3fd0beca8578749f2
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/E9FwvGyad5CDbiH4C_9.py
d750b3eefe4c93d1c5db878cb337dcc042cf9e95
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
1,348
py
""" Create a function that takes a 2D array as an argument and returns the number of people whose view is blocked by a tall person. The concert stage is pointed towards the top of the 2D array and the tall person (represented by a 2) blocks the view of all the people (represented by a 1) behind them. ### Examples block([ [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 2], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1] ]) ➞ 2 # The tall person blocks 2 people behind him thus # the function returns 2. block([ [1, 2, 1, 1], [1, 1, 1, 2], [1, 1, 1, 1], [1, 1, 1, 1], ]) ➞ 5 # There are 2 tall people that block everyone behind # them. The first tall person in the first row blocks 3 # people behind him while the second tall person in # the second row blocks 2 people behind him thus the # function returns 5. block([ [1, 1, 1, 1], [2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 1, 1], ]) ➞ 4 ### Notes 1. There is only a maximum of 1 tall person in every column. 2. No view is blocked if the tall person is in the last row. """ def block(lst): total = 0 for x in range(len(lst[0])): for y in range(len(lst)-1, 0, -1): if lst[y][x] < lst[y-1][x]: total += len(lst) - y return total
[ "daniel.reich@danielreichs-MacBook-Pro.local" ]
daniel.reich@danielreichs-MacBook-Pro.local
5c354b1c02a7628a71303fea1386ada6e1ca5032
cb4b3f85237354ea385d1fa3447af08cab0bab60
/web/server/codechecker_server/api/mass_store_run.py
8f1ab79c2afd13e72d80f76189e833deadd6990e
[ "LLVM-exception", "Apache-2.0" ]
permissive
hanwoody/codechecker
dece3ac6068f7b8163e976819abe5458279fcc53
f3ba8fd9a5ebafbe95715a515ff271708c6e8446
refs/heads/master
2023-06-20T09:27:39.181804
2021-07-08T08:23:40
2021-07-08T08:23:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
48,981
py
# ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- import base64 import os import sqlalchemy import tempfile import time import zipfile import zlib from collections import defaultdict from datetime import datetime from hashlib import sha256 from tempfile import TemporaryDirectory from typing import Any, Dict, List, NamedTuple, Optional, Set import codechecker_api_shared from codechecker_api.codeCheckerDBAccess_v6 import ttypes from codechecker_common import plist_parser, skiplist_handler, util from codechecker_common.logger import get_logger from codechecker_common.source_code_comment_handler import \ SourceCodeCommentHandler, SpellException, contains_codechecker_comment from codechecker_report_hash.hash import get_report_path_hash from ..database import db_cleanup from ..database.config_db_model import Product from ..database.database import DBSession from ..database.run_db_model import AnalysisInfo, AnalyzerStatistic, \ BugPathEvent, BugReportPoint, ExtendedReportData, File, FileContent, \ Report, Run, RunHistory, RunLock from ..metadata import checker_is_unavailable, get_analyzer_name, \ MetadataInfoParser from .report_server import ThriftRequestHandler from .thrift_enum_helper import report_extended_data_type_str LOG = get_logger('server') # FIXME: when these types are introduced we need to use those. SourceLineComments = List[Any] ReportType = Any MainSection = Dict class PathEvents(NamedTuple): paths: List[ttypes.BugPathPos] events: List[ttypes.BugPathEvent] extended_data: List[ttypes.ExtendedReportData] def unzip(b64zip: str, output_dir: str) -> int: """ This function unzips the base64 encoded zip file. 
This zip is extracted to a temporary directory and the ZIP is then deleted. The function returns the size of the extracted decompressed zip file. """ if len(b64zip) == 0: return 0 with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file: LOG.debug("Unzipping mass storage ZIP '%s' to '%s'...", zip_file.name, output_dir) zip_file.write(zlib.decompress(base64.b64decode(b64zip))) with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zipf: try: zipf.extractall(output_dir) return os.stat(zip_file.name).st_size except Exception: LOG.error("Failed to extract received ZIP.") import traceback traceback.print_exc() raise return 0 def get_file_content(file_path: str) -> bytes: """Return the file content for the given filepath. """ with open(file_path, 'rb') as f: return f.read() def parse_codechecker_review_comment( source_file_name: str, report_line: int, checker_name: str ) -> SourceLineComments: """Parse the CodeChecker review comments from a source file at a given position. Returns an empty list if there are no comments. """ src_comment_data = [] with open(source_file_name, encoding='utf-8', errors='ignore') as f: if contains_codechecker_comment(f): sc_handler = SourceCodeCommentHandler() try: src_comment_data = sc_handler.filter_source_line_comments( f, report_line, checker_name) except SpellException as ex: LOG.warning("File %s contains %s", source_file_name, ex) return src_comment_data def collect_paths_events( report: ReportType, file_ids: Dict[str, int], files: Dict[str, str] ) -> PathEvents: """ This function creates the BugPathPos and BugPathEvent objects which belong to a report. report -- A report object from the parsed plist file. file_ids -- A dictionary which maps the file paths to file IDs in the database. files -- A list containing the file paths from the parsed plist file. The order of this list must be the same as in the plist file. #TODO Multiple ranges could belong to an event or control node. 
Only the first range from the list of ranges is stored into the database. Further improvement can be to store and view all ranges if there are more than one. """ path_events = PathEvents([], [], []) events = [i for i in report.bug_path if i.get('kind') == 'event'] # Create remaining data for bugs and send them to the server. In plist # file the source and target of the arrows are provided as starting and # ending ranges of the arrow. The path A->B->C is given as A->B and # B->C, thus range B is provided twice. So in the loop only target # points of the arrows are stored, and an extra insertion is done for # the source of the first arrow before the loop. report_path = [i for i in report.bug_path if i.get('kind') == 'control'] if report_path: start_range = report_path[0]['edges'][0]['start'] start1_line = start_range[0]['line'] start1_col = start_range[0]['col'] start2_line = start_range[1]['line'] start2_col = start_range[1]['col'] source_file_path = files[start_range[1]['file']] path_events.paths.append(ttypes.BugPathPos( start1_line, start1_col, start2_line, start2_col, file_ids[source_file_path])) for path in report_path: try: end_range = path['edges'][0]['end'] end1_line = end_range[0]['line'] end1_col = end_range[0]['col'] end2_line = end_range[1]['line'] end2_col = end_range[1]['col'] source_file_path = files[end_range[1]['file']] path_events.paths.append(ttypes.BugPathPos( end1_line, end1_col, end2_line, end2_col, file_ids[source_file_path])) except IndexError: # Edges might be empty nothing can be stored. continue for event in events: file_path = files[event['location']['file']] start_loc = event['location'] end_loc = event['location'] # Range can provide more precise location information. # Use that if available. 
ranges = event.get("ranges") if ranges: start_loc = ranges[0][0] end_loc = ranges[0][1] path_events.events.append(ttypes.BugPathEvent( start_loc['line'], start_loc['col'], end_loc['line'], end_loc['col'], event['message'], file_ids[file_path])) for macro in report.macro_expansions: if not macro['expansion']: continue file_path = files[macro['location']['file']] start_loc = macro['location'] end_loc = macro['location'] # Range can provide more precise location information. # Use that if available. ranges = macro.get("ranges") if ranges: start_loc = ranges[0][0] end_loc = ranges[0][1] path_events.extended_data.append(ttypes.ExtendedReportData( ttypes.ExtendedReportDataType.MACRO, start_loc['line'], start_loc['col'], end_loc['line'], end_loc['col'], macro['expansion'], file_ids[file_path])) for note in report.notes: if not note['message']: continue file_path = files[note['location']['file']] start_loc = note['location'] end_loc = note['location'] # Range can provide more precise location information. # Use that if available. ranges = note.get("ranges") if ranges: start_loc = ranges[0][0] end_loc = ranges[0][1] path_events.extended_data.append(ttypes.ExtendedReportData( ttypes.ExtendedReportDataType.NOTE, start_loc['line'], start_loc['col'], end_loc['line'], end_loc['col'], note['message'], file_ids[file_path])) return path_events def add_file_record( session: DBSession, file_path: str, content_hash: str ) -> Optional[int]: """ Add the necessary file record pointing to an already existing content. Returns the added file record id or None, if the content_hash is not found. This function must not be called between add_checker_run() and finish_checker_run() functions when SQLite database is used! add_checker_run() function opens a transaction which is closed by finish_checker_run() and since SQLite doesn't support parallel transactions, this API call will wait until the other transactions finish. In the meantime the run adding transaction times out. 
""" file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == file_path) \ .one_or_none() if file_record: return file_record.id try: file_record = File(file_path, content_hash) session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: LOG.error(ex) # Other transaction might have added the same file in the # meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == file_path).one_or_none() return file_record.id if file_record else None class MassStoreRun: def __init__( self, report_server: ThriftRequestHandler, name: str, tag: Optional[str], version: Optional[str], b64zip: str, force: bool, trim_path_prefixes: Optional[List[str]], description: Optional[str] ): """ Initialize object. """ self.__report_server = report_server self.__name = name self.__tag = tag self.__version = version self.__b64zip = b64zip self.__force = force self.__trim_path_prefixes = trim_path_prefixes self.__description = description self.__mips: Dict[str, MetadataInfoParser] = {} self.__analysis_info: Dict[str, AnalysisInfo] = {} self.__duration: int = 0 self.__wrong_src_code_comments: List[str] = [] self.__already_added_report_hashes: Set[str] = set() self.__new_report_hashes: Set[str] = set() self.__all_report_checkers: Set[str] = set() @property def __manager(self): return self.__report_server._manager @property def __Session(self): return self.__report_server._Session @property def __config_database(self): return self.__report_server._config_database @property def __product(self): return self.__report_server._product @property def __context(self): return self.__report_server._context @property def user_name(self): return self.__report_server._get_username() def __check_run_limit(self): """ Checks the maximum allowed of uploadable runs for the current product. 
""" max_run_count = self.__manager.get_max_run_count() with DBSession(self.__config_database) as session: product = session.query(Product).get(self.__product.id) if product.run_limit: max_run_count = product.run_limit # Session that handles constraints on the run. with DBSession(self.__Session) as session: if not max_run_count: return LOG.debug("Check the maximum number of allowed runs which is %d", max_run_count) run = session.query(Run) \ .filter(Run.name == self.__name) \ .one_or_none() # If max_run_count is not set in the config file, it will allow # the user to upload unlimited runs. run_count = session.query(Run.id).count() # If we are not updating a run or the run count is reached the # limit it will throw an exception. if not run and run_count >= max_run_count: remove_run_count = run_count - max_run_count + 1 raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, f"You reached the maximum number of allowed runs " f"({run_count}/{max_run_count})! Please remove at least " f"{remove_run_count} run(s) before you try it again.") def __store_run_lock(self, session: DBSession): """ Store a RunLock record for the given run name into the database. """ try: # If the run can be stored, we need to lock it first. If there is # already a lock in the database for the given run name which is # expired and multiple processes are trying to get this entry from # the database for update we may get the following exception: # could not obtain lock on row in relation "run_locks" # This is the reason why we have to wrap this query to a try/except # block. 
run_lock = session.query(RunLock) \ .filter(RunLock.name == self.__name) \ .with_for_update(nowait=True).one_or_none() except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as ex: LOG.error("Failed to get run lock for '%s': %s", self.__name, ex) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, "Someone is already storing to the same run. Please wait " "while the other storage is finished and try it again.") if not run_lock: # If there is no lock record for the given run name, the run # is not locked -- create a new lock. run_lock = RunLock(self.__name, self.user_name) session.add(run_lock) elif run_lock.has_expired( db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE): # There can be a lock in the database, which has already # expired. In this case, we assume that the previous operation # has failed, and thus, we can re-use the already present lock. run_lock.touch() run_lock.username = self.user_name else: # In case the lock exists and it has not expired, we must # consider the run a locked one. when = run_lock.when_expires( db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE) username = run_lock.username if run_lock.username is not None \ else "another user" LOG.info("Refusing to store into run '%s' as it is locked by " "%s. Lock will expire at '%s'.", self.__name, username, when) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, "The run named '{0}' is being stored into by {1}. If the " "other store operation has failed, this lock will expire " "at '{2}'.".format(self.__name, username, when)) # At any rate, if the lock has been created or updated, commit it # into the database. try: session.commit() except (sqlalchemy.exc.IntegrityError, sqlalchemy.orm.exc.StaleDataError): # The commit of this lock can fail. 
# # In case two store ops attempt to lock the same run name at the # same time, committing the lock in the transaction that commits # later will result in an IntegrityError due to the primary key # constraint. # # In case two store ops attempt to lock the same run name with # reuse and one of the operation hangs long enough before COMMIT # so that the other operation commits and thus removes the lock # record, StaleDataError is raised. In this case, also consider # the run locked, as the data changed while the transaction was # waiting, as another run wholly completed. LOG.info("Run '%s' got locked while current transaction " "tried to acquire a lock. Considering run as locked.", self.__name) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, "The run named '{0}' is being stored into by another " "user.".format(self.__name)) def __free_run_lock(self, session: DBSession): """ Remove the lock from the database for the given run name. """ # Using with_for_update() here so the database (in case it supports # this operation) locks the lock record's row from any other access. run_lock = session.query(RunLock) \ .filter(RunLock.name == self.__name) \ .with_for_update(nowait=True).one() session.delete(run_lock) session.commit() def __store_source_files( self, source_root: str, filename_to_hash: Dict[str, str] ) -> Dict[str, int]: """ Storing file contents from plist. """ file_path_to_id = {} for file_name, file_hash in filename_to_hash.items(): source_file_name = os.path.join(source_root, file_name.strip("/")) source_file_name = os.path.realpath(source_file_name) LOG.debug("Storing source file: %s", source_file_name) trimmed_file_path = util.trim_path_prefixes( file_name, self.__trim_path_prefixes) if not os.path.isfile(source_file_name): # The file was not in the ZIP file, because we already # have the content. Let's check if we already have a file # record in the database or we need to add one. 
LOG.debug('%s not found or already stored.', trimmed_file_path) with DBSession(self.__Session) as session: fid = add_file_record( session, trimmed_file_path, file_hash) if not fid: LOG.error("File ID for %s is not found in the DB with " "content hash %s. Missing from ZIP?", source_file_name, file_hash) file_path_to_id[trimmed_file_path] = fid LOG.debug("%d fileid found", fid) continue with DBSession(self.__Session) as session: file_path_to_id[trimmed_file_path] = self.__add_file_content( session, trimmed_file_path, source_file_name, file_hash) return file_path_to_id def __add_file_content( self, session: DBSession, file_path: str, source_file_name: str, content_hash: str ) -> int: """ Add the necessary file contents. If the file is already stored in the database then its ID returns. If content_hash in None then this function calculates the content hash. Or if is available at the caller and is provided then it will not be calculated again. This function must not be called between add_checker_run() and finish_checker_run() functions when SQLite database is used! add_checker_run() function opens a transaction which is closed by finish_checker_run() and since SQLite doesn't support parallel transactions, this API call will wait until the other transactions finish. In the meantime the run adding transaction times out. """ source_file_content = None if not content_hash: source_file_content = get_file_content(source_file_name) hasher = sha256() hasher.update(source_file_content) content_hash = hasher.hexdigest() file_content = session.query(FileContent).get(content_hash) if not file_content: if not source_file_content: source_file_content = get_file_content(source_file_name) try: compressed_content = zlib.compress(source_file_content, zlib.Z_BEST_COMPRESSION) fc = FileContent(content_hash, compressed_content) session.add(fc) session.commit() except sqlalchemy.exc.IntegrityError: # Other transaction moght have added the same content in # the meantime. 
session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == file_path) \ .one_or_none() if not file_record: try: file_record = File(file_path, content_hash) session.add(file_record) session.commit() except sqlalchemy.exc.IntegrityError as ex: LOG.error(ex) # Other transaction might have added the same file in the # meantime. session.rollback() file_record = session.query(File) \ .filter(File.content_hash == content_hash, File.filepath == file_path) \ .one_or_none() return file_record.id def __store_analysis_statistics( self, session: DBSession, run_history_id: int ): """ Store analysis statistics for the given run history. It will unique the statistics for each analyzer type based on the metadata information. """ stats = defaultdict(lambda: { "versions": set(), "failed_sources": set(), "successful_sources": set(), "successful": 0 }) for mip in self.__mips.values(): self.__duration += int(sum(mip.check_durations)) for analyzer_type, res in mip.analyzer_statistics.items(): if "version" in res: stats[analyzer_type]["versions"].add(res["version"]) if "failed_sources" in res: if self.__version == '6.9.0': stats[analyzer_type]["failed_sources"].add( 'Unavailable in CodeChecker 6.9.0!') else: stats[analyzer_type]["failed_sources"].update( res["failed_sources"]) if "successful_sources" in res: stats[analyzer_type]["successful_sources"].update( res["successful_sources"]) if "successful" in res: stats[analyzer_type]["successful"] += res["successful"] for analyzer_type, stat in stats.items(): analyzer_version = None if stat["versions"]: analyzer_version = zlib.compress( "; ".join(stat["versions"]).encode('utf-8'), zlib.Z_BEST_COMPRESSION) failed = 0 compressed_files = None if stat["failed_sources"]: compressed_files = zlib.compress( '\n'.join(stat["failed_sources"]).encode('utf-8'), zlib.Z_BEST_COMPRESSION) failed = len(stat["failed_sources"]) successful = len(stat["successful_sources"]) \ if stat["successful_sources"] 
else stat["successful"] analyzer_statistics = AnalyzerStatistic( run_history_id, analyzer_type, analyzer_version, successful, failed, compressed_files) session.add(analyzer_statistics) def __store_analysis_info( self, session: DBSession, run_history: RunHistory ): """ Store analysis info for the given run history. """ for src_dir_path, mip in self.__mips.items(): for analyzer_command in mip.check_commands: cmd = zlib.compress( analyzer_command.encode("utf-8"), zlib.Z_BEST_COMPRESSION) analysis_info_rows = session \ .query(AnalysisInfo) \ .filter(AnalysisInfo.analyzer_command == cmd) \ .all() if analysis_info_rows: # It is possible when multiple runs are stored # simultaneously to the server with the same analysis # command that multiple entries are stored into the # database. In this case we will select the first one. analysis_info = analysis_info_rows[0] else: analysis_info = AnalysisInfo(analyzer_command=cmd) session.add(analysis_info) run_history.analysis_info.append(analysis_info) self.__analysis_info[src_dir_path] = analysis_info def __add_checker_run( self, session: DBSession, run_history_time: datetime ) -> int: """ Store run related data to the database. By default updates the results if name already exists. Using the force flag removes existing analysis results for a run. """ try: LOG.debug("Adding run '%s'...", self.__name) run = session.query(Run) \ .filter(Run.name == self.__name) \ .one_or_none() if run and self.__force: # Clean already collected results. if not run.can_delete: # Deletion is already in progress. msg = f"Can't delete {run.id}" LOG.debug(msg) raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg) LOG.info('Removing previous analysis results...') session.delete(run) # Not flushing after delete leads to a constraint violation # error later, when adding run entity with the same name as # the old one. 
session.flush() checker_run = Run(self.__name, self.__version) session.add(checker_run) session.flush() run_id = checker_run.id elif run: # There is already a run, update the results. run.date = datetime.now() run.duration = -1 session.flush() run_id = run.id else: # There is no run create new. checker_run = Run(self.__name, self.__version) session.add(checker_run) session.flush() run_id = checker_run.id # Add run to the history. LOG.debug("Adding run history.") if self.__tag is not None: run_history = session.query(RunHistory) \ .filter(RunHistory.run_id == run_id, RunHistory.version_tag == self.__tag) \ .one_or_none() if run_history: run_history.version_tag = None session.add(run_history) cc_versions = set() for mip in self.__mips.values(): if mip.cc_version: cc_versions.add(mip.cc_version) cc_version = '; '.join(cc_versions) if cc_versions else None run_history = RunHistory( run_id, self.__tag, self.user_name, run_history_time, cc_version, self.__description) session.add(run_history) session.flush() LOG.debug("Adding run done.") self.__store_analysis_statistics(session, run_history.id) self.__store_analysis_info(session, run_history) session.flush() LOG.debug("Storing analysis statistics done.") return run_id except Exception as ex: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, str(ex)) def __add_report( self, session: DBSession, run_id: int, file_id: int, main_section: MainSection, path_events: PathEvents, detection_status: str, detection_time: datetime, analysis_info: AnalysisInfo, analyzer_name: Optional[str] = None ) -> int: """ Add report to the database. """ def store_bug_events(report_id: int): """ Add bug path events. """ for i, event in enumerate(path_events.events): bpe = BugPathEvent( event.startLine, event.startCol, event.endLine, event.endCol, i, event.msg, event.fileId, report_id) session.add(bpe) def store_bug_path(report_id: int): """ Add bug path points. 
""" for i, piece in enumerate(path_events.paths): brp = BugReportPoint( piece.startLine, piece.startCol, piece.endLine, piece.endCol, i, piece.fileId, report_id) session.add(brp) def store_extended_bug_data(report_id: int): """ Add extended bug data objects to the database session. """ for data in path_events.extended_data: data_type = report_extended_data_type_str(data.type) red = ExtendedReportData( data.startLine, data.startCol, data.endLine, data.endCol, data.message, data.fileId, report_id, data_type) session.add(red) try: checker_name = main_section['check_name'] severity_name = self.__context.severity_map.get(checker_name) severity = ttypes.Severity._NAMES_TO_VALUES[severity_name] report = Report( run_id, main_section['issue_hash_content_of_line_in_context'], file_id, main_section['description'], checker_name or 'NOT FOUND', main_section['category'], main_section['type'], main_section['location']['line'], main_section['location']['col'], severity, detection_status, detection_time, len(path_events.events), analyzer_name) session.add(report) session.flush() LOG.debug("storing bug path") store_bug_path(report.id) LOG.debug("storing events") store_bug_events(report.id) LOG.debug("storing extended report data") store_extended_bug_data(report.id) if analysis_info: report.analysis_info.append(analysis_info) return report.id except Exception as ex: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.GENERAL, str(ex)) def __process_report_file( self, report_file_path: str, session: DBSession, source_root: str, run_id: int, file_path_to_id: Dict[str, int], run_history_time: datetime, skip_handler: Optional[skiplist_handler.SkipListHandler], hash_map_reports: Dict[str, List[Any]] ) -> bool: """ Process and save reports from the given report file to the database. 
""" try: files, reports = plist_parser.parse_plist_file(report_file_path) except Exception as ex: LOG.warning('Parsing the plist failed: %s', str(ex)) return False if not reports: return True trimmed_files = {} file_ids = {} missing_ids_for_files = [] for k, v in files.items(): trimmed_files[k] = \ util.trim_path_prefixes(v, self.__trim_path_prefixes) for file_name in trimmed_files.values(): file_id = file_path_to_id.get(file_name, -1) if file_id == -1: missing_ids_for_files.append(file_name) continue file_ids[file_name] = file_id if missing_ids_for_files: LOG.warning("Failed to get file path id for '%s'!", ' '.join(missing_ids_for_files)) return False def set_review_status(report: ReportType): """ Set review status for the given report if there is any source code comment. """ checker_name = report.main['check_name'] last_report_event = report.bug_path[-1] # The original file path is needed here not the trimmed # because the source files are extracted as the original # file path. file_name = files[last_report_event['location']['file']] source_file_name = os.path.realpath( os.path.join(source_root, file_name.strip("/"))) # Check and store source code comments. if not os.path.isfile(source_file_name): return report_line = last_report_event['location']['line'] source_file = os.path.basename(file_name) src_comment_data = parse_codechecker_review_comment( source_file_name, report_line, checker_name) if len(src_comment_data) == 1: status = src_comment_data[0]['status'] rw_status = ttypes.ReviewStatus.FALSE_POSITIVE if status == 'confirmed': rw_status = ttypes.ReviewStatus.CONFIRMED elif status == 'intentional': rw_status = ttypes.ReviewStatus.INTENTIONAL self.__report_server._setReviewStatus( session, report.report_hash, rw_status, src_comment_data[0]['message'], run_history_time) elif len(src_comment_data) > 1: LOG.warning( "Multiple source code comment can be found " "for '%s' checker in '%s' at line %s. 
" "This bug will not be suppressed!", checker_name, source_file, report_line) self.__wrong_src_code_comments.append( f"{source_file}|{report_line}|{checker_name}") root_dir_path = os.path.dirname(report_file_path) mip = self.__mips[root_dir_path] analysis_info = self.__analysis_info.get(root_dir_path) for report in reports: self.__all_report_checkers.add(report.check_name) if skip_handler and skip_handler.should_skip(report.file_path): continue report.trim_path_prefixes(self.__trim_path_prefixes) report_path_hash = get_report_path_hash(report) if report_path_hash in self.__already_added_report_hashes: LOG.debug('Not storing report. Already added: %s', report) continue LOG.debug("Storing report to the database...") bug_id = report.report_hash detection_status = 'new' detected_at = run_history_time if bug_id in hash_map_reports: old_report = hash_map_reports[bug_id][0] old_status = old_report.detection_status detection_status = 'reopened' \ if old_status == 'resolved' else 'unresolved' detected_at = old_report.detected_at analyzer_name = get_analyzer_name( report.check_name, mip.checker_to_analyzer, report.metadata) path_events = collect_paths_events(report, file_ids, trimmed_files) report_id = self.__add_report( session, run_id, file_ids[report.file_path], report.main, path_events, detection_status, detected_at, analysis_info, analyzer_name) self.__new_report_hashes.add(bug_id) self.__already_added_report_hashes.add(report_path_hash) set_review_status(report) LOG.debug("Storing report done. ID=%d", report_id) return True def __store_reports( self, session: DBSession, report_dir: str, source_root: str, run_id: int, file_path_to_id: Dict[str, int], run_history_time: datetime ): """ Parse up and store the plist report files. 
""" def get_skip_handler( report_dir: str ) -> Optional[skiplist_handler.SkipListHandler]: """ Get a skip list handler based on the given report directory.""" skip_file_path = os.path.join(report_dir, 'skip_file') if not os.path.exists(skip_file_path): return LOG.debug("Pocessing skip file %s", skip_file_path) try: with open(skip_file_path, encoding="utf-8", errors="ignore") as f: skip_content = f.read() LOG.debug(skip_content) return skiplist_handler.SkipListHandler(skip_content) except (IOError, OSError) as err: LOG.warning("Failed to open skip file: %s", err) # Reset internal data. self.__already_added_report_hashes = set() self.__new_report_hashes = set() self.__all_report_checkers = set() all_reports = session.query(Report) \ .filter(Report.run_id == run_id) \ .all() hash_map_reports = defaultdict(list) for report in all_reports: hash_map_reports[report.bug_id].append(report) enabled_checkers: Set[str] = set() disabled_checkers: Set[str] = set() # Processing PList files. for root_dir_path, _, report_file_paths in os.walk(report_dir): LOG.debug("Get reports from '%s' directory", root_dir_path) skip_handler = get_skip_handler(root_dir_path) mip = self.__mips[root_dir_path] enabled_checkers.update(mip.enabled_checkers) disabled_checkers.update(mip.disabled_checkers) for f in report_file_paths: if not f.endswith('.plist'): continue LOG.debug("Parsing input file '%s'", f) report_file_path = os.path.join(root_dir_path, f) self.__process_report_file( report_file_path, session, source_root, run_id, file_path_to_id, run_history_time, skip_handler, hash_map_reports) # If a checker was found in a plist file it can not be disabled so we # will add this to the enabled checkers list and remove this checker # from the disabled checkers list. # Also if multiple report directories are stored and a checker was # enabled in one report directory but it was disabled in another # directory we will mark this checker as enabled. 
enabled_checkers |= self.__all_report_checkers disabled_checkers -= self.__all_report_checkers reports_to_delete = set() for bug_hash, reports in hash_map_reports.items(): if bug_hash in self.__new_report_hashes: reports_to_delete.update([x.id for x in reports]) else: for report in reports: # We set the fix date of a report only if the report # has not been fixed before. if report.fixed_at: continue checker = report.checker_id if checker in disabled_checkers: report.detection_status = 'off' elif checker_is_unavailable(checker, enabled_checkers): report.detection_status = 'unavailable' else: report.detection_status = 'resolved' report.fixed_at = run_history_time if reports_to_delete: self.__report_server._removeReports( session, list(reports_to_delete)) def finish_checker_run( self, session: DBSession, run_id: int ) -> bool: """ Finish the storage of the given run. """ try: LOG.debug("Finishing checker run") run = session.query(Run).get(run_id) if not run: return False run.mark_finished() run.duration = self.__duration return True except Exception as ex: LOG.error(ex) return False def store(self) -> int: """ Store run results to the server. """ start_time = time.time() # Check constraints of the run. self.__check_run_limit() with DBSession(self.__Session) as session: self.__store_run_lock(session) try: with TemporaryDirectory( dir=self.__context.codechecker_workspace ) as zip_dir: LOG.info("[%s] Unzip storage file...", self.__name) zip_size = unzip(self.__b64zip, zip_dir) LOG.info("[%s] Unzip storage file done.", self.__name) if zip_size == 0: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes. 
ErrorCode.GENERAL, "The received zip file content is empty!") LOG.debug("Using unzipped folder '%s'", zip_dir) source_root = os.path.join(zip_dir, 'root') report_dir = os.path.join(zip_dir, 'reports') content_hash_file = os.path.join( zip_dir, 'content_hashes.json') filename_to_hash = \ util.load_json_or_empty(content_hash_file, {}) LOG.info("[%s] Store source files...", self.__name) file_path_to_id = self.__store_source_files( source_root, filename_to_hash) LOG.info("[%s] Store source files done.", self.__name) run_history_time = datetime.now() # Parse all metadata information from the report directory. for root_dir_path, _, _ in os.walk(report_dir): metadata_file_path = os.path.join( root_dir_path, 'metadata.json') self.__mips[root_dir_path] = \ MetadataInfoParser(metadata_file_path) # When we use multiple server instances and we try to run # multiple storage to each server which contain at least two # reports which have the same report hash and have source code # comments it is possible that the following exception will be # thrown: (psycopg2.extensions.TransactionRollbackError) # deadlock detected. # The problem is that the report hash is the key for the # review data table and both of the store actions try to # update the same review data row. # Neither of the two processes can continue, and they will wait # for each other indefinitely. PostgreSQL in this case will # terminate one transaction with the above exception. # For this reason in case of failure we will wait some seconds # and try to run the storage again. # For more information see #2655 and #2653 issues on github. max_num_of_tries = 3 num_of_tries = 0 sec_to_wait_after_failure = 60 while True: try: # This session's transaction buffer stores the actual # run data into the database. with DBSession(self.__Session) as session: # Load the lock record for "FOR UPDATE" so that the # transaction that handles the run's store # operations has a lock on the database row itself. 
run_lock = session.query(RunLock) \ .filter(RunLock.name == self.__name) \ .with_for_update(nowait=True).one() # Do not remove this seemingly dummy print, we need # to make sure that the execution of the SQL # statement is not optimised away and the fetched # row is not garbage collected. LOG.debug("Storing into run '%s' locked at '%s'.", self.__name, run_lock.locked_at) # Actual store operation begins here. run_id = self.__add_checker_run( session, run_history_time) LOG.info("[%s] Store reports...", self.__name) self.__store_reports( session, report_dir, source_root, run_id, file_path_to_id, run_history_time) LOG.info("[%s] Store reports done.", self.__name) self.finish_checker_run(session, run_id) session.commit() LOG.info("'%s' stored results (%s KB " "/decompressed/) to run '%s' (id: %d) in " "%s seconds.", self.user_name, round(zip_size / 1024), self.__name, run_id, round(time.time() - start_time, 2)) return run_id except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError) as ex: num_of_tries += 1 if num_of_tries == max_num_of_tries: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes. ErrorCode.DATABASE, "Storing reports to the database failed: " "{0}".format(ex)) LOG.error("Storing reports of '%s' run failed: " "%s.\nWaiting %d sec before trying to store " "it again!", self.__name, ex, sec_to_wait_after_failure) time.sleep(sec_to_wait_after_failure) sec_to_wait_after_failure *= 2 except Exception as ex: LOG.error("Failed to store results: %s", ex) import traceback traceback.print_exc() raise finally: # In any case if the "try" block's execution began, a run lock must # exist, which can now be removed, as storage either completed # successfully, or failed in a detectable manner. # (If the failure is undetectable, the coded grace period expiry # of the lock will allow further store operations to the given # run name.) 
with DBSession(self.__Session) as session: self.__free_run_lock(session) if self.__wrong_src_code_comments: raise codechecker_api_shared.ttypes.RequestFailed( codechecker_api_shared.ttypes.ErrorCode.SOURCE_FILE, "Multiple source code comment can be found with the same " "checker name for same bug!", self.__wrong_src_code_comments)
[ "csordasmarton92@gmail.com" ]
csordasmarton92@gmail.com
53b09cda44362a8837373232a16c18428dcb871d
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02585/s497439083.py
c64c92fb3dffd1704057e5332cba0a7d2217f5e0
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
1,416
py
def solve(n, k, p, c):
    """Return the maximum score for AtCoder ABC175-D "Moving Piece".

    A piece placed on square ``i`` repeatedly moves to ``p[i]`` (1-based
    permutation), collecting ``c[destination]`` after each move.  Between 1
    and ``k`` moves must be made; the answer is the best total over every
    starting square.

    Args:
        n: number of squares (len of ``p`` and ``c``).
        k: maximum number of moves (at least one move is mandatory).
        p: 1-based permutation; from square ``i`` the piece moves to ``p[i]``.
        c: score gained on entering each square.

    Returns:
        The maximum achievable total score (may be negative).
    """
    best = -float('inf')
    for start in range(n):
        # Prefix sums of the scores collected along the cycle that begins
        # with the first move away from `start` and ends on returning to it.
        idx = p[start] - 1
        prefix = [c[idx]]
        while idx != start:
            idx = p[idx] - 1
            prefix.append(prefix[-1] + c[idx])

        cycle_len = len(prefix)
        full_loops, remainder = divmod(k, cycle_len)

        if k <= cycle_len:
            # Cannot complete a full loop: best partial walk of <= k moves.
            candidate = max(prefix[:k])
        elif prefix[-1] <= 0:
            # Whole-loop sum is non-positive: looping never helps.
            candidate = max(prefix)
        else:
            # Option 1: one loop fewer, then the best partial walk anywhere
            # in a final loop (can use up to cycle_len extra moves).
            option1 = prefix[-1] * (full_loops - 1) + max(prefix)
            # Option 2: all full loops, then at most `remainder` extra moves
            # (or stop immediately, hence the max with 0).
            option2 = prefix[-1] * full_loops
            if remainder:
                option2 += max(0, max(prefix[:remainder]))
            candidate = max(option1, option2)

        best = max(best, candidate)
    return best


if __name__ == "__main__":
    n, k = map(int, input().split())
    P = list(map(int, input().split()))
    C = list(map(int, input().split()))
    print(solve(n, k, P, C))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
9290a1f679623cb6793f2eaef635da4b4689e597
6fce025097cebfd9d1dd37f6611e7fdfdbea90e6
/rainfields/model_conv.py
8a22d603ee800aed0a84aee26d7728f6111a7b66
[]
no_license
ANU-WALD/pluvi_pondus
ec0439d19acdcf4fdf712d6b14a1714297d661b2
ff8680f7115ab2cb75138bf6705abb59618e47d1
refs/heads/master
2021-07-01T14:32:14.501631
2020-08-22T09:41:28
2020-08-22T09:41:28
138,804,652
0
0
null
null
null
null
UTF-8
Python
false
false
2,287
py
"""Train a fully-connected rainfall regressor on precomputed patch features.

Loads feature patches (``x_conv.npy``) and rainfall targets (``y_conv.npy``),
rebalances the heavily zero-dominated targets by keeping every rainy sample
plus twice as many randomly chosen dry samples, shuffles the result, and fits
a small stack of Dense layers with an MSE loss.
"""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import Adam, SGD

import numpy as np

# Cap on the number of samples read from disk.
MAX_SAMPLES = 10000000

x = np.load("x_conv.npy")[:MAX_SAMPLES]
print(x.shape)
y = np.load("y_conv.npy")[:MAX_SAMPLES, None]  # add a trailing target axis
print(y.shape)

# Indices of samples with measurable precipitation (y > 0).
prec_mask = np.nonzero(y > 0)
print(prec_mask)
print(len(prec_mask))
print(prec_mask[0].shape)

x_prec = x[prec_mask[0], :]
y_prec = y[prec_mask[0], :]
print(x_prec.shape, y_prec.shape)

# Indices of dry samples (y == 0); these vastly outnumber the rainy ones.
zero_mask = np.nonzero(y == 0)
x_dry = x[zero_mask[0], :]
y_dry = y[zero_mask[0], :]
print(x_dry.shape, y_dry.shape)

# Rebalance: keep two dry samples per rainy one, chosen with a fixed seed
# so the subsample is reproducible.
idxs = np.arange(x_dry.shape[0])
np.random.seed(0)
np.random.shuffle(idxs)
n = x_prec.shape[0] * 2
x_dry = x_dry[idxs[:n], :]
y_dry = y_dry[idxs[:n], :]
print(x_dry.shape, y_dry.shape)

# Merge both classes and shuffle them together.
x = np.concatenate((x_prec, x_dry), axis=0)
y = np.concatenate((y_prec, y_dry), axis=0)
print(x.shape, y.shape)

idxs = np.arange(x.shape[0])
np.random.shuffle(idxs)
x = x[idxs, :]
# Flatten each patch to a feature vector; assumes 100 features per sample
# (e.g. 5x5x4 patches) to match input_dim=100 below -- TODO confirm.
x = np.reshape(x, (x.shape[0], -1))
y = y[idxs, :]
print(x.shape, y.shape)

# Small fully-connected regressor; the ReLU output keeps predictions >= 0.
model = Sequential()
model.add(Dense(100, activation='relu', input_dim=100))
model.add(Dense(200, activation='relu'))
model.add(Dense(400, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='relu'))

# Fixed-index train/validation split.
TRAIN_SIZE = 175000
x_train = x[:TRAIN_SIZE, :]
x_test = x[TRAIN_SIZE:, :]
y_train = y[:TRAIN_SIZE, :]
y_test = y[TRAIN_SIZE:, :]
print(y_train.shape, y_test.shape)

# Baseline target statistics (MSE/MAE an all-zero predictor would score).
print(np.square(y_train).mean(axis=0))
print(np.square(y_test).mean(axis=0))
print(np.abs(y_train).mean(axis=0))
print(np.abs(y_test).mean(axis=0))

# NOTE: the original used the removed Keras keywords `lr=` and `nb_epoch=`;
# tf.keras requires `learning_rate=` and `epochs=`.
model.compile(optimizer=Adam(learning_rate=0.000001), loss='mse',
              metrics=['mae', 'mse'])
model.fit(x_train, y_train, batch_size=32, epochs=10,
          validation_data=(x_test, y_test))
[ "pablo.larraondo@anu.edu.au" ]
pablo.larraondo@anu.edu.au
8fb462ddad89edc7fbb7b5e557a717ad88b70e9c
8357c11dc5199ac3d7f688befef2318f4bad0605
/objectoriented/Sunil AI game/Part1.py
e3cb76f2651331e7ed10da02e9a08ae0ac444a70
[]
no_license
bibash28/python_projects
d92bd8b49dd75417233424dab5ea39bfc8dcf470
049795771ec876052b3fd6967c0d7575d6348c98
refs/heads/main
2023-05-16T17:36:28.780358
2021-06-09T03:40:14
2021-06-09T03:40:14
373,839,559
0
0
null
null
null
null
UTF-8
Python
false
false
5,745
py
import math
#______________________________________________________________________________
# Missionaries and Cannibals Problem


class State():
    """One bank configuration of the missionaries-and-cannibals puzzle.

    Attributes hold the head-counts on each bank plus which side the boat
    is on ('left' or 'right'); ``parent`` links states into a solution path.
    """

    def __init__(self, cannibalLeft, missionaryLeft, boat, cannibalRight, missionaryRight):
        self.cannibalLeft = cannibalLeft
        self.missionaryLeft = missionaryLeft
        self.boat = boat
        self.cannibalRight = cannibalRight
        self.missionaryRight = missionaryRight
        self.parent = None

    def is_goal(self):
        """True when everyone has crossed to the right bank."""
        return self.cannibalLeft == 0 and self.missionaryLeft == 0

    def is_valid(self):
        """True when no count is negative and missionaries are never
        outnumbered by cannibals on a bank they occupy."""
        return (self.missionaryLeft >= 0 and self.missionaryRight >= 0
                and self.cannibalLeft >= 0 and self.cannibalRight >= 0
                and (self.missionaryLeft == 0 or self.missionaryLeft >= self.cannibalLeft)
                and (self.missionaryRight == 0 or self.missionaryRight >= self.cannibalRight))

    def __eq__(self, other):
        return (self.cannibalLeft == other.cannibalLeft
                and self.missionaryLeft == other.missionaryLeft
                and self.boat == other.boat
                and self.cannibalRight == other.cannibalRight
                and self.missionaryRight == other.missionaryRight)

    def __hash__(self):
        return hash((self.cannibalLeft, self.missionaryLeft, self.boat,
                     self.cannibalRight, self.missionaryRight))


# Boat loads tried in the original's order: (cannibals, missionaries).
_MOVES = [(0, 2), (2, 0), (1, 1), (0, 1), (1, 0)]


def successors(cur_state):
    """Return every valid state reachable from cur_state by one crossing.

    Replaces the original's ten copy-pasted blocks with one loop over the
    move table: crossing left->right subtracts passengers from the left
    bank; right->left adds them back. Child order is unchanged.
    """
    children = []
    if cur_state.boat == 'left':
        sign, dest = -1, 'right'
    else:
        sign, dest = 1, 'left'
    for cann, miss in _MOVES:
        new_state = State(cur_state.cannibalLeft + sign * cann,
                          cur_state.missionaryLeft + sign * miss,
                          dest,
                          cur_state.cannibalRight - sign * cann,
                          cur_state.missionaryRight - sign * miss)
        if new_state.is_valid():
            new_state.parent = cur_state
            children.append(new_state)
    return children


def breadth_first_search():
    """Run BFS from (3,3,left,0,0); return the goal State, or None.

    Bug fixed: the original only skipped a child that was in BOTH the
    explored set and the frontier (`or` instead of `and`), so nearly
    every child was re-enqueued and the frontier filled with duplicates.
    A leftover debug print of children[0] was removed as well.
    """
    initial_state = State(3, 3, 'left', 0, 0)
    if initial_state.is_goal():
        return initial_state
    frontier = [initial_state]
    explored = set()
    while frontier:
        state = frontier.pop(0)
        if state.is_goal():
            return state
        explored.add(state)
        for child in successors(state):
            if child not in explored and child not in frontier:
                frontier.append(child)
    return None


def print_solution(solution):
    """Print the path from the initial state to `solution`, one tuple per line."""
    path = [solution]
    parent = solution.parent
    while parent:
        path.append(parent)
        parent = parent.parent
    for t in range(len(path)):
        state = path[len(path) - t - 1]
        print ("(" + str(state.cannibalLeft) + "," + str(state.missionaryLeft) \
            + "," + state.boat + "," + str(state.cannibalRight) + "," + \
            str(state.missionaryRight) + ")")


def main():
    solution = breadth_first_search()
    print ("Missionaries and Cannibals solution:")
    print ("(cannibalLeft,missionaryLeft,boat,cannibalRight,missionaryRight)")
    print_solution(solution)


# if called from the command line, call main()
if __name__ == "__main__":
    main()
[ "bibashshrestha@gmail.com" ]
bibashshrestha@gmail.com
7585bda41bed0e298beec5495edf905f2f6233f0
b30dec6c5792458c7e435d36c2144196c717a420
/product/models.py
43e1cfa1c440363749017c67a90aeb1314811156
[]
no_license
RowdyKGZ/e-shop
09e8e938b82ae074e29066362e4753d37f4c574b
98e3d0d10d3585dae10c2906fe2212b299c76d66
refs/heads/master
2023-02-19T08:53:11.243969
2021-01-21T14:19:33
2021-01-21T14:19:33
331,340,387
0
0
null
null
null
null
UTF-8
Python
false
false
2,008
py
import uuid
from time import time

from django.contrib.auth import get_user_model
from django.db import models
from pytils.translit import slugify


def gen_slug(s):
    """Slugify `s` (transliterating via pytils) and append a unix-timestamp
    suffix so repeated names still yield distinct slugs."""
    slug = slugify(s)
    return slug + '-' + str(int(time()))


class Category(models.Model):
    """Product category; categories nest via the self-referencing `parent` FK."""
    name = models.CharField(max_length=255, unique=True)
    # The slug doubles as the primary key; generated on first save.
    slug = models.SlugField(max_length=150, unique=True, primary_key=True, blank=True)
    parent = models.ForeignKey('self', related_name='children',
                               on_delete=models.CASCADE, null=True, blank=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Fixed: the override must accept and forward *args/**kwargs --
        # the original `def save(self)` broke calls like save(using=...)
        # or save(force_insert=True) made by Django itself.
        if not self.slug:
            self.slug = gen_slug(self.name)
        super().save(*args, **kwargs)

    class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'


class Product(models.Model):
    """A sellable item; primary key is a UUID string assigned on first save."""
    uuid = models.UUIDField(primary_key=True, blank=True)
    title = models.CharField(max_length=255)
    description = models.TextField()
    price = models.DecimalField(max_digits=10, decimal_places=2)
    category = models.ManyToManyField(Category)

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        if not self.uuid:
            self.uuid = str(uuid.uuid4())
        super().save(*args, **kwargs)

    class Meta:
        ordering = ('price',)


class ProductImage(models.Model):
    """An image attached to a product (several per product allowed)."""
    image = models.ImageField(upload_to='products')
    product = models.ForeignKey(Product, related_name='images', on_delete=models.CASCADE)


class Comment(models.Model):
    """A user comment on a product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='comments')
    text = models.TextField(max_length=500)
    author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name='comments')
    create_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f'Comment by {self.author} on {self.product}, created at {self.create_at}'
[ "RowdyKG@gmail.com" ]
RowdyKG@gmail.com
f70a7a2863d3e542f444742ecfd691c846a9bed6
8986ec16c3a7fa6ac7bd2e7e73a9e31c740c67fb
/Python-Repo/Automate/BeautifulRequests.py
400c5205f2bba2f1799c797190d613124fe78045
[]
no_license
anurag00/Practice-Repo
e365cddfc61fcb50c2a7e1e9e5b5f8c15980c2f3
f7e7c849d8e6d332d918671798ecfd97dbb963d8
refs/heads/master
2022-12-10T02:39:41.424384
2022-11-23T06:28:55
2022-11-23T06:28:55
67,098,851
0
0
null
null
null
null
UTF-8
Python
false
false
445
py
import requests, bs4


def getAmazonPrice(producturl):
    """Scrape the displayed price string from an Amazon product page.

    Returns the stripped text of the `#priceblock_ourprice` element.
    Raises requests.HTTPError on a bad HTTP status, and ValueError when
    the price element is absent (layout change or bot-check interstitial)
    -- the original crashed with a bare IndexError in that case.
    """
    res = requests.get(producturl)
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    elems = soup.select('#priceblock_ourprice')
    if not elems:
        raise ValueError('price element #priceblock_ourprice not found on page')
    return elems[0].text.strip()


if __name__ == "__main__":
    price = getAmazonPrice('https://www.amazon.in/gp/product/B073JYVKNX/ref=ox_sc_sfl_title_1?ie=UTF8&psc=1&smid=A14CZOWI0VEHLG')
    print('Price is Rupees ' + price)
[ "anuragsethi.inc@gmail.com" ]
anuragsethi.inc@gmail.com
c0c384a71cdcc66ece3df3757b91a264386fb28b
967d7b69c22dfe1887ce28531689087e701d7c1f
/Udacity_Nanodegree/Machine_Learning/decision_tree/dt_author_id.py
69c7d408e5c44c785dbef6be23a9f1c11c0f2e5b
[]
no_license
nicky7711/Data-Analysis
2d5cc981ffcff26d735423ee13d3b824c5ad0f2d
fcbcec9e7b38cc627adb0bf8580fb109ee15b91b
refs/heads/master
2021-01-15T16:51:40.559906
2017-11-02T21:26:48
2017-11-02T21:26:48
99,729,967
1
0
null
null
null
null
UTF-8
Python
false
false
1,020
py
#!/usr/bin/python

"""
    This is the code to accompany the Lesson 3 (decision tree) mini-project.

    Use a Decision Tree to identify emails from the Enron corpus by author:
    Sara has label 0
    Chris has label 1
"""

import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess


### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()


#########################################################
### your code goes here ###
from sklearn import tree

# min_samples_split=40 regularises the tree (no split below 40 samples).
clf = tree.DecisionTreeClassifier(min_samples_split=40)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)

from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
# Fixed: Python-2 print statements (`print acc`) are syntax errors under
# Python 3; the function form below is valid on both interpreter lines.
print(acc)
print(len(features_train[0]))
#########################################################
[ "noreply@github.com" ]
noreply@github.com
0da70bd447adc54400de446fd008c174e5c6dbf1
d37eef0fa9fb9498266e06cb64799e736d89853c
/tests/wampcc/client_sends_bad_message.py
dbc0767b9e9b481217b2a735f8c210055b5ae684
[ "MIT" ]
permissive
samangh/wampcc
5d3d2ce56e6ded99c94b069e0eed743cb2fb4605
210502b57d10ab7b41d83e9e4c39b8da1460648a
refs/heads/master
2022-12-02T01:47:57.842335
2022-11-24T06:24:46
2022-11-24T07:00:55
173,782,913
0
0
MIT
2020-08-17T12:26:57
2019-03-04T16:40:05
C++
UTF-8
Python
false
false
619
py
#!/usr/bin/env python # # Copyright (c) 2017 Darren Smith # # wampcc is free software; you can redistribute it and/or modify # it under the terms of the MIT license. See LICENSE for details. # import socket import time s = socket.socket() # Create a socket object host = socket.gethostname() # Get local machine name port = 55555 # Reserve a port for your service. s.connect((host, port)) # print "connecting ... not doing anything" # while True: # time.sleep(60) # sleep 1 minute # print s.recv(1024) # s.close s.send("hello") while True: time.sleep(60) # sleep 1 s.close
[ "git@darrenjs.net" ]
git@darrenjs.net
e99cf2e9adcb19df3756f4140c9f29418dca891a
a1db93eacf80566089dfd88794b28e2069e1c9ab
/StudentApp.py
775ae412a77a85cfc6ebf6fb2c73d88b6d99d1f2
[]
no_license
karthikkalidas/StudentApp
b5f80adde9dbe24eacf8f7d53945571e9eecd98c
4d94706eec9906a4709f8e94081aa997b15a8242
refs/heads/master
2021-01-19T02:33:33.590767
2016-06-26T16:04:53
2016-06-26T16:04:53
61,212,507
0
0
null
null
null
null
UTF-8
Python
false
false
2,030
py
import json
from pprint import pprint
import sys
import os


class Student:
    """A student record: name, phone number and three subject marks.

    All fields are kept as strings, matching the text-file storage format.
    """

    def __init__(self, Name, PhoneNumber, Marks1, Marks2, Marks3):
        self.Name = Name
        self.PhoneNumber = PhoneNumber
        self.Marks1 = Marks1
        self.Marks2 = Marks2
        self.Marks3 = Marks3

    def __repr__(self):
        # One space-separated line per student -- this is the file format too.
        return (self.Name + ' ' + self.PhoneNumber + ' ' + self.Marks1
                + ' ' + self.Marks2 + ' ' + self.Marks3)


# Students added during this interactive session.
StudentList = []


def Add():
    """Prompt for one student's details and append that record to the file.

    Bug fixed: the original re-wrote the entire in-memory list in append
    mode on every call, duplicating all previously added records each
    time; now only the newly created record is written.
    """
    sName = input("Enter Student's Name : ")
    sPhoneNumber = input("Enter Student's PhoneNumber : ")
    sMarks1 = input("Enter Marks1 : ")
    sMarks2 = input("Enter Marks2 : ")
    sMarks3 = input("Enter Marks3 : ")
    new_student = Student(sName, sPhoneNumber, sMarks1, sMarks2, sMarks3)
    StudentList.append(new_student)
    with open("StudentListFile.txt", 'a') as StudentListFile:
        StudentListFile.write(str(new_student) + '\n')


def Read():
    """Print every record currently stored in the file."""
    with open("StudentListFile.txt", 'r') as f:
        records = [line.rstrip('\n') for line in f]
    print(records)


def Edit():
    """Replace the record at a 1-based index with freshly prompted details."""
    sindex = int(input("Enter Index : ")) - 1
    sName = input("Enter Student's Name : ")
    sPhoneNumber = input("Enter Student's PhoneNumber : ")
    sMarks1 = input("Enter Marks1 : ")
    sMarks2 = input("Enter Marks2 : ")
    sMarks3 = input("Enter Marks3 : ")
    # Read all lines, replace the chosen one, write everything back.
    with open('StudentListFile.txt', 'r') as file:
        data = file.readlines()
    data[sindex] = (sName + ' ' + sPhoneNumber + ' ' + sMarks1 + ' '
                    + sMarks2 + ' ' + sMarks3 + '\n')
    with open('StudentListFile.txt', 'w') as file:
        file.writelines(data)


def main():
    """Interactive menu loop.

    Fixed: the loop previously ran at module top level, so merely importing
    this module started an interactive session; it is now behind the
    __main__ guard. `quit()` (site-dependent) replaced by sys.exit().
    """
    while True:
        os.system('clear')  # NOTE(review): POSIX-only; would be 'cls' on Windows
        func = int(input("Choose Any Number : \n(1) Read\n(2) Add\n(3) Edit\n(4) Quit\n"))
        os.system('clear')
        if func == 1:
            Read()
        elif func == 2:
            Add()
        elif func == 3:
            Edit()
        elif func == 4:
            sys.exit()
        else:
            print("Your option doesn't make sense to me")
        input("\nPress any key to continue : ")


if __name__ == "__main__":
    main()
[ "karthik.kalidas@gmail.com" ]
karthik.kalidas@gmail.com
5f28d3473174758f29072135291cc13603f342ab
94bb77d0847df86ead773650cf4aa0885ed3ad4e
/dappcrowd/restapi/submissions_endpoint.py
d2fb9fe38acbfc271ba77225c557ec7a4ae17f5a
[]
no_license
Tribler/dappcoder
3766f0b252ac38d889ad3596b5b6335669d31100
8ae43d51a284929bc081c87debc9ef003d1f9116
refs/heads/master
2020-04-01T07:53:38.236183
2019-01-17T13:07:53
2019-01-17T13:07:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,196
py
import json from twisted.web import http from twisted.web.server import NOT_DONE_YET from dappcrowd.restapi.root_endpoint import DAppCrowdEndpoint class SubmissionsEndpoint(DAppCrowdEndpoint): def render_GET(self, request): """ Get all submissions. """ dappcrowd_overlay = self.get_dappcrowd_overlay() return json.dumps({"submissions": dappcrowd_overlay.persistence.get_submissions()}) def getChild(self, path, request): return SubmissionPKEndpoint(self.ipv8, self.ipfs_api, path) def render_PUT(self, request): """ Create a new submission for an app request. """ parameters = http.parse_qs(request.content.read(), 1) required_params = ['project_pk', 'project_id', 'submission'] for required_param in required_params: if required_param not in parameters: request.setResponseCode(http.BAD_REQUEST) return json.dumps({"error": "missing parameter %s" % required_param}) def on_block_created(blocks): request.write(json.dumps({"success": True})) request.finish() self.get_dappcrowd_overlay().create_submission(parameters['project_pk'][0].decode('hex'), parameters['project_id'][0], parameters['submission'][0]).addCallback(on_block_created) return NOT_DONE_YET class SubmissionPKEndpoint(DAppCrowdEndpoint): def __init__(self, ipv8, ipfs_api, public_key): DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api) self.public_key = public_key.decode('hex') def getChild(self, path, request): return SpecificSubmissionEndpoint(self.ipv8, self.ipfs_api, self.public_key, path) class SpecificSubmissionEndpoint(DAppCrowdEndpoint): def __init__(self, ipv8, ipfs_api, public_key, submission_id): DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api) self.public_key = public_key self.submission_id = submission_id self.putChild("reviews", SpecificSubmissionReviewsEndpoint(ipv8, ipfs_api, public_key, submission_id)) def render_GET(self, request): if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id): request.setResponseCode(http.NOT_FOUND) return json.dumps({"error": "the 
submission is not found"}) return json.dumps({ "submission": self.get_dappcrowd_overlay().persistence.get_submission(self.public_key, self.submission_id) }) class SpecificSubmissionReviewsEndpoint(DAppCrowdEndpoint): def __init__(self, ipv8, ipfs_api, public_key, submission_id): DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api) self.public_key = public_key self.submission_id = submission_id def render_GET(self, request): if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id): request.setResponseCode(http.NOT_FOUND) return json.dumps({"error": "the submission is not found"}) return json.dumps({ "reviews": self.get_dappcrowd_overlay().persistence.get_reviews(self.public_key, self.submission_id) })
[ "mdmartijndevos@gmail.com" ]
mdmartijndevos@gmail.com
d339a26f897381ccf5df4ce4cafc14487f785f32
8f5f6be343ad93c0f3df37675ded1ab2911d9fd8
/sqlterm/lib/chardet/langcyrillicmodel.py
a8cf1c376400588d2f2f584a5e8faff5d6d77a18
[]
no_license
johnfouf/monetdb_federated_poc
a69b6053234bb19a3ac8b8fa8e16c07646fd707d
dd6557996d25c49f8751b00c4ce2c9bfc5457097
refs/heads/master
2023-02-05T12:44:13.111544
2020-12-24T11:28:38
2020-12-24T11:28:38
277,555,501
1
1
null
null
null
null
UTF-8
Python
false
false
45,260
py
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import constants # KOI8-R language model # Character Mapping Table: KOI8R_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 
204, 205, 206, # 80 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, # 90 223, 224, 225, 68, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, # a0 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, # b0 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) win1251_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 68, 247, 248, 249, 250, 251, 252, 253, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) latin5_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 
253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255, ) macCyrillic_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 
213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 68, 16, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 255, ) IBM855_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 191, 192, 193, 194, 68, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 27, 59, 54, 70, 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46, 218, 219, 220, 221, 222, 223, 224, 26, 55, 4, 42, 225, 226, 227, 228, 23, 60, 229, 230, 231, 232, 233, 234, 235, 11, 36, 236, 237, 238, 239, 240, 241, 242, 243, 8, 49, 12, 38, 5, 31, 1, 34, 15, 244, 245, 246, 247, 35, 16, 248, 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61, 249, 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50, 251, 252, 255, ) IBM866_CharToOrderMap = ( 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 254, 255, 255, 254, 255, 255, # 00 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, # 10 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, # 20 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, # 30 253, 142, 143, 144, 145, 146, 147, 148, 
149, 150, 151, 152, 74, 153, 75, 154, # 40 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 253, 253, 253, 253, 253, # 50 253, 71, 172, 66, 173, 65, 174, 76, 175, 64, 176, 177, 77, 72, 178, 69, # 60 67, 179, 78, 73, 180, 181, 79, 182, 183, 184, 185, 253, 253, 253, 253, 253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 255, ) # Model Table: # total sequences: 100% # first 512 sequences: 97.6601% # first 1024 sequences: 2.3389% # rest sequences: 0.1237% # negative sequences: 0.0009% RussianLangModel = ( 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 3, 3, 3, 3, 1, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 2, 2, 2, 2, 0, 0, 2, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 
3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 1, 3, 3, 1, 3, 3, 3, 3, 2, 2, 3, 0, 2, 2, 2, 3, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 1, 2, 2, 0, 1, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3, 0, 2, 2, 3, 3, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 2, 3, 2, 3, 3, 3, 3, 2, 2, 3, 0, 3, 2, 2, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 2, 0, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3, 2, 2, 0, 1, 3, 2, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 3, 0, 1, 1, 1, 1, 2, 1, 1, 0, 2, 2, 2, 1, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 2, 2, 2, 2, 1, 3, 2, 3, 2, 3, 2, 1, 2, 2, 0, 1, 1, 2, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 2, 3, 3, 3, 2, 2, 2, 2, 0, 2, 2, 2, 2, 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 0, 0, 3, 3, 3, 3, 2, 3, 3, 3, 3, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3, 0, 2, 1, 0, 3, 2, 3, 2, 3, 0, 0, 1, 2, 0, 0, 1, 0, 1, 
2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 2, 3, 3, 3, 3, 2, 3, 3, 3, 3, 1, 2, 2, 0, 0, 2, 3, 2, 2, 2, 3, 2, 3, 2, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 0, 2, 3, 2, 3, 0, 1, 2, 3, 3, 2, 0, 2, 3, 0, 0, 2, 3, 2, 2, 0, 1, 3, 1, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 3, 0, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 2, 0, 0, 2, 2, 3, 3, 3, 2, 3, 3, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 2, 2, 2, 3, 3, 0, 0, 1, 1, 1, 1, 1, 2, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 3, 3, 2, 3, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 2, 2, 2, 2, 3, 1, 3, 2, 3, 1, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 3, 3, 3, 3, 1, 2, 2, 1, 3, 1, 0, 3, 0, 0, 3, 0, 0, 0, 1, 1, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 1, 1, 3, 3, 3, 2, 2, 1, 2, 2, 3, 1, 1, 2, 0, 0, 2, 2, 1, 3, 0, 0, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 3, 3, 3, 3, 1, 2, 2, 2, 1, 2, 1, 3, 3, 1, 1, 2, 1, 2, 1, 2, 2, 0, 2, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 3, 2, 1, 3, 2, 2, 3, 2, 0, 3, 2, 0, 3, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 1, 2, 1, 2, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 1, 2, 1, 2, 3, 3, 2, 2, 1, 2, 2, 3, 0, 2, 1, 0, 0, 2, 2, 3, 2, 1, 2, 2, 2, 2, 2, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 1, 1, 0, 1, 1, 2, 2, 1, 1, 3, 0, 0, 1, 3, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 2, 0, 0, 0, 2, 1, 0, 1, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 2, 3, 2, 2, 2, 1, 2, 2, 2, 1, 2, 1, 0, 0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 2, 0, 0, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 2, 2, 3, 2, 2, 2, 3, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 3, 3, 3, 2, 2, 2, 2, 3, 2, 2, 1, 1, 2, 2, 2, 2, 1, 1, 3, 1, 2, 1, 2, 0, 0, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 2, 0, 0, 1, 0, 3, 2, 2, 2, 2, 1, 2, 1, 2, 1, 2, 0, 0, 0, 2, 1, 2, 2, 1, 1, 2, 2, 0, 1, 1, 0, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 0, 1, 2, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 3, 2, 2, 2, 1, 1, 1, 2, 3, 0, 0, 0, 0, 2, 0, 2, 2, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 2, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 2, 3, 2, 3, 2, 1, 2, 2, 2, 2, 1, 0, 0, 0, 2, 0, 
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 2, 1, 1, 1, 2, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 1, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 1, 2, 1, 1, 1, 2, 2, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 2, 3, 2, 3, 3, 2, 0, 1, 1, 1, 0, 0, 1, 0, 2, 0, 1, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 3, 3, 3, 1, 2, 2, 2, 2, 0, 1, 1, 0, 2, 1, 1, 1, 2, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 3, 3, 2, 0, 0, 1, 1, 2, 2, 1, 0, 0, 2, 0, 1, 1, 3, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 2, 1, 1, 1, 2, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 3, 2, 3, 2, 1, 0, 0, 2, 2, 2, 0, 1, 0, 2, 0, 1, 1, 1, 0, 1, 0, 0, 0, 3, 0, 1, 1, 0, 0, 2, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 2, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 3, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 3, 3, 2, 2, 0, 0, 0, 2, 2, 0, 0, 0, 1, 2, 0, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 2, 3, 2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 2, 0, 1, 2, 1, 0, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 3, 2, 2, 2, 1, 0, 0, 2, 2, 1, 0, 1, 2, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 
1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 3, 1, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 0, 1, 0, 2, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 2, 0, 0, 1, 0, 3, 2, 1, 2, 1, 2, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 2, 1, 1, 1, 1, 0, 2, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 2, 0, 0, 2, 0, 1, 0, 1, 1, 1, 2, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 2, 1, 2, 2, 2, 0, 3, 0, 1, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, 3, 2, 2, 0, 0, 1, 1, 2, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 2, 2, 1, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 0, 1, 0, 2, 2, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 
1, 1, 2, 0, 2, 1, 1, 1, 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, ) Koi8rModel = { "charToOrderMap": KOI8R_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "KOI8-R", } Win1251CyrillicModel = { "charToOrderMap": win1251_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "windows-1251", } Latin5CyrillicModel = { "charToOrderMap": latin5_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "ISO-8859-5", } MacCyrillicModel = { "charToOrderMap": 
macCyrillic_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "MacCyrillic", } Ibm866Model = { "charToOrderMap": IBM866_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "IBM866", } Ibm855Model = { "charToOrderMap": IBM855_CharToOrderMap, "precedenceMatrix": RussianLangModel, "mTypicalPositiveRatio": 0.976601, "keepEnglishLetter": constants.False, "charsetName": "IBM855", }
[ "openaire@dl126.madgik.di.uoa.gr" ]
openaire@dl126.madgik.di.uoa.gr
b4b798b4b5b230c3088019cf13bf6acb5fe54680
95d4c8f4cda7ad1c7623a2df02da8cee1ad0941d
/src/classifier/german_pos_tagger.py
a66192956c55f524f2b1142c45afdf0df24c1383
[ "MIT" ]
permissive
tiefenauer/ip7-python
8f587b7b77430facff19b24441490756b01d5b17
512105ba39110ec77d2ea0961dd7c2a42d4ec26d
refs/heads/master
2021-09-09T20:37:21.647146
2018-02-20T12:07:46
2018-02-20T12:07:46
107,635,390
0
0
null
null
null
null
UTF-8
Python
false
false
3,382
py
# -*- coding: utf-8 -*- # # Natural Language Toolkit: ClassifierBasedGermanTagger # # URL: <http://www.experimentallabor.de/> # # Copyright 2011 Philipp Nolte # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tag German text. """ import re from nltk.tag.sequential import ClassifierBasedTagger class ClassifierBasedGermanTagger(ClassifierBasedTagger): """A classifier based German part-of-speech tagger. It has an accuracy of 96.09% after being trained on 90% of the German TIGER corpus. The tagger extends the NLTK ClassifierBasedTagger and implements a slightly modified feature detector. """ def feature_detector(self, tokens, index, history): """Implementing a slightly modified feature detector. @param tokens: The tokens from the sentence to tag. @param index: The current token index to tag. @param history: The previous tagged tokens. 
""" word = tokens[index] if index == 0: # At the beginning of the sentence prevword = prevprevword = None prevtag = prevprevtag = None # word = word.lower() # Lowercase at the beginning of sentence elif index == 1: prevword = tokens[index - 1] # Note: no lowercase prevprevword = None prevtag = history[index - 1] prevprevtag = None else: prevword = tokens[index - 1] prevprevword = tokens[index - 2] prevtag = history[index - 1] prevprevtag = history[index - 2] if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word): # Included "," as decimal point shape = 'number' elif re.compile('\W+$', re.UNICODE).match(word): # Included unicode flag shape = 'punct' elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word): # Included dash for dashed words and umlauts shape = 'upcase' elif re.match('[a-zäöüß]+', word): # Included umlauts shape = 'downcase' elif re.compile("\w+", re.UNICODE).match(word): # Included unicode flag shape = 'mixedcase' else: shape = 'other' features = { 'prevtag': prevtag, 'prevprevtag': prevprevtag, 'word': word, 'word.lower': word.lower(), 'suffix3': word.lower()[-3:], # 'suffix2': word.lower()[-2:], # 'suffix1': word.lower()[-1:], 'preffix1': word[:1], # included 'prevprevword': prevprevword, 'prevword': prevword, 'prevtag+word': '%s+%s' % (prevtag, word), 'prevprevtag+word': '%s+%s' % (prevprevtag, word), 'prevword+word': '%s+%s' % (prevword, word), 'shape': shape } return features
[ "git@tiefenauer.info" ]
git@tiefenauer.info
66ca04f0fddbc514ac5faf9ad4beca5f98b42a32
e23e32d8371f1211240b19c657820661bb121e02
/Taxi.py
cc6ae4f7b85ab8769471200c32780483452111a3
[]
no_license
ThomasElijah/Week_9
b55ad5f846d8249a353377904b51d3fd0a5218f5
09693027349d6e4df384ba78098c63cc7aa4f9dd
refs/heads/master
2021-01-16T23:05:46.399803
2016-09-20T08:40:32
2016-09-20T08:40:32
68,567,882
0
0
null
null
null
null
UTF-8
Python
false
false
1,958
py
""" CP1404/CP5632 Practical Car class """ class Car: """ represent a car object """ def __init__(self, name="", fuel=0): """ initialise a Car instance """ self.name = name self.fuel = fuel self.odometer = 0 def __str__(self): return "{}, fuel={}, odo={}".format(self.name, self.fuel, self.odometer) def add_fuel(self, amount): """ add amount to the car's fuel""" self.fuel += amount def drive(self, distance): """ drive the car a given distance if it has enough fuel or drive until fuel runs out return the distance actually driven """ if distance > self.fuel: distance_driven = self.fuel self.fuel = 0 else: self.fuel -= distance distance_driven = distance self.odometer += distance_driven return distance_driven class Taxi(Car): """ specialised version of a Car that includes fare costs """ price_per_km = 1.2 def __init__(self, name, fuel): """ initialise a Taxi instance, based on parent class Car """ super().__init__(name, fuel) self.current_fare_distance = 0 def __str__(self): """ return a string representation like a car but with current fare distance""" return "{}, ${:.2f}/km, {}km on current fare".format(super().__str__(), Taxi.price_per_km, self.current_fare_distance) def get_fare(self): """ get the price for the taxi trip """ return Taxi.price_per_km * self.current_fare_distance def start_fare(self): """ begin a new fare """ self.current_fare_distance = 0 def drive(self, distance): """ drive like parent Car but calculate fare distance as well""" distance_driven = super().drive(distance) self.current_fare_distance += distance_driven return distance_driven
[ "elijah.thomas@my.jcu.edu.au" ]
elijah.thomas@my.jcu.edu.au
fe687c852349e7fe2e04398f4929f9c94248ece8
2d52b8bf7f60043a77538743962095aeb7dd80e9
/Práctica 2/ID3/ID3.py
bdc2e0377e578e7fe46b7f71cbd167f2098b867a
[]
no_license
Javi96/SGDI
aa09b06f2e53ee1e2e6771803f1c8a6e4cc74ea4
1ffd55e577e1c3d594390c40dbda805649288bf5
refs/heads/master
2020-03-29T22:49:03.546186
2019-07-03T16:15:21
2019-07-03T16:15:21
150,441,863
0
0
null
null
null
null
UTF-8
Python
false
false
13,699
py
'''ID3 decision-tree classifier (practical assignment).

Builds an ID3 tree from a CSV training file whose label column is named
"class", renders the tree as a Graphviz .dot file, and classifies instances.
'''
import json
import csv
import sys
import operator
import random
import math
from termcolor import colored
from termcolor import *
from subprocess import call


class Node():
    """One tree node; also knows how to serialise itself to Graphviz."""

    node_count = 0  # class-level counter used to hand out unique node ids

    def __init__(self, _type, value='', edge=''):
        self.value = value      # attribute name (inner node) or class label (leaf)
        self.edge = edge        # attribute value on the edge from the parent
        self.type = _type       # 'inner' or 'leaf'
        self.nodes = []         # child nodes
        Node.node_count += 1
        self.node_id = Node.node_count
        self.node_info = ''     # accumulated .dot body text

    def add_son(self, son):
        """Append *son* to this node's children."""
        self.nodes.append(son)

    def get_tree(self):
        """Return the Graphviz body for this node and its whole subtree.

        Leaves are drawn as boxes; every parent->child edge is labelled with
        the attribute value that leads to that child.
        """
        if len(self.nodes) == 0:
            self.node_info += str(self.node_id) + '''[label="''' + str(self.value) + '''", shape="box"];\n'''
        else:
            self.node_info += str(self.node_id) + '''[label="''' + str(self.value) + '''"];\n'''
        for node in self.nodes:
            self.node_info += str(self.node_id) + '->' + str(node.node_id) + '''[label="''' + node.edge + '''"];\n'''
            self.node_info += node.get_tree()
        return self.node_info


class ID3Tree():
    """The ID3 tree itself, built eagerly from data plus an attribute list."""

    def __init__(self, data, attributes):
        self.attributes = attributes
        self.data = data
        self.nodo = self.generate_tree(attributes, data)

    def get_major_class(self, instances):
        """Return (most frequent class, number of distinct classes)."""
        classes = {}
        for instance in instances:
            label = instance.get('class')
            classes[label] = classes.get(label, 0) + 1
        major_class = max(classes.items(), key=operator.itemgetter(1))[0]
        return major_class, len(classes)

    def group_by_attribute(self, attributes, instances):
        """Group *instances* per attribute and per attribute value.

        Result shape: {attr: {value: [instances without attr, ...]}}.
        """
        result = {}
        for attribute in attributes:
            if attribute == 'class':
                continue
            grouped_instances = {}
            for instance in instances:
                copy_instance = instance.copy()
                copy_instance.pop(attribute)
                if instance[attribute] in grouped_instances:
                    # Prepend, matching the original accumulation order.
                    grouped_instances[instance[attribute]] = [copy_instance] + grouped_instances[instance[attribute]]
                else:
                    grouped_instances[instance[attribute]] = [copy_instance]
            result[attribute] = grouped_instances
        return result

    def get_partition(self, instances, attribute, value):
        """Return the instances whose *attribute* equals *value*."""
        return [instance for instance in instances if instance[attribute] == value]

    def generate_tree(self, attributes, instances, edge=''):
        """Recursively build the ID3 tree.

        Stops when all instances share one class or only 'class' remains;
        otherwise splits on the attribute with minimum weighted entropy.
        """
        major_class, count_class = self.get_major_class(instances)
        if count_class == 1 or len(attributes) == 1:
            return Node('leaf', major_class, edge)
        group_by_attribute = self.group_by_attribute(attributes, instances)
        min_entropy = self.get_entropy(group_by_attribute)
        nodo = Node('inner', min_entropy, edge)
        for value in group_by_attribute[min_entropy]:
            new_data = self.get_partition(instances, min_entropy, value)
            if len(new_data) == 0:
                # Empty partition: fall back to the majority class.
                aux_node = Node('leaf', major_class, edge)
            else:
                new_attributes = attributes.copy()
                new_attributes.remove(min_entropy)
                aux_node = self.generate_tree(new_attributes, new_data, value)
            nodo.add_son(aux_node)
        return nodo

    def get_entropy(self, grouped_instances):
        """Return the attribute with the lowest weighted entropy."""
        total_entropy = {}
        for group in grouped_instances.items():
            group_by_class = self.group_by_class(group)
            for elem in group_by_class.items():
                entropies_and_elems = []
                count = sum(elem[1].values())
                entropy = 0
                for i in elem[1].items():
                    entropy += -i[1] / count * math.log2(i[1] / count)
                entropies_and_elems.append((entropy, count))
                if group[0] not in total_entropy:
                    total_entropy[group[0]] = entropies_and_elems
                else:
                    total_entropy[group[0]] = entropies_and_elems + total_entropy[group[0]]
        result = {}
        for attribute in total_entropy.items():
            acc = 0
            # Weight each partition's entropy by its share of the data set.
            for i in range(0, len(attribute[1])):
                acc += attribute[1][i][0] * attribute[1][i][1] / len(self.data)
            result[attribute[0]] = acc
        return min(result.items(), key=operator.itemgetter(1))[0]

    def group_by_class(self, grouped_instances):
        """Count instances per class for each value of one grouped attribute."""
        group_by_class = {}
        for group in grouped_instances[1].items():
            aux = {}
            for instance in group[1]:
                if instance['class'] not in aux:
                    aux[instance['class']] = 1
                else:
                    aux[instance['class']] = 1 + aux[instance['class']]
            group_by_class[group[0]] = aux
        return group_by_class

    def save_tree(self, file):
        """Write the Graphviz representation of the tree to *file*."""
        with open(file, "w+") as file:
            file.write('''digraph tree {''' + self.nodo.get_tree() + '''}''')

    def clasifica(self, instance, node):
        """Classify *instance* by walking the tree from *node*.

        NOTE: consumes keys from *instance* while descending. Returns the
        predicted class label, or the sentinel '-----' when no edge matches.
        """
        if node.type == 'leaf':
            return node.value
        attribute = instance[node.value]
        for child in node.nodes:
            if child.edge == attribute:
                del instance[node.value]
                return self.clasifica(instance, child)
        return '-----'


class ID3(object):
    """Facade: reads training data from CSV and exposes classify/test/save."""

    def __init__(self, file):
        instances, attributes = self.read_csv(file)
        self.tree = ID3Tree(instances, attributes)

    def read_csv(self, file):
        """Read a CSV file; first row is the attribute names.

        Returns (list of instance dicts, attribute list).
        """
        attributes = []
        instances = []
        with open(file) as input_file:
            line = csv.reader(input_file, delimiter=',')
            attributes = next(line)
            for word in line:
                instances.append({attributes[i]: word[i] for i in range(0, len(word))})
        return instances, attributes

    def clasifica(self, instance):
        """Classify one instance dict; returns the predicted class."""
        return self.tree.clasifica(instance, self.tree.nodo)

    def test(self, file):
        """Classify every instance in *file* and report hit/fail counts."""
        instances, attributes = self.read_csv(file)
        hits = 0
        for instance in instances:
            result = self.tree.clasifica(instance.copy(), self.tree.nodo)
            print('Instance: ', instance, '\t\nresult: ', result)
            if instance['class'] == result:
                hits += 1
                print(colored('Hit', 'green'))
            else:
                print(colored('Fail', 'red'))
        return {'Hits: ': hits, 'Fails: ': len(instances) - hits, 'Total: ': len(instances)}

    def save_tree(self, file):
        """Dump the tree to a .dot file and open it with xdot."""
        self.tree.save_tree(file)
        call(['xdot', file])


if __name__ == '__main__':
    id3 = ID3(sys.argv[1])
    id3.save_tree('example.dot')
    # print(colored('class: ' + id3.clasifica({'season': 'winter', 'rain': 'heavy', 'wind': 'high', 'day': 'weekday'}), 'yellow'))
    # print(colored('class: ' + id3.clasifica({'season': 'winter', 'rain': 'heavy', 'wind': 'high', 'day': 'saturday'}), 'yellow'))
    result = id3.test(sys.argv[2])
    print(result)
[ "javi.tejada96@gmail.com" ]
javi.tejada96@gmail.com
208a6b8fc1e0c1f592e3b7d18c9b54892e832b7b
7cfef2dccf09644d49c8dfb8bd4197be51531ff4
/onegroup/wsgi.py
1355d18ee200da572cb95ddd2a21c78ffec71991
[]
no_license
varunagrawal/OneGroup
2235c808ec9394022e147fa1a8800b4d48ab3bae
fd9c92f2cc0c043c5c398efd99f2356a1475a64a
refs/heads/master
2022-05-06T12:52:25.933823
2022-04-12T21:44:22
2022-04-12T21:44:22
62,920,156
0
0
null
2022-04-22T23:41:50
2016-07-08T22:10:10
Python
UTF-8
Python
false
false
393
py
""" WSGI config for onegroup project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "onegroup.settings") application = get_wsgi_application()
[ "vagrawal@pindropsecurity.com" ]
vagrawal@pindropsecurity.com
1d7903952b84f753cada1f0be0c24e33b9d97d63
e3d06f7bf45f16cbfa68586a2ce6d662ce302af9
/packets_filter/storeHomeStateData.py
9d9dc73d9d66f8a038146dfda7ce214015387895
[]
no_license
y0d4a/Gsm-Packets-Analysis-Framework
f211972f3e02806483452c2fca066447d36f8e8a
060c4467154e05b2479b0df27075d5ded5e07216
refs/heads/master
2021-01-16T17:49:55.987440
2016-06-02T19:59:04
2016-06-02T19:59:04
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,773
py
from __future__ import print_function

from twisted.internet import defer, reactor
from txmongo.connection import ConnectionPool

url = "mongodb://localhost:27017"
cursor = None  # module-level txmongo connection pool, set by getConnection()


@defer.inlineCallbacks
def getConnection():
    """Open the MongoDB connection pool and stash it in the module global."""
    global cursor
    cursor = yield ConnectionPool(url)


@defer.inlineCallbacks
def getHomeStates():
    """Derive a home zone for every simulated person and persist the mapping.

    A person's home zone is the first zone (in the fixed zone order below)
    holding the maximum per-zone packet count, provided that maximum is at
    least 3, counting only packets with 1200 <= time <= 1440. Results are
    written to the PeopleHomeZones collection.
    """
    population = 2000
    home_zones = {}  # pid -> zone
    db = cursor.GsmSimulatedData
    col = db.RawPackets
    print('home state function called')
    for pid in range(population + 1):
        print('id->' + str(pid))
        skip = 0
        limit = 100
        # Per-zone packet counts, accumulated across result pages.
        count = {'nw': 0, 'ne': 0, 'e': 0, 'w': 0, 'n': 0, 's': 0, 'c': 0, 'sw': 0}
        found = False
        while True:
            docs = yield col.find(spec={'id': pid}, skip=skip, limit=limit)
            if not len(docs):
                break
            for doc in docs:
                minute = doc['time']
                if minute <= 1440 and minute >= 1200:
                    count[doc['tower']['zone']] += 1
            best = max(count.values())
            for zone in count:
                if count[zone] == best and best >= 3:
                    found = True
                    home_zones[pid] = zone
                    break
            if found:
                break
            skip += limit
            print('loop again')
    print('Home state finished')
    db = cursor.GsmSimulatedData
    col = db.PeopleHomeZones
    for person, zone in home_zones.items():
        yield col.insert({'id': person, 'zone': zone})
    print(home_zones)


if __name__ == '__main__':
    getConnection()
    getHomeStates().addCallback(lambda ign: reactor.stop())
    reactor.run()
[ "arorashwetankkdm@gmail.com" ]
arorashwetankkdm@gmail.com
10177a53490eb98107c90432833b44de0dc5241f
36e3d735e06d0642f1e8c26bff57305a01cc627c
/nms/priClient/settings.py
70dc9c3a439ae7865ae186e64032e891229bbeb1
[]
no_license
WilsonWangTHU/ipv6_server
5c768cdaeaf22ee508c5fff162b208481a42f95d
5088f58ab25061e65127699ed328ddaab24f9aac
refs/heads/master
2021-01-18T21:18:39.653994
2016-05-27T04:22:23
2016-05-27T04:22:23
55,656,523
4
0
null
null
null
null
UTF-8
Python
false
false
2,819
py
""" Django settings for subClient project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '2+ytb#pjeh*g!9_3m(id@&mn$c+f56$q6fp=*%lkr)wp8hpfz%' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'net_data' ] MIDDLEWARE_CLASSES = [ # 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', # 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'priClient.urls' TEMPLATE_DEBUG = True TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates'), ) WSGI_APPLICATION = 'priClient.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = os.path.join(BASE_DIR, 'static/')
[ "wode406@hotmail.com" ]
wode406@hotmail.com
2d4b29a8afb8ba840c0c97a4e5296c98779b4382
1f696631898e0279951709e150da6d87045e4bc4
/mysite/blog/migrations/0003_auto_20201018_1329.py
55a90dffcfe644d3983ed6e04e389fcac44cd412
[]
no_license
henryfrstr/django_project_blog
9f50b004b2fed59304c3f5a1f05247d44a232992
0f3c391a3cd790ae504fb84a786158a1d775abda
refs/heads/main
2022-12-31T05:05:20.989719
2020-10-25T11:26:27
2020-10-25T11:26:27
305,067,579
0
0
null
null
null
null
UTF-8
Python
false
false
357
py
# Generated by Django 3.0.8 on 2020-10-18 10:29

from django.db import migrations


class Migration(migrations.Migration):
    """Rename the Post field 'titles' to 'title' (typo fix in the model)."""

    dependencies = [
        ('blog', '0002_auto_20201018_1327'),
    ]

    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='titles',
            new_name='title',
        ),
    ]
[ "63148122+henryfrstr@users.noreply.github.com" ]
63148122+henryfrstr@users.noreply.github.com
2249aa29e8fd1a24bd328de828fb13bc0214fffb
c72d83320274e7afe2dd4ac361ebab4bef40d317
/pythonDoZero/start.py
d84f16dd42551a1a78d1055a2f334e898ffc07a5
[]
no_license
abelrufino/python
fcc3cf661428dc9bbba226a0d09d40a78cc9c29d
ef3146456e75ac2aaae238d1e7c82c0e056afdb4
refs/heads/master
2023-08-28T00:35:49.379108
2021-09-25T21:21:37
2021-09-25T21:21:37
410,381,673
1
0
null
null
null
null
UTF-8
Python
false
false
54
py
# First Python program: print a greeting to standard output.
print('Hello, World!!')
[ "anetosib@gmail.com" ]
anetosib@gmail.com
3018141d8e5f0cc37a97e4845ce6f68e04162871
3ac914deee02b35300103573f25f9f6d1d637f59
/Earthquake_destructiveness_five_factors.py
0cf719c44aaed919bc07faff021027d6e8b23d0b
[]
no_license
anup5889/NaturalCalamity
3737802be735eadbad4edcc633548067e6b5d83a
d5e5d6b4efeef214246be9c2f59a30e4381d2a60
refs/heads/master
2016-09-12T17:42:52.861802
2016-05-08T00:18:06
2016-05-08T00:18:06
56,535,354
0
1
null
null
null
null
UTF-8
Python
false
false
3,652
py
__author__ = 'anupdudani' __author__ = 'anupdudani' __author__ = 'anupdudani' __author__ = 'anupdudani' __author__ = 'anupdudani' __author__ = 'anupdudani' import os import matplotlib.pyplot as plt import subprocess import pandas as pd from sklearn.metrics import r2_score from sklearn import datasets, linear_model import numpy as np from sklearn.tree import DecisionTreeClassifier from sklearn.cross_validation import train_test_split from sklearn.metrics import accuracy_score import math from sklearn.naive_bayes import GaussianNB from sklearn.tree import DecisionTreeRegressor from sklearn.preprocessing import Imputer from sklearn import linear_model from encoder import encode_feature import pylab from sklearn.cross_validation import cross_val_score from sklearn.tree import DecisionTreeClassifier from sklearn import svm from sklearn.neighbors import KNeighborsClassifier os.chdir("/Users/anupdudani/Documents/NaturalCalamity") earthQuakeDestructivenessDf=pd.read_csv("destructiveNessOfEarthQuake.csv") modified_earthquake_destructiveness_df = encode_feature(earthQuakeDestructivenessDf,"destructiveness") modified_earthquake_destructiveness_df = encode_feature(modified_earthquake_destructiveness_df,"Population") modified_earthquake_destructiveness_df = encode_feature(modified_earthquake_destructiveness_df,"Secondary effects") modified_earthquake_destructiveness_df = encode_feature(modified_earthquake_destructiveness_df,"Architechture") print(modified_earthquake_destructiveness_df.info()) #print weather_df.head() #print weather_df.tail() #print modified_weather_df #print modified_weather_df.columns.values print "Classification with six Factors" modified_earthquake_destructiveness_df["Population_param"].fillna(0) modified_earthquake_destructiveness_df["Secondary effects_param"].fillna(0) modified_earthquake_destructiveness_df["Architechture_param"].fillna(0) modified_earthquake_destructiveness_df["Magnitude"].fillna(0) 
modified_earthquake_destructiveness_df["Depth"].fillna(0) target= modified_earthquake_destructiveness_df["destructiveness_param"] y = target #print "target", y y=y.tolist() #print "target list", y feature_list=[] print "INFO:before for loop" for a,b,c,d,e in modified_earthquake_destructiveness_df[["Secondary effects_param", "Magnitude", "Depth","Population_param", "Architechture_param" ]].itertuples(index=False): feature_list.append([a,b,c,d,e]) print "INFO: after for loop" #print feature_list[1:100] X=feature_list X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) """ Decision Tree Classifier """ clfDT = DecisionTreeClassifier(random_state=0) clfDT.fit(X_train, y_train) predict_values=clfDT.predict(X_test) dt_score=r2_score(y_test,predict_values) print y_test print predict_values print "Accuracy of DecisionTree Classifier", dt_score """ SVM """ clfSVC = svm.SVC() clfSVC.fit(X_train, y_train) predict_values=clfSVC.predict(X_test) svm_score=r2_score(y_test,predict_values) print "Accuracy of SVM", svm_score """ svm.LinearSVC() """ clfLSVC = svm.LinearSVC() clfLSVC.fit(X_train, y_train) predict_values=clfLSVC.predict(X_test) svmlc_score=r2_score(y_test,predict_values) print "Accuracy of Linear " , svmlc_score """ naive bayes """ clfNB=GaussianNB() clfNB.fit(X_train, y_train) predict_values=clfNB.predict(X_test) nb_score=r2_score(y_test,predict_values) print "Accuracy of Naive Bayes", nb_score """ Knn classifier """ clfKNN=KNeighborsClassifier(n_neighbors=5) clfKNN.fit(X_train, y_train) predict_values=clfKNN.predict(X_test) KNN_score=r2_score(y_test,predict_values) print "Accuracy of KNN", KNN_score
[ "anup5889@gmail.com" ]
anup5889@gmail.com
c94b68b2254773744cf3e5e54a2cc322ed8e5b93
0389e0bf1e2942089fa84ce8ab79ef859f5d8215
/teacher/migrations/0002_timetable.py
ecf4c3a8525870a4c484d1b6dfd09e382fb1afe2
[]
no_license
jitin2707/SchoolManagement
89f78e35b2b1f387083115064b0a54423de09cc7
7024d84dc0dfed4864a0ff9c58d045a1453bdb06
refs/heads/master
2020-08-05T11:39:56.958757
2019-11-23T03:57:01
2019-11-23T03:57:01
212,488,353
0
0
null
null
null
null
UTF-8
Python
false
false
1,199
py
# Generated by Django 2.0.6 on 2019-11-13 13:32

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Create the teacher.TimeTable model.

    A TimeTable row ties a date/day/time-slot/subject to a class, a section
    and a teacher, each via a cascading foreign key.
    """

    dependencies = [
        ('students', '0005_remove_section_class_id'),
        ('teacher', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TimeTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_date', models.CharField(default='', max_length=255)),
                ('table_day', models.CharField(default='', max_length=255)),
                ('time_slot', models.CharField(default='', max_length=255)),
                ('subject', models.CharField(default='', max_length=255)),
                ('classid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Student_class')),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='students.Section')),
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacher.TeacherDetail')),
            ],
        ),
    ]
[ "truepx247@gmail.com" ]
truepx247@gmail.com
ee692b4e30708d7c40cc7efe0f041f668c08dcb4
f63314b4852fb97ad740e53e450110fcd08a515b
/app.py
ce08afaac1bc0e74d4fe1216c543d83672b30fd1
[]
no_license
xsomam/askfm-scrapper
ee0e2c4439d5be3e4ebd7fceb27d01fbff3aa4da
2e997268f40cd37dfc56bc7adc496d609106b327
refs/heads/master
2023-04-09T03:45:56.074797
2020-08-07T23:48:31
2020-08-07T23:48:31
384,990,560
0
0
null
2023-03-17T21:32:19
2021-07-11T15:56:16
null
WINDOWS-1250
Python
false
false
7,611
py
# Basic imports
import os
import time
import json
import logging

# Modules imports
import requests
from bs4 import BeautifulSoup

# Imports from files
from answer import SinglePost
from locators import *

logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%d-%m-%Y %H:%M:%S',
                    level=logging.INFO,
                    filename='logs.txt')
logger = logging.getLogger('app')

ask_url = 'https://ask.fm'  # Base Ask URL
BASE_DIR = os.getcwd()

# Pending profile-page links still to scrape. Starts with ask.fm/<nick>;
# whenever a page has a "next" button, that link is appended here.
_pending_list = []

# One SinglePost per scraped answer (question text, answer, likes, ...).
_question = []


def retrieve(askfm_nick, dl_img, to_csv, to_json):
    """Scrape every answer page of *askfm_nick*, then write the outputs.

    dl_img / to_csv / to_json select which output steps run afterwards
    (see perform_file_operations). Images attached to answers are saved
    while scraping, into a directory named after the nickname.
    """
    logger.info('Running "retrieve" function.')
    _n = 1
    # Seed the work list with the base profile page.
    _pending_list.append(ask_url + '/' + askfm_nick)
    # Create the per-user output directory; ignore "already exists".
    try:
        logger.info('Trial of creation of directory for scraping: ' + BASE_DIR)
        os.mkdir(askfm_nick)
    except Exception:
        logger.info('FAIL of creation of directory for scraping: ' + BASE_DIR)
        print('Directory already exists or another error happened. Skipping...')
        pass
    # Keep scraping as long as some page is pending.
    while len(_pending_list) > 0:
        for link in _pending_list:
            print(f'Connecting : {link}')
            logger.info('Establishing connection to: ' + link)
            # Fetch and parse the page; on any failure retry after 5 s.
            try:
                site = requests.get(link).content
                soup = BeautifulSoup(site, 'html.parser')
                all_questions = soup.select(SiteLocators.QUESTIONS)
            except Exception:
                print(f'Connecting error at: {link}. Retrial in 5 seconds...')
                time.sleep(5)
                continue
            # Wrap each raw question element in a SinglePost and collect it.
            for question in all_questions:
                question_obj = SinglePost(question)
                _question.append(question_obj)
                logger.info(f'Adding question #{_n}.')
                if question_obj.image:
                    save_one_image(askfm_nick, question_obj)
                _n += 1
            _pending_list.remove(link)
            print(f'{link} removed from temp...')
            logger.info(f'{link} removed from temp.')
            # Queue the next page, or finish and write the outputs.
            next_page = soup.select_one(SiteLocators.NEXT)
            logger.info('Retrieving next page link')
            print('Retrieving next page link')
            if next_page:
                _pending_list.append(ask_url + next_page.attrs['href'])
                logger.info(f"Link to next site appended to temp list: {ask_url}{next_page.attrs['href']}")
                print(f"{ask_url}{next_page.attrs['href']} appending successful! Looping back...")
            else:
                logger.info(f'No "Next" link found. Retrieving done.')
                print('No "Next" link found. Retrieving done.')
                perform_file_operations(askfm_nick, dl_img, to_csv, to_json)


def perform_file_operations(askfm_nick, dl_img, to_csv, to_json):
    """Dispatch the requested output steps, once each.

    BUG FIX: the original looped over every scraped question and called the
    whole-file savers inside the loop, rewriting the complete CSV/JSON output
    once per question. The savers already iterate the full _question list, so
    they must run once.
    """
    # Preserve the original no-op behavior when nothing was scraped.
    if not _question:
        return
    # NOTE(review): images are downloaded during retrieve() via
    # save_one_image() regardless of dl_img; dl_img is kept for interface
    # compatibility — TODO: gate image download on it.
    if to_csv:
        save_to_csv(askfm_nick)
    if to_json:
        save_to_json(askfm_nick)


def save_to_json(askfm_nick):
    """Dump all scraped questions to <nick>/<nick>.json."""
    logger.info('Running "save_to_json" function.')
    print('Saving to JSON')
    _list = []
    file_name = f'{askfm_nick}.json'
    with open(os.path.join(BASE_DIR, askfm_nick, file_name), 'w') as file:
        for each in _question:
            json_dict = {
                'question': each.question,
                'answer': each.answer,
                'likes': each.likes,
                'time': each.date,
                'link': each.link,
                # Mirror the filename scheme used by save_one_image().
                'img': f"{askfm_nick}-{each.link.split('/')[-1]}.{each.image_extension}" if each.image else None,
                'asker_url': each.asker
            }
            _list.append(json_dict)
        json.dump(_list, file, indent=4, ensure_ascii=True)
    print(f'Saved to JSON: {file_name}')


def save_to_csv(askfm_nick):
    """CSV export — not implemented yet."""
    logger.info('Running "save_to_csv" function.')
    pass


def save_images(askfm_nick):
    """Download the attached image of every scraped question that has one."""
    logger.info('Running "save_images" function.')
    for each in _question:
        if each.image:
            print('Saving image....')
            _photo_name = f"{askfm_nick}-{each.link.split('/')[-1]}.{each.image_extension}"
            try:
                logger.info('Trial of saving image begins.')
                logger.info('Requesting image from: ' + each.image_link)
                photo_file = requests.get(each.image_link).content
                img_path = os.path.join(BASE_DIR, askfm_nick, _photo_name)
                with open(img_path, 'wb') as f:
                    f.write(photo_file)
                logger.info('Saving image to: ' + img_path)
                print(f"Image saved: {_photo_name}")
            except Exception:
                print(f"Could not get image {_photo_name}. Skipping...")
                logger.info('Error with saving image: ' + _photo_name)
                pass


def save_one_image(askfm_nick, question_obj):
    """Download the image attached to a single question, best-effort."""
    logger.info('Running "save_one_image" function.')
    print('Saving image....')
    _photo_name = f"{askfm_nick}-{question_obj.link.split('/')[-1]}.{question_obj.image_extension}"
    try:
        logger.info('Trial of saving image begins.')
        logger.info('Requesting image from: ' + question_obj.image_link)
        photo_file = requests.get(question_obj.image_link).content
        img_path = os.path.join(BASE_DIR, askfm_nick, _photo_name)
        with open(img_path, 'wb') as f:
            f.write(photo_file)
        logger.info('Saving image to: ' + img_path)
        print(f"Image saved: {_photo_name}")
    except Exception:
        print(f"Could not get image {_photo_name}. Skipping...")
        logger.info('Error with saving image: ' + _photo_name)
        pass
[ "a@b.com" ]
a@b.com
6d751b6338153469f545cb98bced25f45d54cb90
63e8b2fafdb5a9d18a4db98646aae8f37d0930d7
/tests/test_aggregators.py
b1fafd0cf4e1d31711d127c062b08b06f78b419c
[ "MIT" ]
permissive
mikecokina/pyopentsdb
7b5b1b3669a59e71675d21f66c37574fa9f864e9
e1a27802a5cb31fc0a6b8d8ced50f5403480f14b
refs/heads/master
2020-03-15T01:52:42.052483
2018-10-29T18:19:53
2018-10-29T18:19:53
131,904,099
0
0
null
null
null
null
UTF-8
Python
false
false
2,298
py
import unittest
from unittest import mock

from pyopentsdb import tsdb
from pyopentsdb import errors

from tests.testutils import get_mock_requests_get, mock_tsdb_error_get, mock_unexpected_error_get
from tests.testutils import GeneralUrlTestCase


class AggregatorsTestCase(unittest.TestCase):
    """Tests for the /api/aggregators endpoint wrapper."""

    __TEST_AGGREGATORS__ = ["mult", "p90", "zimsum", "mimmax", "sum", "p50", "none", "p95", "ep99r7"]

    def setUp(self):
        self._host = 'mockhttp://localhost:5896/'
        self._c = tsdb.tsdb_connection(self._host)

    @mock.patch('requests.Session.get', side_effect=get_mock_requests_get(None))
    def test_url(self, _):
        GeneralUrlTestCase.test_url(self, "/api/aggregators/", "aggregators")

    @mock.patch('requests.Session.get', side_effect=get_mock_requests_get(__TEST_AGGREGATORS__))
    def test_aggregators(self, _):
        response = self._c.aggregators()
        self.assertEqual(sorted(response), sorted(AggregatorsTestCase.__TEST_AGGREGATORS__))

    @mock.patch('requests.Session.get', side_effect=get_mock_requests_get(
        response_content={"error": {"message": "Response code differ 200"}}, status_code=403))
    def test_aggregators_403(self, _):
        # An unhandled HTTP 403 surfaces as UncaughtError.
        with self.assertRaises(Exception) as context:
            self._c.aggregators()
        self.assertTrue(isinstance(context.exception, errors.UncaughtError))

    # BUG FIX: this method was also named test_aggregators_403, which silently
    # replaced the method above, so the 403/UncaughtError case never ran.
    # Renamed to match the status code it actually exercises.
    @mock.patch('requests.Session.get', side_effect=get_mock_requests_get(
        response_content={"error": {"message": "Response code differ 200"}}, status_code=400))
    def test_aggregators_400(self, _):
        # HTTP 400 maps to ArgumentError.
        with self.assertRaises(Exception) as context:
            self._c.aggregators()
        self.assertTrue(isinstance(context.exception, errors.ArgumentError))

    @mock.patch('requests.Session.get', side_effect=mock_tsdb_error_get)
    def test_aggregators_tsdberror(self, _):
        with self.assertRaises(Exception) as context:
            self._c.aggregators()
        self.assertTrue(isinstance(context.exception, errors.TsdbError))

    @mock.patch('requests.Session.get', side_effect=mock_unexpected_error_get)
    def test_aggregators_unexpectederror(self, _):
        with self.assertRaises(Exception) as context:
            self._c.aggregators()
        self.assertTrue(isinstance(context.exception, errors.UncaughtError))
[ "mikecokina@gmail.com" ]
mikecokina@gmail.com
9fe346f7b4238f028ddcac3690d2ae20fb8f9ee2
ee771f2ac4dcc256ae483b3e75aada07183aa5b7
/2021/11/solve.py
f6e822aa6e32b66f8f267cad6cf03faae248548a
[]
no_license
mdiazv/adventofcode
bd12387ed42c72b848ef75ce793ed6a8ed764870
cb64304d144a8dc0f70f07c1efe31f287a6e2206
refs/heads/master
2023-01-12T06:23:06.079420
2022-12-27T17:34:50
2022-12-27T17:34:50
48,266,768
0
0
null
2020-12-28T11:36:02
2015-12-19T03:17:10
Java
UTF-8
Python
false
false
1,373
py
import sys class World: def __init__(self, w): self.w = w self.R, self.C = len(w), len(w[0]) self.gen, self.flashes = 0, 0 self.superflashes = [] def step(self): self.flashed = set() for i in range(self.R): for j in range(self.C): self.inc(i, j) for i, j in self.flashed: self.w[i][j] = 0 self.flashes += len(self.flashed) if len(self.flashed) == self.R*self.C: self.superflashes.append(self.gen) self.gen += 1 def inc(self, i, j): self.w[i][j] += 1 if self.w[i][j] > 9 and (i, j) not in self.flashed: self.flashed.add( (i, j) ) for ii, jj in self.neighbors(i, j): self.inc(ii, jj) def neighbors(self, i, j): return ((ii, jj) for ii in range(i-1, i+2) for jj in range(j-1, j+2) if (i, j) != (ii, jj) and ii >= 0 and jj >= 0 and ii < self.R and jj < self.C) def __repr__(self): return '\n'.join(''.join(map(str, row)) for row in self.w) + f'\ngen {self.gen} - {self.flashes} flashes' w = World([list(map(int, line[:-1])) for line in sys.stdin]) while not w.superflashes: if w.gen == 100: print (w) print(f'Flashes after 100 steps: {w.flashes}') w.step() print (w) print(f'First super flash at step {w.gen}')
[ "diazvillarroelm@gmail.com" ]
diazvillarroelm@gmail.com
36cf33d8e7752a13987e46b90e29cb1c203c8fab
0747fae9472fb33a012c629d43811785cbaa2456
/wdg.py
20ed53d6d59fc28697fffe962db12d947d8cbd17
[]
no_license
fernandoeqc/satelital_server
891718315d50f1ad85094625016b8b9f8fd25b13
6c1f25502485b201fc5fefd8e8af6fa88a390ff0
refs/heads/master
2023-06-06T05:45:45.665154
2021-06-28T17:06:34
2021-06-28T17:06:34
380,112,129
0
0
null
null
null
null
UTF-8
Python
false
false
1,835
py
""" import logging import sys import time from watchdog.events import LoggingEventHandler from watchdog.observers import Observer if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') path = sys.argv[1] if len(sys.argv) > 1 else '.' event_handler = LoggingEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join() """ import parse_xml import watchdog.events import watchdog.observers import time import sys class Handler(watchdog.events.PatternMatchingEventHandler): def __init__(self): # Set the patterns for PatternMatchingEventHandler watchdog.events.PatternMatchingEventHandler.__init__(self, patterns=['*.ok'], ignore_directories=True, case_sensitive=False) def on_created(self, event): #print("Watchdog received CREATED event - % s." % event.src_path) # Event is created, you can process it now file = event.src_path parse_xml.look_for_esn(file) def on_deleted(self, event): print("Watchdog received DELETED event - % s." % event.src_path) if __name__ == "__main__": src_path = sys.argv[1] if len(sys.argv) > 1 else './folder_target' event_handler = Handler() observer = watchdog.observers.Observer() observer.schedule(event_handler, path=src_path, recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join()
[ "fernandoeqc@live.com" ]
fernandoeqc@live.com
81083ec81fafe82eb5da65f0db9a46cf5a74c630
0b7022778b31a7f5082255f637665d81cafa0cc1
/Map and Reduce/Task2/map.py
17eb896646277699e7beeb10b69f1bea5c18a58b
[]
no_license
sailikhithk/NYC_Open_Data-Map-Reduce-and-Pyspark
09913efc3d6b2646a2314fa812c3d7a8c371a1f1
93d469b8e982ca915cf62e34e3c3aa8a6f7dacca
refs/heads/master
2022-03-31T23:07:28.751500
2020-01-18T23:56:45
2020-01-18T23:56:45
209,429,016
2
1
null
null
null
null
UTF-8
Python
false
false
384
py
import sys import string import csv for line in sys.stdin: line = line.strip() row = line.split(',') key_consolidate = row[2] value_consolidate = 1 key_consolidate_value = {key_consolidate:value_consolidate} sys.stdout.write(str(key_consolidate_value).replace("{","").replace("}","").replace(":","\t").replace("(","").replace(")","").replace("'","") + "\n")
[ "noreply@github.com" ]
noreply@github.com
9ce1a053a0e37ba990a8f6d8eb0d3e918d099320
d92e5e9073dae7631f2f43aa3618cd91d8c7ee1a
/Artificial Intelligence/search/searchAgents.py
72343acf4145aec0efbfcefb05e111d38e9c9477
[]
no_license
madhavSoni/Some-Projects
6dd8e1ea9486e0974ca517a229c855e7babc0330
10b74f929faf57ee980153d7714b2fc7ae134184
refs/heads/master
2021-09-24T14:19:57.765294
2018-10-10T08:58:09
2018-10-10T08:58:09
124,698,499
0
1
null
null
null
null
UTF-8
Python
false
false
22,181
py
# searchAgents.py # --------------- # Licensing Information: You are free to use or extend these projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to UC Berkeley, including a link to http://ai.berkeley.edu. # # Attribution Information: The Pacman AI projects were developed at UC Berkeley. # The core projects and autograders were primarily created by John DeNero # (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). # Student side autograding was added by Brad Miller, Nick Hay, and # Pieter Abbeel (pabbeel@cs.berkeley.edu). """ This file contains all of the agents that can be selected to control Pacman. To select an agent, use the '-p' option when running pacman.py. Arguments can be passed to your agent using '-a'. For example, to load a SearchAgent that uses depth first search (dfs), run the following command: > python pacman.py -p SearchAgent -a fn=depthFirstSearch Commands to invoke other search strategies can be found in the project description. Please only change the parts of the file you are asked to. Look for the lines that say "*** YOUR CODE HERE ***" The parts you fill in start about 3/4 of the way down. Follow the project description for details. Good luck and happy searching! """ from game import Directions from game import Agent from game import Actions import util import time import search class GoWestAgent(Agent): "An agent that goes West until it can't." def getAction(self, state): "The agent receives a GameState (defined in pacman.py)." 
if Directions.WEST in state.getLegalPacmanActions(): return Directions.WEST else: return Directions.STOP ####################################################### # This portion is written for you, but will only work # # after you fill in parts of search.py # ####################################################### class SearchAgent(Agent): """ This very general search agent finds a path using a supplied search algorithm for a supplied search problem, then returns actions to follow that path. As a default, this agent runs DFS on a PositionSearchProblem to find location (1,1) Options for fn include: depthFirstSearch or dfs breadthFirstSearch or bfs Note: You should NOT change any code in SearchAgent """ def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'): # Warning: some advanced Python magic is employed below to find the right functions and problems # Get the search function from the name and heuristic if fn not in dir(search): raise AttributeError, fn + ' is not a search function in search.py.' func = getattr(search, fn) if 'heuristic' not in func.func_code.co_varnames: print('[SearchAgent] using function ' + fn) self.searchFunction = func else: if heuristic in globals().keys(): heur = globals()[heuristic] elif heuristic in dir(search): heur = getattr(search, heuristic) else: raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.' print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic)) # Note: this bit of Python trickery combines the search algorithm and the heuristic self.searchFunction = lambda x: func(x, heuristic=heur) # Get the search problem type from the name if prob not in globals().keys() or not prob.endswith('Problem'): raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.' 
self.searchType = globals()[prob] print('[SearchAgent] using problem type ' + prob) def registerInitialState(self, state): """ This is the first time that the agent sees the layout of the game board. Here, we choose a path to the goal. In this phase, the agent should compute the path to the goal and store it in a local variable. All of the work is done in this method! state: a GameState object (pacman.py) """ if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent" starttime = time.time() problem = self.searchType(state) # Makes a new search problem self.actions = self.searchFunction(problem) # Find a path totalCost = problem.getCostOfActions(self.actions) print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime)) if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded) def getAction(self, state): """ Returns the next action in the path chosen earlier (in registerInitialState). Return Directions.STOP if there is no further action to take. state: a GameState object (pacman.py) """ if 'actionIndex' not in dir(self): self.actionIndex = 0 i = self.actionIndex self.actionIndex += 1 if i < len(self.actions): return self.actions[i] else: return Directions.STOP class PositionSearchProblem(search.SearchProblem): """ A search problem defines the state space, start state, goal test, successor function and cost function. This search problem can be used to find paths to a particular point on the pacman board. The state space consists of (x,y) positions in a pacman game. Note: this search problem is fully specified; you should NOT change it. """ def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True): """ Stores the start and goal. 
gameState: A GameState object (pacman.py) costFn: A function from a search state (tuple) to a non-negative number goal: A position in the gameState """ self.walls = gameState.getWalls() self.startState = gameState.getPacmanPosition() if start != None: self.startState = start self.goal = goal self.costFn = costFn self.visualize = visualize if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)): print 'Warning: this does not look like a regular search maze' # For display purposes self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE def getStartState(self): return self.startState def isGoalState(self, state): isGoal = state == self.goal # For display purposes only if isGoal and self.visualize: self._visitedlist.append(state) import __main__ if '_display' in dir(__main__): if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable return isGoal def getSuccessors(self, state): """ Returns successor states, the actions they require, and a cost of 1. As noted in search.py: For a given state, this should return a list of triples, (successor, action, stepCost), where 'successor' is a successor to the current state, 'action' is the action required to get there, and 'stepCost' is the incremental cost of expanding to that successor """ successors = [] for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]: x,y = state dx, dy = Actions.directionToVector(action) nextx, nexty = int(x + dx), int(y + dy) if not self.walls[nextx][nexty]: nextState = (nextx, nexty) cost = self.costFn(nextState) successors.append( ( nextState, action, cost) ) # Bookkeeping for display purposes self._expanded += 1 # DO NOT CHANGE if state not in self._visited: self._visited[state] = True self._visitedlist.append(state) return successors def getCostOfActions(self, actions): """ Returns the cost of a particular sequence of actions. 
If those actions include an illegal move, return 999999. """ if actions == None: return 999999 x,y= self.getStartState() cost = 0 for action in actions: # Check figure out the next state and see whether its' legal dx, dy = Actions.directionToVector(action) x, y = int(x + dx), int(y + dy) if self.walls[x][y]: return 999999 cost += self.costFn((x,y)) return cost class StayEastSearchAgent(SearchAgent): """ An agent for position search with a cost function that penalizes being in positions on the West side of the board. The cost function for stepping into a position (x,y) is 1/2^x. """ def __init__(self): self.searchFunction = search.uniformCostSearch costFn = lambda pos: .5 ** pos[0] self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False) class StayWestSearchAgent(SearchAgent): """ An agent for position search with a cost function that penalizes being in positions on the East side of the board. The cost function for stepping into a position (x,y) is 2^x. """ def __init__(self): self.searchFunction = search.uniformCostSearch costFn = lambda pos: 2 ** pos[0] self.searchType = lambda state: PositionSearchProblem(state, costFn) def manhattanHeuristic(position, problem, info={}): "The Manhattan distance heuristic for a PositionSearchProblem" xy1 = position xy2 = problem.goal return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1]) def euclideanHeuristic(position, problem, info={}): "The Euclidean distance heuristic for a PositionSearchProblem" xy1 = position xy2 = problem.goal return ( (xy1[0] - xy2[0]) ** 2 + (xy1[1] - xy2[1]) ** 2 ) ** 0.5 ##################################################### # This portion is incomplete. Time to write code! # ##################################################### class CornersProblem(search.SearchProblem): """ This search problem finds paths through all four corners of a layout. 
You must select a suitable state space and successor function """ def __init__(self, startingGameState): """ Stores the walls, pacman's starting position and corners. """ self.walls = startingGameState.getWalls() self.startingPosition = startingGameState.getPacmanPosition() top, right = self.walls.height-2, self.walls.width-2 self.corners = ((1,1), (1,top), (right, 1), (right, top)) for corner in self.corners: if not startingGameState.hasFood(*corner): print 'Warning: no food in corner ' + str(corner) self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded # Please add any code here which you would like to use # in initializing the problem "*** YOUR CODE HERE ***" def getStartState(self): """ Returns the start state (in your state space, not the full Pacman state space) """ "*** YOUR CODE HERE ***" return (self.startingPosition, self.corners) def isGoalState(self, state): """ Returns whether this search state is a goal state of the problem. """ "*** YOUR CODE HERE ***" return len(state[1]) == 0 def getSuccessors(self, state): """ Returns successor states, the actions they require, and a cost of 1. 
As noted in search.py: For a given state, this should return a list of triples, (successor, action, stepCost), where 'successor' is a successor to the current state, 'action' is the action required to get there, and 'stepCost' is the incremental cost of expanding to that successor """ successors = [] for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]: # Add a successor state to the successor list if the action is legal # Here's a code snippet for figuring out whether a new position hits a wall: # x,y = currentPosition # dx, dy = Actions.directionToVector(action) # nextx, nexty = int(x + dx), int(y + dy) # hitsWall = self.walls[nextx][nexty] "*** YOUR CODE HERE ***" x,y = state[0] dx, dy = Actions.directionToVector(action) nextx, nexty = int(x + dx), int(y + dy) hitsWall = self.walls[nextx][nexty] if not hitsWall: # remove corners eaten by that move corners = tuple(x for x in state[1] if x != (nextx, nexty)) successors.append((((nextx, nexty), corners), action, 1)) self._expanded += 1 # DO NOT CHANGE return successors def getCostOfActions(self, actions): """ Returns the cost of a particular sequence of actions. If those actions include an illegal move, return 999999. This is implemented for you. """ if actions == None: return 999999 x,y= self.startingPosition for action in actions: dx, dy = Actions.directionToVector(action) x, y = int(x + dx), int(y + dy) if self.walls[x][y]: return 999999 return len(actions) def cornersHeuristic(state, problem): """ A heuristic for the CornersProblem that you defined. state: The current search state (a data structure you chose in your search problem) problem: The CornersProblem instance for this layout. This function should always return a number that is a lower bound on the shortest path from the state to a goal of the problem; i.e. it should be admissible (as well as consistent). 
""" corners = problem.corners # These are the corner coordinates walls = problem.walls # These are the walls of the maze, as a Grid (game.py) distances = [0] for corner in state[1]: distances.append(util.manhattanDistance(state[0], corner)) return max(distances) class AStarCornersAgent(SearchAgent): "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic" def __init__(self): self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic) self.searchType = CornersProblem class FoodSearchProblem: """ A search problem associated with finding the a path that collects all of the food (dots) in a Pacman game. A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where pacmanPosition: a tuple (x,y) of integers specifying Pacman's position foodGrid: a Grid (see game.py) of either True or False, specifying remaining food """ def __init__(self, startingGameState): self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood()) self.walls = startingGameState.getWalls() self.startingGameState = startingGameState self._expanded = 0 # DO NOT CHANGE self.heuristicInfo = {} # A dictionary for the heuristic to store information def getStartState(self): return self.start def isGoalState(self, state): return state[1].count() == 0 def getSuccessors(self, state): "Returns successor states, the actions they require, and a cost of 1." successors = [] self._expanded += 1 # DO NOT CHANGE for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]: x,y = state[0] dx, dy = Actions.directionToVector(direction) nextx, nexty = int(x + dx), int(y + dy) if not self.walls[nextx][nexty]: nextFood = state[1].copy() nextFood[nextx][nexty] = False successors.append( ( ((nextx, nexty), nextFood), direction, 1) ) return successors def getCostOfActions(self, actions): """Returns the cost of a particular sequence of actions. 
If those actions include an illegal move, return 999999""" x,y= self.getStartState()[0] cost = 0 for action in actions: # figure out the next state and see whether it's legal dx, dy = Actions.directionToVector(action) x, y = int(x + dx), int(y + dy) if self.walls[x][y]: return 999999 cost += 1 return cost class AStarFoodSearchAgent(SearchAgent): "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic" def __init__(self): self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic) self.searchType = FoodSearchProblem def foodHeuristic(state, problem): """ Your heuristic for the FoodSearchProblem goes here. This heuristic must be consistent to ensure correctness. First, try to come up with an admissible heuristic; almost all admissible heuristics will be consistent as well. If using A* ever finds a solution that is worse uniform cost search finds, your heuristic is *not* consistent, and probably not admissible! On the other hand, inadmissible or inconsistent heuristics may find optimal solutions, so be careful. The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a Grid (see game.py) of either True or False. You can call foodGrid.asList() to get a list of food coordinates instead. If you want access to info like walls, capsules, etc., you can query the problem. For example, problem.walls gives you a Grid of where the walls are. If you want to *store* information to be reused in other calls to the heuristic, there is a dictionary called problem.heuristicInfo that you can use. 
For example, if you only want to count the walls once and store that value, try: problem.heuristicInfo['wallCount'] = problem.walls.count() Subsequent calls to this heuristic can access problem.heuristicInfo['wallCount'] """ position, foodGrid = state distances = [0] for food in foodGrid.asList(): # distances.append(util.manhattanDistance(position, food)) distances.append(mazeDistance(position, food, problem.startingGameState)) distanceFurthest = max(distances) return distanceFurthest class ClosestDotSearchAgent(SearchAgent): "Search for all food using a sequence of searches" def registerInitialState(self, state): self.actions = [] currentState = state while(currentState.getFood().count() > 0): nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece self.actions += nextPathSegment for action in nextPathSegment: legal = currentState.getLegalActions() if action not in legal: t = (str(action), str(currentState)) raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t currentState = currentState.generateSuccessor(0, action) self.actionIndex = 0 print 'Path found with cost %d.' % len(self.actions) def findPathToClosestDot(self, gameState): """ Returns a path (a list of actions) to the closest dot, starting from gameState. """ # Here are some useful elements of the startState startPosition = gameState.getPacmanPosition() food = gameState.getFood() walls = gameState.getWalls() problem = AnyFoodSearchProblem(gameState) "*** YOUR CODE HERE ***" return search.astar(problem) class AnyFoodSearchProblem(PositionSearchProblem): """ A search problem for finding a path to any food. This search problem is just like the PositionSearchProblem, but has a different goal test, which you need to fill in below. The state space and successor function do not need to be changed. The class definition above, AnyFoodSearchProblem(PositionSearchProblem), inherits the methods of the PositionSearchProblem. 
You can use this search problem to help you fill in the findPathToClosestDot method. """ def __init__(self, gameState): "Stores information from the gameState. You don't need to change this." # Store the food for later reference self.food = gameState.getFood() # Store info for the PositionSearchProblem (no need to change this) self.walls = gameState.getWalls() self.startState = gameState.getPacmanPosition() self.costFn = lambda x: 1 self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE def isGoalState(self, state): """ The state is Pacman's position. Fill this in with a goal test that will complete the problem definition. """ x,y = state "*** YOUR CODE HERE ***" return self.food[x][y] def mazeDistance(point1, point2, gameState): """ Returns the maze distance between any two points, using the search functions you have already built. The gameState can be any game state -- Pacman's position in that state is ignored. Example usage: mazeDistance( (2,4), (5,6), gameState) This might be a useful helper function for your ApproximateSearchAgent. """ x1, y1 = point1 x2, y2 = point2 walls = gameState.getWalls() assert not walls[x1][y1], 'point1 is a wall: ' + str(point1) assert not walls[x2][y2], 'point2 is a wall: ' + str(point2) prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False) return len(search.bfs(prob))
[ "madhav.soni@berkeley.edu" ]
madhav.soni@berkeley.edu
023d1d8dece7491ac60d165dac3295008bf0a004
6109a95a284891792c35d0d19906ab8d1697f9c7
/src/k8s-configuration/azext_k8s_configuration/vendored_sdks/v2021_05_01_preview/aio/operations/_operations.py
3c82397333fb155b013c9017f213f97b26d8e9e6
[ "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
Tatsinnit/azure-cli-extensions
3e5a1752edced00d7c33660027d2c17fae074569
a1959b123d4c11149adae2728ab5791949889d54
refs/heads/master
2022-10-05T17:40:10.825889
2022-03-16T10:33:56
2022-03-16T10:33:56
250,102,909
0
0
MIT
2020-03-25T22:12:01
2020-03-25T22:12:01
null
UTF-8
Python
false
false
4,949
py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class Operations: """Operations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, **kwargs: Any ) -> AsyncIterable["_models.ResourceProviderOperationList"]: """List all the available operations the KubernetesConfiguration resource provider supports. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ResourceProviderOperationList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ResourceProviderOperationList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceProviderOperationList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-05-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('ResourceProviderOperationList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def 
get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/providers/Microsoft.KubernetesConfiguration/operations'} # type: ignore
[ "noreply@github.com" ]
noreply@github.com
3197b6b704d03772985a826493370366f41f9961
78f1aaaeffb8f48b4a012f6700125348e396fe90
/LaTeX/report_test/inscription_tournoi.py
82f5bb36a479f342ead3a36b23ba89960c9e63d3
[]
no_license
fthuin/software_engineering_project
eda625fe90f04f2a0d4652be6d77cd92a240a014
3e2bc1bb1ba4655060c3c41367bb736961eae031
refs/heads/master
2021-01-16T23:01:33.869567
2015-12-15T20:29:02
2015-12-15T20:29:02
42,591,013
1
0
null
null
null
null
UTF-8
Python
false
false
6,632
py
# -*- coding: utf-8 -*- from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import NoAlertPresentException import unittest, time, re WAIT_TIME = 1 class InscriptionTournoi(unittest.TestCase): def setUp(self): profile = webdriver.FirefoxProfile("/home/florian/.mozilla/firefox/mwad0hks.default") self.driver = webdriver.Firefox(profile) self.driver.implicitly_wait(30) self.base_url = "http://localhost:8000" self.verificationErrors = [] self.accept_next_alert = True def test_inscription_tournoi(self): driver = self.driver driver.get(self.base_url + "/") time.sleep(WAIT_TIME) driver.find_element_by_css_selector("button.btn.btn-default").click() driver.find_element_by_name("username").clear() driver.find_element_by_name("username").send_keys("Florian") time.sleep(WAIT_TIME) driver.find_element_by_name("password").clear() driver.find_element_by_name("password").send_keys("azerty") time.sleep(WAIT_TIME) driver.find_element_by_xpath("//button[@type='submit']").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("//button[@onclick=\"location.href='/tournoi/inscriptionTournoi';\"]").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("//td[2]").click() time.sleep(WAIT_TIME) driver.find_element_by_name("extra").click() time.sleep(WAIT_TIME) driver.find_element_by_id("remarque").clear() driver.find_element_by_id("remarque").send_keys("Ceci est un commentaire") time.sleep(WAIT_TIME) driver.find_element_by_id("InscriptionButton").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("(//button[@type='button'])[2]").click() time.sleep(WAIT_TIME) driver.find_element_by_link_text("Deconnexion").click() time.sleep(WAIT_TIME) driver.find_element_by_css_selector("button.btn.btn-default").click() driver.find_element_by_name("username").clear() 
driver.find_element_by_name("username").send_keys("Abires") time.sleep(WAIT_TIME) driver.find_element_by_name("password").clear() driver.find_element_by_name("password").send_keys("Eev4eede0h") time.sleep(WAIT_TIME) driver.find_element_by_xpath("//button[@type='submit']").click() time.sleep(WAIT_TIME) driver.find_element_by_css_selector("body").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("//td[4]").click() time.sleep(WAIT_TIME) driver.find_element_by_name("extra").click() time.sleep(WAIT_TIME) driver.find_element_by_id("remarque").clear() driver.find_element_by_id("remarque").send_keys("Commentaire ><") time.sleep(WAIT_TIME) driver.find_element_by_name("action").click() time.sleep(WAIT_TIME) driver.find_element_by_link_text("Terrains").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("//button[@onclick=\"location.href='/terrain/enregistrement';\"]").click() time.sleep(WAIT_TIME) driver.find_element_by_id("street").clear() time.sleep(WAIT_TIME) driver.find_element_by_id("street").send_keys(u"Rue Archimède") driver.find_element_by_id("number").clear() time.sleep(WAIT_TIME) driver.find_element_by_id("number").send_keys("2") driver.find_element_by_id("postalcode").clear() time.sleep(WAIT_TIME) driver.find_element_by_id("postalcode").send_keys("1348") driver.find_element_by_id("locality").clear() time.sleep(WAIT_TIME) driver.find_element_by_id("locality").send_keys("Ottignies-Louvain-la-Neuve") driver.find_element_by_id("acces").clear() time.sleep(WAIT_TIME) driver.find_element_by_id("acces").send_keys(u"Par derrière la maison") time.sleep(WAIT_TIME) Select(driver.find_element_by_name("matiere")).select_by_visible_text("Quick") time.sleep(WAIT_TIME) Select(driver.find_element_by_name("type")).select_by_visible_text("Ouvert") time.sleep(WAIT_TIME) Select(driver.find_element_by_name("etat")).select_by_visible_text("Bon") time.sleep(WAIT_TIME) driver.find_element_by_name("dispoSamedi").click() time.sleep(WAIT_TIME) 
driver.find_element_by_id("comment").clear() driver.find_element_by_id("comment").send_keys(u"Merci de ramasser vos déchets cette année") time.sleep(WAIT_TIME) driver.find_element_by_xpath("(//button[@type='button'])[3]").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("//td[3]").click() time.sleep(WAIT_TIME) driver.find_element_by_xpath("(//button[@type='button'])[2]").click() time.sleep(WAIT_TIME) driver.find_element_by_link_text("Deconnexion").click() time.sleep(WAIT_TIME) driver.get(self.base_url + "/") driver.find_element_by_css_selector("button.btn.btn-default").click() driver.find_element_by_name("password").clear() driver.find_element_by_name("password").send_keys("azerty") time.sleep(WAIT_TIME) driver.find_element_by_name("username").clear() driver.find_element_by_name("username").send_keys("Florian") time.sleep(WAIT_TIME*5) time.sleep(WAIT_TIME) driver.find_element_by_xpath("//button[@type='submit']").click() time.sleep(WAIT_TIME*10) driver.close() def is_element_present(self, how, what): try: self.driver.find_element(by=how, value=what) except NoSuchElementException as e: return False return True def is_alert_present(self): try: self.driver.switch_to_alert() except NoAlertPresentException as e: return False return True def close_alert_and_get_its_text(self): try: alert = self.driver.switch_to_alert() alert_text = alert.text if self.accept_next_alert: alert.accept() else: alert.dismiss() return alert_text finally: self.accept_next_alert = True def tearDown(self): self.driver.quit() self.assertEqual([], self.verificationErrors) if __name__ == "__main__": unittest.main()
[ "florian.thuin@student.uclouvain.be" ]
florian.thuin@student.uclouvain.be
752dbda69d594e120006e5dafc97ceed904f7a19
ea13d23b9d9c7fa5b11c8f0780fbe2702ba604a3
/단계별/9_math_1/10250.py
59b9906024958adf1846c1a954e7bf31d5ccddc5
[]
no_license
boxtobox/aimicpc
51c2ab1b5b384be6eb9f4a6d4a88e0db05ff6875
25649d99d89fa31a3315a7b1c46413cb77de1ca6
refs/heads/master
2022-11-21T14:13:23.725494
2020-07-18T08:05:15
2020-07-18T08:05:15
258,359,511
0
0
null
null
null
null
UTF-8
Python
false
false
263
py
from sys import stdin


def room_number(floors, guest):
    """Return the room code for the *guest*-th arrival in a hotel with *floors* floors.

    Guests fill rooms column by column, lowest floor first, so the guest-th
    arrival gets floor ``guest % floors`` (the top floor when the remainder
    is 0) in column ``ceil(guest / floors)``.  The room code concatenates
    floor and column as ``floor * 100 + column`` (floor 4, column 2 -> 402).
    """
    floor = guest % floors or floors
    # Integer ceiling division: avoids the float imprecision of
    # math.ceil(guest / floors) for very large guest numbers.
    column = (guest + floors - 1) // floors
    return floor * 100 + column


def main():
    """Read the case count, then ``H W N`` per case (W, the width, is unused)."""
    cases = int(stdin.readline())
    for _ in range(cases):
        h, _w, n = map(int, stdin.readline().split())
        print(room_number(h, n))


if __name__ == "__main__":
    main()
[ "grandnex@gmail.com" ]
grandnex@gmail.com
eec9de39af9002e5ffe1c66285ec2b4983d3246c
ffead9c586803a9c8b53a1fe16ad33f91bdd71c8
/4/TupleUnpackingExample.py
eb47e54f5acdf02eb4f3cd5787b78bc938d09e81
[]
no_license
angsdey2/python-examples
63489f4d39ca052a2dbce98cd4254eb3b34b0101
d3282181eae04d921f66684b6489d6517949ab25
refs/heads/master
2022-11-27T03:52:48.774509
2020-08-05T18:10:46
2020-08-05T18:10:46
285,367,550
0
0
null
null
null
null
UTF-8
Python
false
false
612
py
# -*- coding: utf-8 -*-
"""
Tuple-unpacking example: find the highest scorer in a list of
(name, score) pairs and unpack the resulting tuple.

Created on Mon Aug  3 22:39:39 2020

@author: Angshuman
"""

examScore = [("AD", 500), ("KR", 600), ("SM", 7000), ("DS", 800)]


def find_highest_scsorer(examScore):
    """Return a ``(name, score)`` tuple for the highest score in *examScore*.

    Returns ``('', 0)`` when the list is empty or no score is positive,
    matching the original sentinel behaviour.  (The function name keeps the
    original's spelling so existing callers keep working.)
    """
    best_name, best_score = '', 0
    for name, score in examScore:
        if score > best_score:
            best_name, best_score = name, score
    return (best_name, best_score)


# Demonstrate using the returned tuple as a whole ...
item = find_highest_scsorer(examScore)
print(item)

# ... and unpacking it into separate variables.
name, score = find_highest_scsorer(examScore)
print(f"{name} is the highest scorer with {score} marks")
[ "angshuman.dey@cognizant.com" ]
angshuman.dey@cognizant.com
5b1b6d889aa3a32894887142d13c3972165aaa18
31c9363538f4379ff1643784dda025456059dda3
/passporteye/mrz/scripts.py
62752ca98aef98315eb11cc83ea14bc5c9500896
[ "MIT" ]
permissive
solvire/PassportEye
b3f95debd15527c1190aaea1aca913d500452305
1286be85ab72d058aa69a3912961bfbb36529c78
refs/heads/master
2021-06-26T23:19:04.456317
2017-09-15T21:17:27
2017-09-15T21:17:27
103,700,512
0
0
null
2017-09-15T20:54:34
2017-09-15T20:54:33
null
UTF-8
Python
false
false
6,402
py
'''
PassportEye::MRZ: Machine-readable zone extraction and parsing.
Command-line scripts

Author: Konstantin Tretyakov
License: MIT
'''
import argparse
import glob
import json
import logging
import multiprocessing
import os
import shutil
import time
from collections import Counter

import pkg_resources
from skimage import io

import passporteye
from .image import read_mrz


def process_file(params):
    """Process a single image file and return ``(filename, mrz, walltime)``.

    ``mrz`` is None when extraction failed or no candidate region was found.
    *params* is a ``(filename, save_roi)`` tuple so the function can be fed
    directly to multiprocessing map functions (which pass one argument).
    """
    tic = time.time()
    filename, save_roi = params
    try:
        mrz = read_mrz(filename, save_roi=save_roi)
    except Exception:
        # Extraction is best-effort: any failure is reported as "no MRZ".
        mrz = None
    walltime = time.time() - tic
    return (filename, mrz, walltime)


def evaluate_mrz():
    """Console entry point: run MRZ recognition over a directory of samples and
    report a quality summary.

    Files are expected to be named ``<previous_score>_<name>.<ext>`` so that the
    new score can be compared against the recorded one (see score_change_type).
    """
    parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the sample test data, reporting the quality summary.')
    parser.add_argument('-j', '--jobs', default=1, type=int, help='Number of parallel jobs to run')
    parser.add_argument('-dd', '--data-dir', default=pkg_resources.resource_filename('passporteye.mrz', 'testdata'),
                        help='Read files from this directory instead of the package test files')
    parser.add_argument('-sd', '--success-dir', default=None, help='Copy files with successful (nonzero score) extraction results to this directory')
    parser.add_argument('-fd', '--fail-dir', default=None, help='Copy files with unsuccessful (zero score) extraction resutls to this directory')
    parser.add_argument('-rd', '--roi-dir', default=None, help='Extract ROIs to this directory')
    parser.add_argument('-l', '--limit', default=-1, type=int, help='Only process the first <limit> files in the directory.')
    args = parser.parse_args()
    files = sorted(glob.glob(os.path.join(args.data_dir, '*.*')))
    if args.limit >= 0:
        files = files[0:args.limit]
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("evaluate_mrz")
    tic = time.time()
    pool = multiprocessing.Pool(args.jobs)
    log.info("Preparing computation for %d files from %s" % (len(files), args.data_dir))
    log.info("Running %d workers" % args.jobs)
    results = []
    save_roi = args.roi_dir is not None
    # Create the output directories up front so the copy/imsave calls below
    # cannot fail on a missing directory.
    for d in [args.success_dir, args.fail_dir, args.roi_dir]:
        if d is not None and not os.path.isdir(d):
            os.mkdir(d)

    def valid_score(mrz):
        # A failed extraction counts as score 0.
        return 0 if mrz is None else mrz.valid_score

    def score_change_type(filename, mrz):
        # Compare the new score against the score encoded in the filename
        # prefix; '?' when the filename carries no parsable previous score.
        try:
            new_score = mrz.valid_score if mrz is not None else 0
            old_score = int(os.path.basename(filename).split('_')[0])
            schange = new_score - old_score
            return '=' if schange == 0 else ('>' if schange > 0 else '<')
        except Exception:
            return '?'

    method_stats = Counter()
    for result in pool.imap_unordered(process_file, [(f, save_roi) for f in files]):
        filename, mrz, walltime = result
        results.append(result)
        log.info("Processed %s in %0.2fs (score %d) [%s]" % (os.path.basename(filename), walltime, valid_score(mrz), score_change_type(filename, mrz)))
        log.debug("\t%s" % str(mrz))
        vs = valid_score(mrz)
        if args.success_dir is not None and vs > 0:
            shutil.copyfile(filename, os.path.join(args.success_dir, '%d_%s' % (vs, os.path.basename(filename))))
        if args.fail_dir is not None and vs == 0:
            shutil.copyfile(filename, os.path.join(args.fail_dir, '%d_%s' % (vs, os.path.basename(filename))))
        if args.roi_dir is not None and mrz is not None and 'roi' in mrz.aux:
            roi_fn = '%d_roi_%s.png' % (vs, os.path.basename(filename))
            io.imsave(os.path.join(args.roi_dir, roi_fn), mrz.aux['roi'])
        if vs > 0 and 'method' in mrz.aux:
            method_stats[mrz.aux['method']] += 1
    num_files = len(results)
    score_changes = [score_change_type(fn, mrz) for fn, mrz, wt in results]
    scores = [valid_score(mrz) for fn, mrz, wt in results]
    num_perfect = scores.count(100)
    num_invalid = scores.count(0)
    total_score = sum(scores)
    total_computation_walltime = sum([wt for fn, mrz, wt in results])
    total_walltime = time.time() - tic
    log.info("Completed")
    print("Walltime: %0.2fs" % total_walltime)
    print("Compute walltime: %0.2fs" % total_computation_walltime)
    print("Processed files: %d" % num_files)
    print("Perfect parses: %d" % num_perfect)
    print("Invalid parses: %d" % num_invalid)
    # BUG FIX: the original used len(filter(...)), which raises TypeError on
    # Python 3 where filter() returns an iterator; list.count() is equivalent
    # on both Python 2 and 3.
    print("Improved parses: %d" % score_changes.count('>'))
    print("Worsened parses: %d" % score_changes.count('<'))
    print("Total score: %d" % total_score)
    print("Mean score: %0.2f" % (float(total_score) / num_files))
    print("Mean compute time: %0.2fs" % (total_computation_walltime / num_files))
    print("Methods used:")
    for stat in method_stats.most_common():
        print(" %s: %d" % stat)


def mrz():
    """Console entry point: extract and print the MRZ of a single image.

    Prints a tab-separated key/value table by default, or JSON with --json.
    """
    parser = argparse.ArgumentParser(description='Run the MRZ OCR recognition algorithm on the given image.')
    parser.add_argument('filename')
    parser.add_argument('--json', action='store_true', help='Produce JSON (rather than tabular) output')
    parser.add_argument('-r', '--save-roi', default=None, help='Output the region of the image that is detected to contain the MRZ to the given png file')
    parser.add_argument('--version', action='version', version='PassportEye MRZ v%s' % passporteye.__version__)
    args = parser.parse_args()
    filename, mrz, walltime = process_file((args.filename, args.save_roi is not None))
    # Failed extraction still produces a minimal, well-formed record.
    d = mrz.to_dict() if mrz is not None else {'mrz_type': None, 'valid': False, 'valid_score': 0}
    d['walltime'] = walltime
    d['filename'] = filename
    if args.save_roi is not None and mrz is not None and 'roi' in mrz.aux:
        io.imsave(args.save_roi, mrz.aux['roi'])
    if not args.json:
        for k in d:
            print("%s\t%s" % (k, str(d[k])))
    else:
        print(json.dumps(d, indent=2))
[ "kt@ut.ee" ]
kt@ut.ee
9e32c9c0aa9172a40b197d8db987c89b23d6b43b
eacf8153f3b981d54734d71275576ac33f03e921
/pegasus/tests/functional/test_authentication.py
97233a50e03bd54540753a01879da76abb4513e8
[]
no_license
enriquebits/bookmanizer
f9b43c18844dbcb32a5b560cbf5f3b539c3434c2
326b7a9ba8f2dcfce5afeedadebea6ca2ff24255
refs/heads/master
2016-09-06T12:17:39.208517
2015-05-25T02:14:57
2015-05-25T02:14:57
20,838,468
0
0
null
null
null
null
UTF-8
Python
false
false
3,332
py
# -*- coding: utf-8 -*-
"""
Integration tests for the :mod:`repoze.who`-powered authentication sub-system.

As pegasus grows and the authentication method changes, only these tests
should be updated.
"""
from __future__ import unicode_literals

from nose.tools import eq_, ok_

from pegasus.tests import TestController


class TestAuthentication(TestController):
    """
    Tests for the default authentication setup.

    If your application changes how the authentication layer is configured
    those tests should be updated accordingly
    """

    # Name of the WSGI app configuration exercised by these tests.
    application_under_test = 'main'

    def test_forced_login(self):
        """Anonymous users are forced to login

        Test that anonymous users are automatically redirected to the login
        form when authorization is denied. Next, upon successful login they
        should be redirected to the initially requested page.
        """
        # Requesting a protected area (302 = redirect to the login form)
        resp = self.app.get('/secc/', status=302)
        ok_( resp.location.startswith('http://localhost/login'))
        # Getting the login form:
        resp = resp.follow(status=200)
        form = resp.form
        # Submitting the login form:
        form['login'] = 'manager'
        form['password'] = 'managepass'
        post_login = form.submit(status=302)
        # Being redirected to the initially requested page:
        ok_(post_login.location.startswith('http://localhost/post_login'))
        initial_page = post_login.follow(status=302)
        # The auth ticket cookie proves the session was established.
        ok_('authtkt' in initial_page.request.cookies,
            "Session cookie wasn't defined: %s" % initial_page.request.cookies)
        ok_(initial_page.location.startswith('http://localhost/secc/'),
            initial_page.location)

    def test_voluntary_login(self):
        """Voluntary logins must work correctly"""
        # Going to the login form voluntarily:
        resp = self.app.get('/login', status=200)
        form = resp.form
        # Submitting the login form:
        form['login'] = 'manager'
        form['password'] = 'managepass'
        post_login = form.submit(status=302)
        # Being redirected to the home page:
        ok_(post_login.location.startswith('http://localhost/post_login'))
        home_page = post_login.follow(status=302)
        ok_('authtkt' in home_page.request.cookies,
            'Session cookie was not defined: %s' % home_page.request.cookies)
        eq_(home_page.location, 'http://localhost/')

    def test_logout(self):
        """Logouts must work correctly"""
        # Logging in voluntarily the quick way (GET with credentials):
        resp = self.app.get('/login_handler?login=manager&password=managepass',
                            status=302)
        resp = resp.follow(status=302)
        ok_('authtkt' in resp.request.cookies,
            'Session cookie was not defined: %s' % resp.request.cookies)
        # Logging out:
        resp = self.app.get('/logout_handler', status=302)
        ok_(resp.location.startswith('http://localhost/post_logout'))
        # Finally, redirected to the home page:
        home_page = resp.follow(status=302)
        # repoze.who may either drop the cookie or overwrite it with 'INVALID'.
        authtkt = home_page.request.cookies.get('authtkt')
        ok_(not authtkt or authtkt == 'INVALID',
            'Session cookie was not deleted: %s' % home_page.request.cookies)
        eq_(home_page.location, 'http://localhost/')
[ "enrique_empo@hotmail.com" ]
enrique_empo@hotmail.com
e6bfbadc51ffa309278e7dec7bdd95cf06c0006d
7565d21f09d43026dbee81a957ef9c8aed08ae69
/rate-limiter/utils/constants.py
0d6f03a9d684d4167c6a567f8af109874e388ab3
[]
no_license
syedmrizwan/notification-service
7cebfc8750f54d26dd2075d9e8b48a3612663378
a045647b61d005287397a39282c32dc6c5c5992e
refs/heads/master
2023-02-02T11:18:15.665657
2020-12-17T07:08:34
2020-12-17T07:08:34
318,601,552
0
0
null
2020-12-11T12:52:11
2020-12-04T18:21:53
Go
UTF-8
Python
false
false
80
py
'''Constants'''

# Name used when requesting this service's logger (e.g. logging.getLogger(LOGGER_NAME)).
LOGGER_NAME = 'Rate Limiter'
# Logging configuration file name — presumably consumed by
# logging.config.fileConfig; verify against the caller.
LOGGING_FILE_NAME = 'logging.conf'
[ "muhammad.rizwan@emumba.com" ]
muhammad.rizwan@emumba.com
796c821dc503856d3333df944620e9880c66973e
e9e083ebe7fad28e16d74209473eb85f578401e2
/frontpage/views.py
13d2c8a5926d8292b0a24c62af3992d9b1ac1945
[]
no_license
4x4falcon/fosm-website-2018
9dfb3ea3904c110d364b23fe7c710852fccd6c1d
f48ed7d527ebf0e77fff300979b09a9b8dcb220b
refs/heads/master
2021-05-04T15:23:22.813168
2018-06-26T06:51:05
2018-06-26T06:51:05
120,226,991
1
0
null
null
null
null
UTF-8
Python
false
false
710
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import print_function from django.shortcuts import render, reverse import pgmap from querymap.views import p # Create your views here. def index(request): t = p.GetTransaction("ACCESS SHARE") errStr = pgmap.PgMapError() value = int(t.GetMetaValue("readonly".encode('utf-8'), errStr)) dbStatus = "OK" if value != 0: dbStatus = "Read only" t.Commit() return render(request, 'frontpage/index.html', {'db_status': dbStatus}) def getting_started(request): return render(request, 'frontpage/getting_started.html', {}) def whats_different(request): return render(request, 'frontpage/whats_different.html', {})
[ "ross@freestreetmap.org" ]
ross@freestreetmap.org
2779073c282e2d634aee8ebd19099d2006f1b000
6887728a11c8b59b171a192f6d3801bae21d6bfe
/motor.py
d2c4992df0e53c993c51c9fde0b991a43cb001fc
[]
no_license
uprightws/Motor
2a3ee87650c6dd6b1cafa1a6a062748a89f6a804
207d25044b55c3d1c140896f610580fa2761aefa
refs/heads/master
2020-05-17T21:36:08.410101
2019-04-28T02:02:21
2019-04-28T02:02:21
183,977,242
1
0
null
2019-04-29T01:08:40
2019-04-29T01:08:39
null
UTF-8
Python
false
false
7,570
py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name : train
   Author : 雨住风停松子落
   E-mail :
   date : 2019/4/25
   Description :
-------------------------------------------------
   Change Activity:
                  2019/4/25:
-------------------------------------------------
"""
# NOTE(review): this module uses a Python-2 print statement in train(), so it
# is NOT Python-3 compatible as written.
__author__ = '雨住风停松子落'

import os
import pandas as pd
import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
# from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from sklearn.externals import joblib
from sklearn.utils import shuffle

import warnings

warnings.filterwarnings("ignore")


def build_model(data, mode='train'):
    '''
    Initialise the models, including the feature-column indices each model uses.
    :param data: the data set (a pandas DataFrame; columns addressed by position)
    :param mode: train or test
    :return: dict mapping classifier name to [estimator, feature columns]
             ('train') or to feature columns only ('test')
    '''
    # Three hand-picked feature subsets, referenced by column position.
    # NOTE(review): the index lists look hand-tuned per model family —
    # confirm against the feature-selection notebook/source.
    filter_features_1 = [data.columns[idx] for idx in
                         [0, 1, 2, 3, 4, 5, 7, 9, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 26, 27, 28, 29,
                          31, 33, 34, 35, 36, 37, 38, 39, 42]]
    filter_features_2 = [data.columns[idx] for idx in
                         [0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25,
                          26, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 42, 43]]
    filter_features_3 = [data.columns[idx] for idx in
                         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
                          24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43]]
    if mode == 'train':
        # AdaBoost over a balanced random forest base learner.
        ada_clf = AdaBoostClassifier(
            RandomForestClassifier(n_estimators=60, max_depth=13, min_samples_split=5, min_samples_leaf=20,
                                   oob_score=True, random_state=0, class_weight='balanced'),
            algorithm="SAMME",
            n_estimators=1200,
            learning_rate=0.05
        )
        # Bagging over the same balanced random forest base learner.
        bag_clf = BaggingClassifier(
            base_estimator=RandomForestClassifier(n_estimators=60, max_depth=13, min_samples_split=5,
                                                  min_samples_leaf=20, oob_score=True, random_state=0,
                                                  class_weight='balanced'),
            n_estimators=500,
            max_samples=1.0,
            max_features=1.0,
            bootstrap=True,
            bootstrap_features=False,
            n_jobs=1,
            random_state=0
        )
        gbdt_clf = GradientBoostingClassifier(
            n_estimators=1200,
            learning_rate=0.05,
            min_samples_leaf=60,
            max_depth=10,
            min_samples_split=5,
            subsample=0.7,
            random_state=0,
            loss='deviance'
        )
        lgbm_clf = LGBMClassifier(
            boosting_type='gbdt',
            num_leaves=51,
            max_depth=-1,
            learning_rate=0.05,
            n_estimators=600,
            subsample_for_bin=1000,
            objective='binary',
            class_weight='balanced',
            min_split_gain=0.0,
            min_child_weight=0.001,
            min_child_samples=20,
            subsample=1.0,
            subsample_freq=0,
            colsample_bytree=1.0,
            reg_alpha=3.5,
            reg_lambda=1.0,
            random_state=None,
            importance_type='gain',
            silent=True
        )
        # Soft-voting ensemble of heterogeneous base models.
        vote_clf = VotingClassifier(
            estimators=[
                (
                    'LogisticRegression',
                    LogisticRegression(penalty='l2', solver='lbfgs', verbose=0)
                ),
                (
                    'DecisionTreeClassifier',
                    DecisionTreeClassifier()
                ),
                (
                    'RandomForestClassifier',
                    RandomForestClassifier(
                        n_estimators=60, max_depth=13, min_samples_split=5, min_samples_leaf=20,
                        oob_score=True, random_state=0)
                ),
                ('GradientBoostingClassifier', GradientBoostingClassifier(n_estimators=1200, verbose=0)),
                ('GaussianNB', GaussianNB()),
                ('KNeighborsClassifier', KNeighborsClassifier())
            ],
            voting='soft'
        )
        xgb_clf = XGBClassifier(
            learning_rate=0.01,
            n_estimators=5000,
            max_depth=6,
            min_child_weight=1,
            gamma=0.,
            subsample=0.8,
            colsample_btree=0.8,
            objective='binary:logistic',
            scale_pos_weight=1,
            min_samples_split=5,
            min_samples_leaf=60,
            seed=27,
            reg_alpha=0.005,
            random_state=0
        )
        clfs = {
            'AdaBoostClassifier': [ada_clf, filter_features_1],
            'BaggingClassifier': [bag_clf, filter_features_1],
            'GradientBoostingClassifier': [gbdt_clf, filter_features_2],
            'LGBMClassifier': [lgbm_clf, filter_features_3],
            'VotingClassifier': [vote_clf, filter_features_1],
            'XGBClassifier': [xgb_clf, filter_features_1],
        }
    elif mode == 'test':
        # In test mode only the feature subsets are needed; the fitted
        # estimators are loaded from disk in test().
        clfs = {
            'AdaBoostClassifier': filter_features_1,
            'BaggingClassifier': filter_features_1,
            'GradientBoostingClassifier': filter_features_2,
            'LGBMClassifier': filter_features_3,
            'VotingClassifier': filter_features_1,
            'XGBClassifier': filter_features_1,
        }
    else:
        raise Exception("mode must be train or test !!")
    return clfs


def train(train_data, savedPath, upsampling):
    '''
    Train every model and persist each fitted estimator to savedPath.
    :param train_data: training DataFrame; column 45 is assumed to be the
        label — TODO confirm against the data pipeline
    :param savedPath: directory/prefix where <name>.pkl files are written
    :param upsampling: dict mapping classifier name to a repetition count;
        the last 30 rows are re-appended (count - 1) times — presumably to
        oversample a minority class at the tail of the frame; verify
    :return:
    '''
    clfs = build_model(train_data, mode='train')
    for clf_name, [clf, filter_features] in clfs.items():
        trainData = train_data
        # Oversample by repeatedly appending the final 30 rows.
        for i in range(upsampling[clf_name] - 1):
            trainData = pd.concat([trainData, train_data.tail(30)], axis=0)
        if clf_name != 'VotingClassifier':
            print 'shuffle'
            trainData = shuffle(trainData)
        # trainData.dropna(inplace=True)
        clf.fit(trainData[filter_features], trainData[train_data.columns[45]])
        # Score reported on the (un-augmented) training data.
        print(clf_name, '\t', clf.score(train_data[filter_features], train_data[train_data.columns[45]]))
        joblib.dump(clf, savedPath + clf_name + '.pkl')


def test(test_data, modelPath, savedPath, threshold):
    '''
    Predict with every saved model and write the combined result to CSV.
    :param test_data: test DataFrame; column 44 is assumed to be the row id —
        TODO confirm
    :param modelPath: directory/prefix the .pkl models are loaded from
    :param savedPath: output CSV path
    :param threshold: dict of per-model quantile thresholds (each model keeps
        the top fraction of positive-class probabilities as positives)
    :return:
    '''
    clfs = build_model(test_data, mode='test')
    result = []
    idx = 0
    for clf_name, filter_features in clfs.items():
        clf = joblib.load(modelPath + clf_name + '.pkl')
        p_proba = clf.predict_proba(test_data[filter_features])
        # Sort positive-class probabilities to find the per-model cutoff.
        p_list_midl = [p[1] for p in p_proba]
        p_list_midl.sort(reverse=True)
        p_list = [1 if p[1] > p_list_midl[int(len(p_list_midl) * threshold[clf_name])] else 0 for p in p_proba]
        if idx == 0:
            result = p_list
        else:
            # Logical AND across models: a row is positive only if every
            # model so far flagged it.
            # NOTE(review): 5738 is a hard-coded dataset size — breaks on any
            # other test set; should be len(result).
            result = [1 if (result[i] == 1 and p_list[i] == 1) else 0 for i in range(5738)]
        idx = idx + 1
    res_df = pd.DataFrame(data=np.column_stack(
        [np.reshape(test_data[test_data.columns[44]], int(test_data[test_data.columns[0]].count())), result]),
        columns=['idx', 'result'])
    res_df.to_csv(savedPath, index=False)
    print("done!")
[ "uprightws@live.cn" ]
uprightws@live.cn
5f5aa4594437d785225bffd8a12312b3ccdfb9b4
99441588c7d6159064d9ce2b94d3743a37f85d33
/cmake-build-debug/read_CAN/catkin_generated/pkg.develspace.context.pc.py
2581bf96c93bf3ea493f4c25cd72ba5837837945
[]
no_license
YZT1997/robolab_project
2786f8983c4b02040da316cdd2c8f9bb73e2dd4c
a7edb588d3145356566e9dcc37b03f7429bcb7d6
refs/heads/master
2023-09-02T21:28:01.280464
2021-10-14T02:06:35
2021-10-14T02:06:35
369,128,037
0
0
null
null
null
null
UTF-8
Python
false
false
370
py
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else [] PROJECT_CATKIN_DEPENDS = "".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else [] PROJECT_NAME = "read_CAN" PROJECT_SPACE_DIR = "/home/yangzt/catkin_ws/devel" PROJECT_VERSION = "0.0.0"
[ "yangzt_0943@163.com" ]
yangzt_0943@163.com
d09084667dcf104c853d56b0ea17fabff94f75a9
32747097a4eae95a85c0e153a797c2689eb76052
/recipes/android/src/android/broadcast.py
ba3dfc97655e41f177a86cd2bda7c1ea5bd3e382
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
lbryio/lbry-android-sdk
fa973940c38c8eb7d81a0b4698fa4de353eaf58c
d9d81957647cb81f7227205cbb4ecba515556e74
refs/heads/master
2022-12-14T10:36:05.641392
2022-11-23T20:03:41
2022-11-23T20:03:41
241,861,403
3
4
MIT
2022-12-11T20:47:57
2020-02-20T10:57:08
C
UTF-8
Python
false
false
3,140
py
# -------------------------------------------------------------------
# Broadcast receiver bridge

from jnius import autoclass, PythonJavaClass, java_method
from android.config import JAVA_NAMESPACE, JNI_NAMESPACE


class BroadcastReceiver(object):
    """Python wrapper around an Android BroadcastReceiver.

    Resolves intent actions/categories, builds the Java-side receiver and
    filter, and dispatches received intents to a Python callback on a
    dedicated handler thread.
    """

    class Callback(PythonJavaClass):
        # Java interface implemented on the Python side; jnius routes
        # onReceive() calls from Java into this object.
        __javainterfaces__ = [JNI_NAMESPACE + '/GenericBroadcastReceiverCallback']
        __javacontext__ = 'app'

        def __init__(self, callback, *args, **kwargs):
            self.callback = callback
            PythonJavaClass.__init__(self, *args, **kwargs)

        @java_method('(Landroid/content/Context;Landroid/content/Intent;)V')
        def onReceive(self, context, intent):
            # Forward the broadcast straight to the Python callback.
            self.callback(context, intent)

    def __init__(self, callback, actions=None, categories=None):
        """Create the receiver.

        :param callback: callable(context, intent) invoked per broadcast
        :param actions: iterable of action names, either full dotted names or
            shorthand suffixes expanded to Intent.ACTION_<NAME>
        :param categories: iterable of category names, same expansion rule
        """
        super(BroadcastReceiver, self).__init__()
        self.callback = callback

        if not actions and not categories:
            raise Exception('You need to define at least actions or categories')

        def _expand_partial_name(partial_name):
            # Expand e.g. "battery_low" -> Intent.ACTION_BATTERY_LOW; names
            # containing a dot are assumed to already be fully qualified.
            # (Uses the Intent class resolved below — fine, it is bound
            # before this helper is called.)
            if '.' in partial_name:
                return partial_name  # Its actually a full dotted name
            else:
                name = 'ACTION_{}'.format(partial_name.upper())
                if not hasattr(Intent, name):
                    raise Exception('The intent {} doesnt exist'.format(name))
                return getattr(Intent, name)

        # resolve actions/categories first
        Intent = autoclass('android.content.Intent')
        resolved_actions = [_expand_partial_name(x) for x in actions or []]
        resolved_categories = [_expand_partial_name(x) for x in categories or []]

        # resolve android API
        GenericBroadcastReceiver = autoclass(JAVA_NAMESPACE + '.GenericBroadcastReceiver')
        IntentFilter = autoclass('android.content.IntentFilter')
        HandlerThread = autoclass('android.os.HandlerThread')

        # create a thread for handling events from the receiver
        self.handlerthread = HandlerThread('handlerthread')

        # create a listener
        self.listener = BroadcastReceiver.Callback(self.callback)
        self.receiver = GenericBroadcastReceiver(self.listener)
        self.receiver_filter = IntentFilter()
        for x in resolved_actions:
            self.receiver_filter.addAction(x)
        for x in resolved_categories:
            self.receiver_filter.addCategory(x)

    def start(self):
        # Register with Android; broadcasts are delivered on our own
        # handler thread rather than the main thread.
        Handler = autoclass('android.os.Handler')
        self.handlerthread.start()
        self.handler = Handler(self.handlerthread.getLooper())
        self.context.registerReceiver(
            self.receiver, self.receiver_filter, None, self.handler)

    def stop(self):
        # Unregister and shut the handler thread down.
        self.context.unregisterReceiver(self.receiver)
        self.handlerthread.quit()

    @property
    def context(self):
        # Return the current Android context: the running service when
        # executing inside a python-for-android service, else the activity.
        from os import environ
        if 'PYTHON_SERVICE_ARGUMENT' in environ:
            PythonService = autoclass(JAVA_NAMESPACE + '.PythonService')
            return PythonService.mService
        PythonActivity = autoclass(JAVA_NAMESPACE + '.PythonActivity')
        return PythonActivity.mActivity
[ "akinwale@gmail.com" ]
akinwale@gmail.com
c3a87e73a1c9e46f95c2c5bbf037974632f09470
747f759311d404af31c0f80029e88098193f6269
/addons/esale_joomla/__terp__.py
0b9ba5da767aab77ff4bccd6f9b18701e5bdec38
[]
no_license
sgeerish/sirr_production
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
1081f3a5ff8864a31b2dcd89406fac076a908e78
refs/heads/master
2020-05-19T07:21:37.047958
2013-09-15T13:03:36
2013-09-15T13:03:36
9,648,444
0
1
null
null
null
null
UTF-8
Python
false
false
62
py
/home/openerp/production/extra-addons/esale_joomla/__terp__.py
[ "geerish@omerp.net" ]
geerish@omerp.net
2f4374a7fc6382c63983c08d36985b27c3c49257
07455048558230658249786db958701010afa1aa
/src/config.py
f12924262c1d471a7ddd2c2136c341fc5b4489ec
[]
no_license
lohithmunakala/Group-Classification
73965201e4fce812fefcb8818f185ee948759b08
166a9ec91595a932d96e0a1da508bc9be574e06a
refs/heads/master
2023-01-24T18:38:49.777583
2020-11-19T12:11:05
2020-11-19T12:11:05
289,323,026
0
1
null
2020-10-30T08:23:40
2020-08-21T17:06:29
Jupyter Notebook
UTF-8
Python
false
false
1,044
py
import os

# Paths where our data is stored (local run vs. Google Colab).
DATASET_PATH = "input"
DATASET_PATH_COLAB = "/content/drive/My Drive/Group_Classification/input"

# Initializing the class labels.
CLASSES = ["group of babies", "group of adults", "group of teenagers"]

# Defining the size of the train, test and validation sets.
TRAIN_SPLIT = 0.75
TEST_SPLIT = 0.25
VAL_SPLIT = 0.1

# Cyclical-learning-rate settings: min LR, max LR, batch size, step size,
# CLR method and the number of epochs.
MIN_LR = 1e-6
MAX_LR = 1e-4
BATCH_SIZE = 32
# Backward-compatible alias: the original name was misspelled and may still
# be imported elsewhere.
BATHC_SIZE = BATCH_SIZE
STEP_SIZE = 8
CLR_METHOD = "triangular"
NUM_EPOCHS = 48

# Initializing the output model paths (local run vs. Google Colab).
MODEL_PATH = os.path.sep.join(["output", "group classification model"])
MODEL_PATH_COLAB = "/content/drive/My Drive/Group_Classification/model"

# Define the output paths for the generated plots.
LRFIND_PLOT_PATH = "/content/drive/My Drive/Group_Classification/output/LRFIND_PLOT.png"
TRAINING_PLOT_PATH = "/content/drive/My Drive/Group_Classification/output/TRAINING_PLOT.png"
CLR_PLOT_PATH = "/content/drive/My Drive/Group_Classification/output/CLR_PLOT.png"
[ "lohithmunakala@gmail.com" ]
lohithmunakala@gmail.com
b68987bce2f40abf5a5b3be0d046f49f02354bc8
650f9e246de38d0ceaee8726f27801f3337e24ce
/string-trainer/simple/csimple.py
7d3e49c4a22d4dd82a8951120a8f4e6961a58054
[ "MIT" ]
permissive
paulscottrobson/old-trainer-archive
57b6fbb5886e5fe526c37f40b7fb9c179176bce7
b3659d3f36b7443594202e0ae9439e80e493a22c
refs/heads/master
2021-01-20T02:46:44.751050
2017-04-26T07:26:34
2017-04-26T07:26:34
89,451,106
0
0
null
null
null
null
UTF-8
Python
false
false
5,861
py
# *****************************************************************************************
#
#										Simple compiler for TAB1 format
#
# *****************************************************************************************

import re
import sys


# *****************************************************************************************
#								Compiler / Processor Exception
# *****************************************************************************************

class CompilerException(Exception):
    """Raised for any TAB1 parsing/compilation error; carries a message."""
    def __init__(self, message):
        self.message = message
        Exception.__init__(self)


# *****************************************************************************************
#										Strum class
# *****************************************************************************************

class Strum:
    """One strum: a per-string fret definition occurring at a quarter-beat time.

    The fret string is parsed once at construction into a pre-rendered form
    (one character per voice, 'a' + chromatic offset, '-' for a muted string).
    """

    def __init__(self, strumDef, qbTime, voices, label=""):
        self.strum = strumDef
        self.qbTime = qbTime
        self.label = label
        self.preRender = self.convertToRender(strumDef, voices)

    def getStrum(self):
        return self.strum

    def getQuarterBeatTime(self):
        return self.qbTime

    def getLabel(self):
        return self.label

    def toString(self):
        # BUG FIX: the original read self.time, which is never assigned
        # (__init__ stores the time as self.qbTime), so toString() raised
        # AttributeError whenever called.
        s = self.strum + "@" + str(self.qbTime)
        if self.label != "":
            s = s + "(" + self.label + ")"
        return s

    def convertToRender(self, strum, voices):
        """Convert a fret definition to its render string (one char/voice)."""
        strum = strum.upper().strip()
        r = []
        while strum != "":
            if strum[0] == 'X':
                # 'X' marks a muted/unplayed string.
                r.append(-1)
                strum = strum[1:]
            elif strum[0] in Strum.FRETS:
                # Diatonic fret -> chromatic offset; each group of 7 frets
                # is one octave (12 semitones).
                diatonic = Strum.FRETS.index(strum[0])
                r.append(Strum.TOCHROMATIC[diatonic % 7] + int(diatonic / 7) * 12)
                strum = strum[1:]
                # Optional '+' sharpens the previous note by a semitone.
                if (strum + " ")[0] == '+':
                    r[-1] += 1
                    strum = strum[1:]
            else:
                raise CompilerException("Bad strum " + strum)
        # first strum given is the treble so make it the last.
        r.reverse()
        # right pad (bass side) with muted strings up to the voice count.
        while len(r) < voices:
            r.insert(0, -1)
        return "".join([chr(x + 97) if x >= 0 else "-" for x in r])

    def render(self):
        return self.preRender


# Diatonic-to-chromatic semitone offsets for one octave.
Strum.TOCHROMATIC = [
    0, 2, 4, 5, 7, 9, 10  # D E F# G A B C
]

# Legal fret characters; T/L/W/H/F extend past fret 9.
Strum.FRETS = "0123456789TLWHF"

# Quarter-beat adjustments for timing characters.
# NOTE(review): input is upper-cased before parsing, so the lowercase "o"
# entry can never match — confirm whether it is intentional.
Strum.QBOFFSETS = {"O": 8, "o": 8, "-": -2, "=": -3, ".": 2}


# *****************************************************************************************
#										Bar class
# *****************************************************************************************

class Bar:
    """One bar: an ordered list of strums with quarter-beat positions."""

    def __init__(self, barNumber, beats, voices):
        self.barNumber = barNumber
        self.beats = beats
        self.strums = []
        self.voices = voices
        self.qbPosition = 0

    def add(self, strumDef, label=""):
        """Append a strum at the current position; advances one beat (4 qb)."""
        self.strums.append(Strum(strumDef, self.qbPosition, self.voices, label))
        self.qbPosition += 4
        return self

    def toString(self):
        s = "#{0} B:{1} V:{2} C:{3} {{".format(self.barNumber, self.beats, self.voices, len(self.strums))
        s = s + " ".join([x.toString() for x in self.strums]) + "}"
        return s

    def isOffset(self, c):
        return c in Strum.QBOFFSETS

    def offset(self, c):
        """Shift the current quarter-beat position by the offset for *c*."""
        if not self.isOffset(c):
            raise CompilerException("Unknown offset " + c)
        self.qbPosition += Strum.QBOFFSETS[c]

    def render(self):
        """Render the bar: gap digits (1-8 quarter-beats) between strums."""
        r = ""
        qbPosition = 0
        for strum in self.strums:
            qbElapsed = strum.getQuarterBeatTime() - qbPosition
            while qbElapsed > 0:
                amt = min(8, qbElapsed)
                r = r + str(amt)
                qbElapsed = qbElapsed - amt
            r = r + strum.render()
            qbPosition = strum.getQuarterBeatTime()
        return r


# *****************************************************************************************
#										Song Class
# *****************************************************************************************

class Song:
    """A full TAB1 song: metadata keys plus a list of compiled bars."""

    def __init__(self, sourceFile):
        self.reset()
        self.loadTab1(sourceFile)
        self.compileBody()
        if self.get("title") == "":
            raise CompilerException("No title provided")

    def reset(self):
        # Default metadata; overridden by ":=" assignments in the source.
        self.bars = []
        self.keys = {"title": "", "author": "", "beats": "4", "tempo": "100",
                     "version": "1", "tuning": "d3,a4,d4", "type": "dulcimer"}

    def get(self, key):
        return self.keys[key.strip().lower()]

    def loadTab1(self, sourceFile):
        # pre process file - tabs, spaces, comments
        source = open(sourceFile).readlines()
        source = [x if x.find("//") < 0 else x[:x.find("//")] for x in source]
        source = [x.replace("\t", " ").strip() for x in source]
        # key updates.
        for assign in [x for x in source if x.find(":=") >= 0]:
            assign = [x.strip() for x in assign.split(":=")]
            # NOTE(review): this tests the list, not the value — it looks
            # like it was meant to strip quotes from assign[1]; left as-is
            # to preserve behaviour.
            if assign[0] == '"' and assign[-1] == '"':
                assign = assign[1:-1]
            self.keys[assign[0].lower()] = assign[1]
        source = [x for x in source if x.find(":=") < 0]
        self.source = source

    def compileBody(self):
        # Each non-empty source line holds bars separated by '|'.
        for line in range(0, len(self.source)):
            if self.source[line] != "":
                for barPart in [x.strip() for x in self.source[line].split("|") if x.strip() != ""]:
                    newBar = Bar(len(self.bars), int(self.get("beats")), 3)
                    self.bars.append(newBar)
                    try:
                        self.compileTab1(newBar, barPart.upper())
                    except CompilerException as cEx:
                        # Re-raise with the 1-based source line appended.
                        newMsg = cEx.message + " @ " + str(line + 1)
                        raise Exception(newMsg)

    def compileTab1(self, bar, src):
        """Compile one bar body: strum groups and timing-offset characters."""
        while src != "":
            m = re.match("^([X" + Strum.FRETS + "\\+]+)\\s*(.*)$", src)
            if m is not None:
                strum = m.group(1)
                bar.add(strum)
                src = m.group(2)
            elif src[0] in Strum.QBOFFSETS:
                bar.offset(src[0])
                src = src[1:].strip()
            else:
                raise CompilerException("Unknown command " + src)

    def exportToJSON(self, handle):
        """Write the song (metadata + rendered bars) as JSON to *handle*."""
        handle.write("{ \n")
        keys = [x for x in self.keys.keys()]
        keys.sort()
        for k in keys:
            handle.write(' {0:14}:"{1}",\n'.format('"' + k + '"', self.keys[k]))
        handle.write(' "bars": [\n')
        for n in range(0, len(self.bars)):
            r = self.bars[n].render()
            handle.write('{0:14}"{1}"{2}\n'.format("", r, "," if n < len(self.bars) - 1 else ""))
        handle.write("\n ]\n")
        handle.write("} \n")


if __name__ == "__main__":
    # Guarded so that importing this module no longer reads/writes files
    # as a side effect; running it as a script behaves as before.
    s = Song("twinkle.tab1")
    s.exportToJSON(sys.stdout)
    s.exportToJSON(open("../app/music.json", "w"))
[ "paul@robsons.org.uk" ]
paul@robsons.org.uk
8fc817688db6b26a0b9a26e87bf9538c291e886e
0d1eb0e64ac89839298caa8f8f496c66abeb2367
/browser.py
65405345b2f45f9ec3d16ea26db8fce13f46f123
[]
no_license
sho86741/zaim-viz
637285f7d4dc31647df6f08b4b842bc36c8f19c2
1be94f41438fdba9558e7a0250be10721faf618f
refs/heads/master
2022-07-05T01:35:57.931164
2020-05-23T14:55:36
2020-05-23T14:55:36
265,807,394
0
0
null
null
null
null
UTF-8
Python
false
false
970
py
# coding: utf-8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By


class Browser(object):
    """Thin proxy around a Selenium WebDriver with synchronised helpers.

    Unknown attribute lookups are delegated to the wrapped driver, so a
    Browser instance can be used wherever a plain WebDriver is expected.
    """

    def __init__(self, webdriver):
        # Keep a reference to the wrapped WebDriver instance.
        self.driver = webdriver

    def __getattr__(self, name):
        # Delegate anything we do not define ourselves to the driver.
        if not hasattr(self.driver, name):
            raise AttributeError
        return getattr(self.driver, name)

    def sync_send_keys(self, locator, key):
        """Type *key* into the element at *locator*, waiting before and after."""
        # Block until the target element can be interacted with.
        WebDriverWait(self, 120).until(EC.element_to_be_clickable(locator))
        # Type into the element.
        by, value = locator
        self.find_element(by, value).send_keys(key)
        # Block until the element's value reflects what was typed.
        WebDriverWait(self, 30).until(
            EC.text_to_be_present_in_element_value(locator, key))

    def wait_element(self, id):
        """Return the element with the given *id* once it is present in the DOM."""
        present = EC.presence_of_element_located((By.ID, id))
        return WebDriverWait(self, 10).until(present)
[ "sho86741@protonmail.com" ]
sho86741@protonmail.com
261e88ade9dce201fa6cf55c90a848ba3f2b34be
2a2e1a42a8d26040258b2d9a2b8f7d3b18a62024
/api/urls.py
819e588744d93a7c73ef7ece0e53597fdb27e164
[]
no_license
rashi-agrawal29/music-controller
1567a881fcd67ea3d688b9fae0b43a01d6df813d
1d2b947edb7aca78e1017f7265ca50ffe8eaed66
refs/heads/main
2023-06-24T17:49:25.524970
2021-07-27T11:08:08
2021-07-27T11:08:08
389,942,056
0
0
null
null
null
null
UTF-8
Python
false
false
458
py
from django.urls import path from .views import GetRoom, LeaveRoom, RoomView, CreateRoomView, JoinRoom, UpdateRoom, UserInRoom urlpatterns = [ path('room', RoomView.as_view()), path('create-room', CreateRoomView.as_view()), path('get-room', GetRoom.as_view()), path('join-room', JoinRoom.as_view()), path('user-in-room', UserInRoom.as_view()), path('leave-room', LeaveRoom.as_view()), path('update-room', UpdateRoom.as_view()) ]
[ "rashiagrawal299@gmail.com" ]
rashiagrawal299@gmail.com
9620af649f65a0c0002935d9e24ea87dd7578b35
b0cdab54c5e81681125c01801148c287605ee8d0
/speciality/migrations/0005_auto_20181228_2150.py
ad4d5210ff776dbc55eeccf74f5266e8a064ed44
[]
no_license
lpd76/rdavid2
5528746749acc51d4d0f5efd77886929798e2569
18aa5120fe4ba0ea44f611dd52b008db52641f17
refs/heads/master
2020-04-13T20:47:58.141579
2019-01-17T16:51:31
2019-01-17T16:51:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
637
py
# Generated by Django 2.1.4 on 2018-12-28 21:50 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('speciality', '0004_auto_20181228_2140'), ] operations = [ migrations.AlterModelOptions( name='speciality', options={'verbose_name_plural': 'specialities'}, ), migrations.AlterField( model_name='specialitydetails', name='speciality', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='speciality.Speciality'), ), ]
[ "louisphilippe.david@gmail.com" ]
louisphilippe.david@gmail.com
a03f688cd3bb6ceef3f26b749170bc2c0ac710d7
82770c7bc5e2f27a48b8c370b0bab2ee41f24d86
/microblog/flask/venv/lib/python2.7/site-packages/billiard/forking.py
57fc9795e47d83e6c656232b9ddde88438c6ec12
[ "Apache-2.0" ]
permissive
johankaito/fufuka
77ddb841f27f6ce8036d7b38cb51dc62e85b2679
32a96ecf98ce305c2206c38443e58fdec88c788d
refs/heads/master
2022-07-20T00:51:55.922063
2015-08-21T20:56:48
2015-08-21T20:56:48
39,845,849
2
0
Apache-2.0
2022-06-29T23:30:11
2015-07-28T16:39:54
Python
UTF-8
Python
false
false
17,515
py
# # Module for starting a process object using os.fork() or CreateProcess() # # multiprocessing/forking.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import import os import sys import signal import warnings from pickle import load, HIGHEST_PROTOCOL from billiard import util from billiard import process from billiard.five import int_types from .reduction import dump from .compat import _winapi as win32 __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close'] try: WindowsError = WindowsError # noqa except NameError: class WindowsError(Exception): # noqa pass W_OLD_DJANGO_LAYOUT = """\ Will add directory %r to path! This is necessary to accommodate \ pre-Django 1.4 layouts using setup_environ. You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \ environment variable. """ # # Choose whether to do a fork or spawn (fork+exec) on Unix. # This affects how some shared resources should be created. # _forking_is_enabled = sys.platform != 'win32' # # Check that the current thread is spawning a child process # def assert_spawning(self): if not Popen.thread_is_spawning(): raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(self).__name__ ) # # Unix # if sys.platform != 'win32': try: import thread except ImportError: import _thread as thread # noqa import select WINEXE = False WINSERVICE = False exit = os._exit duplicate = os.dup close = os.close _select = util._eintr_retry(select.select) # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. 
# class Popen(object): _tls = thread._local() def __init__(self, process_obj): # register reducers from billiard import connection # noqa _Django_old_layout_hack__save() sys.stdout.flush() sys.stderr.flush() self.returncode = None r, w = os.pipe() self.sentinel = r if _forking_is_enabled: self.pid = os.fork() if self.pid == 0: os.close(r) if 'random' in sys.modules: import random random.seed() code = process_obj._bootstrap() os._exit(code) else: from_parent_fd, to_child_fd = os.pipe() cmd = get_command_line() + [str(from_parent_fd)] self.pid = os.fork() if self.pid == 0: os.close(r) os.close(to_child_fd) os.execv(sys.executable, cmd) # send information to child prep_data = get_preparation_data(process_obj._name) os.close(from_parent_fd) to_child = os.fdopen(to_child_fd, 'wb') Popen._tls.process_handle = self.pid try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del(Popen._tls.process_handle) to_child.close() # `w` will be closed when the child exits, at which point `r` # will become ready for reading (using e.g. select()). os.close(w) util.Finalize(self, os.close, (r,)) def poll(self, flag=os.WNOHANG): if self.returncode is None: try: pid, sts = os.waitpid(self.pid, flag) except os.error: # Child process not yet created. See #1731717 # e.errno == errno.ECHILD == 10 return None if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if self.returncode is None: if timeout is not None: r = _select([self.sentinel], [], [], timeout)[0] if not r: return None # This shouldn't block if select() returned successfully. 
return self.poll(os.WNOHANG if timeout == 0.0 else 0) return self.returncode def terminate(self): if self.returncode is None: try: os.kill(self.pid, signal.SIGTERM) except OSError: if self.wait(timeout=0.1) is None: raise @staticmethod def thread_is_spawning(): if _forking_is_enabled: return False else: return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return handle # # Windows # else: try: import thread except ImportError: import _thread as thread # noqa import msvcrt try: import _subprocess except ImportError: import _winapi as _subprocess # noqa # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") exit = win32.ExitProcess close = win32.CloseHandle # # # def duplicate(handle, target_process=None, inheritable=False): if target_process is None: target_process = _subprocess.GetCurrentProcess() h = _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS ) if sys.version_info[0] < 3 or ( sys.version_info[0] == 3 and sys.version_info[1] < 3): h = h.Detach() return h # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. 
# class Popen(object): ''' Start a subprocess to run the code of a process object ''' _tls = thread._local() def __init__(self, process_obj): _Django_old_layout_hack__save() # create pipe for communication with child rfd, wfd = os.pipe() # get handle for read end of the pipe and make it inheritable rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) os.close(rfd) # start process cmd = get_command_line() + [rhandle] cmd = ' '.join('"%s"' % x for x in cmd) hp, ht, pid, tid = _subprocess.CreateProcess( _python_exe, cmd, None, None, 1, 0, None, None, None ) close(ht) if isinstance(ht, int_types) else ht.Close() (close(rhandle) if isinstance(rhandle, int_types) else rhandle.Close()) # set attributes of self self.pid = pid self.returncode = None self._handle = hp self.sentinel = int(hp) # send information to child prep_data = get_preparation_data(process_obj._name) to_child = os.fdopen(wfd, 'wb') Popen._tls.process_handle = int(hp) try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del Popen._tls.process_handle to_child.close() @staticmethod def thread_is_spawning(): return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return duplicate(handle, Popen._tls.process_handle) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _subprocess.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = _subprocess.WaitForSingleObject(int(self._handle), msecs) if res == _subprocess.WAIT_OBJECT_0: code = _subprocess.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _subprocess.TerminateProcess(int(self._handle), TERMINATE) except WindowsError: if self.wait(timeout=0.1) is None: raise # # # if WINSERVICE: _python_exe = os.path.join(sys.exec_prefix, 
'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--billiard-fork': assert len(argv) == 3 os.environ["FORKED_BY_MULTIPROCESSING"] = "1" return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): main() sys.exit() def get_command_line(): ''' Returns prefix of command line used for spawning a child process ''' if process.current_process()._identity == () and is_forking(sys.argv): raise RuntimeError(''' Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.''') if getattr(sys, 'frozen', False): return [sys.executable, '--billiard-fork'] else: prog = 'from billiard.forking import main; main()' return [_python_exe, '-c', prog, '--billiard-fork'] def _Django_old_layout_hack__save(): if 'DJANGO_PROJECT_DIR' not in os.environ: try: settings_name = os.environ['DJANGO_SETTINGS_MODULE'] except KeyError: return # not using Django. 
conf_settings = sys.modules.get('django.conf.settings') configured = conf_settings and conf_settings.configured try: project_name, _ = settings_name.split('.', 1) except ValueError: return # not modified by setup_environ project = __import__(project_name) try: project_dir = os.path.normpath(_module_parent_dir(project)) except AttributeError: return # dynamically generated module (no __file__) if configured: warnings.warn(UserWarning( W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir) )) os.environ['DJANGO_PROJECT_DIR'] = project_dir def _Django_old_layout_hack__load(): try: sys.path.append(os.environ['DJANGO_PROJECT_DIR']) except KeyError: pass def _module_parent_dir(mod): dir, filename = os.path.split(_module_dir(mod)) if dir == os.curdir or not dir: dir = os.getcwd() return dir def _module_dir(mod): if '__init__.py' in mod.__file__: return os.path.dirname(mod.__file__) return mod.__file__ def main(): ''' Run code specifed by data received over pipe ''' global _forking_is_enabled _Django_old_layout_hack__load() assert is_forking(sys.argv) _forking_is_enabled = False handle = int(sys.argv[-1]) if sys.platform == 'win32': fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) else: fd = handle from_parent = os.fdopen(fd, 'rb') process.current_process()._inheriting = True preparation_data = load(from_parent) prepare(preparation_data) # Huge hack to make logging before Process.run work. 
try: os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__ except KeyError: pass except AttributeError: pass loglevel = os.environ.get("_MP_FORK_LOGLEVEL_") logfile = os.environ.get("_MP_FORK_LOGFILE_") or None format = os.environ.get("_MP_FORK_LOGFORMAT_") if loglevel: from billiard import util import logging logger = util.get_logger() logger.setLevel(int(loglevel)) if not logger.handlers: logger._rudimentary_setup = True logfile = logfile or sys.__stderr__ if hasattr(logfile, "write"): handler = logging.StreamHandler(logfile) else: handler = logging.FileHandler(logfile) formatter = logging.Formatter( format or util.DEFAULT_LOGGING_FORMAT, ) handler.setFormatter(formatter) logger.addHandler(handler) self = load(from_parent) process.current_process()._inheriting = False from_parent.close() exitcode = self._bootstrap() exit(exitcode) def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' from billiard.util import _logger, _log_to_stderr d = dict( name=name, sys_path=sys.path, sys_argv=sys.argv, log_to_stderr=_log_to_stderr, orig_dir=process.ORIGINAL_DIR, authkey=process.current_process().authkey, ) if _logger is not None: d['log_level'] = _logger.getEffectiveLevel() if not WINEXE and not WINSERVICE: main_path = getattr(sys.modules['__main__'], '__file__', None) if not main_path and sys.argv[0] not in ('', '-c'): main_path = sys.argv[0] if main_path is not None: if (not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None): main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['main_path'] = os.path.normpath(main_path) return d # # Prepare current process # old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' old_main_modules.append(sys.modules['__main__']) if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process()._authkey = data['authkey'] if 'log_to_stderr' in data and 
data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'main_path' in data: main_path = data['main_path'] main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == '__init__': main_name = os.path.basename(os.path.dirname(main_path)) if main_name == '__main__': main_module = sys.modules['__main__'] main_module.__file__ = main_path elif main_name != 'ipython': # Main modules not actually called __main__.py may # contain additional code that should still be executed import imp if main_path is None: dirs = None elif os.path.basename(main_path).startswith('__init__.py'): dirs = [os.path.dirname(os.path.dirname(main_path))] else: dirs = [os.path.dirname(main_path)] assert main_name not in sys.modules, main_name file, path_name, etc = imp.find_module(main_name, dirs) try: # We would like to do "imp.load_module('__main__', ...)" # here. However, that would cause 'if __name__ == # "__main__"' clauses to be executed. main_module = imp.load_module( '__parents_main__', file, path_name, etc ) finally: if file: file.close() sys.modules['__main__'] = main_module main_module.__name__ = '__main__' # Try to make the potentially picklable objects in # sys.modules['__main__'] realize they are in the main # module -- somewhat ugly. for obj in list(main_module.__dict__.values()): try: if obj.__module__ == '__parents_main__': obj.__module__ = '__main__' except Exception: pass
[ "john.g.keto@gmail.com" ]
john.g.keto@gmail.com
cee01a1f512e64d11d177b39003a6d66c4c62798
f375899369ba86aed1da89101c31817168ffec40
/cinema/urls.py
f73d16e3686a88eb9e7b40b41d324fb2021b3100
[]
no_license
sheremilbekov/cinema
f596a7d1e41f03161a3ddf0c3594f39619c812df
d8baac5c7e25a90340a35e1e0b0cce093014f965
refs/heads/master
2023-03-30T10:19:20.654540
2021-04-07T12:10:54
2021-04-07T12:10:54
354,850,726
0
0
null
null
null
null
UTF-8
Python
false
false
941
py
"""cinema URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include urlpatterns = [ path('admin/', admin.site.urls), path('cooking/', include('main.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "you@example.com" ]
you@example.com
16effac639ce13ca5ccf22f2cfad0658eac06638
547ba955855ff623a8ef6e80fcfaddebadf34bed
/Chapter08/B06246_08_14-slope.py
a5a8f6c01c4af2755f280d506db81016afd04122
[]
no_license
CodedQuen/QGIS-Python-Programming-Cookbook
94a36d265d0336d5bb36ac02d637ba17ee765b04
f84e0159f7c8ec81a29573a7fd2e03b046efce33
refs/heads/master
2022-05-28T05:04:35.053121
2020-05-05T09:44:50
2020-05-05T09:44:50
261,414,871
0
0
null
null
null
null
UTF-8
Python
false
false
2,009
py
# Computing Road Slope using Elevation Data # https://github.com/GeospatialPython/Learn/raw/master/road.zip from PyQt4.QtCore import * from PyQt4.QtGui import * import processing dem = "/qgis_data/road/dem.asc" road = "/qgis_data/road/road.shp" slope = "/qgis_data/road/slope.tif" segRoad = "/qgis_data/road/segRoad.shp" steepness = "/qgis_data/road/steepness.shp" hillshade = "/qgis_data/road/hillshade.tif" demLyr = QgsRasterLayer(dem, "DEM") roadLyr = QgsVectorLayer(road, "Road", "ogr") ext = demLyr.extent() xmin = ext.xMinimum() ymin = ext.yMinimum() xmax = ext.xMaximum() ymax = ext.yMaximum() demBox = "{},{},{},{}".format(xmin,xmax,ymin,ymax) processing.runalg("grass7:r.slope",dem,0,False,1,0,demBox,0,slope) ext = roadLyr.extent() xmin = ext.xMinimum() ymin = ext.yMinimum() xmax = ext.xMaximum() ymax = ext.yMaximum() roadBox = "{},{},{},{}".format(xmin,xmax,ymin,ymax) processing.runalg("grass7:v.split.length",road,500,roadBox,-1,0.0001,0,segRoad) slopeLyr = QgsRasterLayer(slope, "Slope") segRoadLyr = QgsVectorLayer(segRoad, "Segmented Road", "ogr") QgsMapLayerRegistry.instance().addMapLayers([segRoadLyr,slopeLyr], False) processing.runalg("saga:addgridvaluestoshapes",segRoad,slope,0,steepness) steepLyr = QgsVectorLayer(steepness, "Road Gradient", "ogr") roadGrade = ( ("Rolling Hill", 0.0, 20.0, "green"), ("Steep", 20.0, 40.0, "yellow"), ("Very Steep", 40.0, 90.0, "red")) ranges = [] for label, lower, upper, color in roadGrade: sym = QgsSymbolV2.defaultSymbol(steepLyr.geometryType()) sym.setColor(QColor(color)) sym.setWidth(3.0) rng = QgsRendererRangeV2(lower, upper, sym, label) ranges.append(rng) field = "slopetif" renderer = QgsGraduatedSymbolRendererV2(field, ranges) steepLyr.setRendererV2(renderer) processing.runalg("saga:analyticalhillshading",dem,0,158,45,4,hillshade) hs = QgsRasterLayer(hillshade, "Terrain") QgsMapLayerRegistry.instance().addMapLayers([steepLyr, hs])
[ "noreply@github.com" ]
noreply@github.com
79ea992e09ac27c13270a55465339b7a6ed9af03
40644ab8302eabcfe29fc665191a0e7373d2d36a
/final 0.0/modules/mouvements.py
aa4304e8d0f2f93f7a15047205ba3459e2c45f18
[]
no_license
katyushacccp/ISN_projet_final
3ee940aa725bc68e9a9506deac48960aea43d828
41278c58431b69238b2c955ee1088c36d1db203e
refs/heads/master
2020-12-24T19:04:33.560212
2016-05-30T20:25:20
2016-05-30T20:25:20
55,981,116
0
1
null
2016-05-30T20:25:20
2016-04-11T14:50:25
Python
UTF-8
Python
false
false
11,399
py
from random import * from modules.affichage import * from modules.recherche import * from time import * def start(): """Cette fonction sert à initialiser la variable globale 'compteur' qui va par la suite gérer le temps""" wFile("compteur","cub",0) def melangeur(cube,can,nombre): """Cette fonction sert à mélanger de manière aléatoire le cube un nombre n de fois, n étant défini par le paramètre 'nombre'""" for i in range(nombre): can.after(i*rFile("timeur","cub"),lambda:rotation(cube,can,randint(0,5),choice(["droite","gauche"]))) def rotative(cube,can,face,faceSup,norme): """La fonction 'rotative' normalise les différentes fonctions qui gère les mouvements dans le cube. La fonction a également la notion du temps, pour cela elle s'aide de la variable globale 'compteur'""" compteur=rFile("compteur","cub") wFile("compteur","cub",compteur+1) if norme.upper()=="U": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationHaut(cube,can,face,faceSup,"gauche")) elif norme.upper()=="U'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationHaut(cube,can,face,faceSup,"droite")) elif norme.upper()=="L": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationGauche(cube,can,face,faceSup,"bas")) elif norme.upper()=="L'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationGauche(cube,can,face,faceSup,"haut")) elif norme.upper()=="F": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotation(cube,can,face,"droite")) elif norme.upper()=="F'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotation(cube,can,face,"gauche")) elif norme.upper()=="R": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationDroite(cube,can,face,faceSup,"haut")) elif norme.upper()=="R'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationDroite(cube,can,face,faceSup,"bas")) elif norme.upper()=="D": 
can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationBas(cube,can,face,faceSup,"droite")) elif norme.upper()=="D'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:rotationBas(cube,can,face,faceSup,"gauche")) elif norme.upper()=="M": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"bas")) elif norme.upper()=="M'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"haut")) elif norme.upper()=="E": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"droite")) elif norme.upper()=="E'": can.after(rFile("compteur","cub")*rFile("timeur","cub"),lambda:axe(cube,can,face,faceSup,"gauche")) def rotativeUser(cube,can,face,faceSup,norme): """La fonction 'rotaitveUser' est identique à la fonction 'rotative', la gestion du temps en moins. Cela est donc pratique pour effectuer des mouvements instantanés""" if norme.upper()=="U": rotationHaut(cube,can,face,faceSup,"gauche") elif norme.upper()=="U'": rotationHaut(cube,can,face,faceSup,"droite") elif norme.upper()=="L": rotationGauche(cube,can,face,faceSup,"bas") elif norme.upper()=="L'": rotationGauche(cube,can,face,faceSup,"haut") elif norme.upper()=="F": rotation(cube,can,face,"droite") elif norme.upper()=="F'": rotation(cube,can,face,"gauche") elif norme.upper()=="R": rotationDroite(cube,can,face,faceSup,"haut") elif norme.upper()=="R'": rotationDroite(cube,can,face,faceSup,"bas") elif norme.upper()=="D": rotationBas(cube,can,face,faceSup,"droite") elif norme.upper()=="D'": rotationBas(cube,can,face,faceSup,"gauche") elif norme.upper()=="M": axe(cube,can,face,faceSup,"bas") elif norme.upper()=="M'": axe(cube,can,face,faceSup,"haut") elif norme.upper()=="E": axe(cube,can,face,faceSup,"droite") elif norme.upper()=="E'": axe(cube,can,face,faceSup,"gauche") def rotation(cube,can,face,sens): """Fonction gérant la rotation d'une face. 
Correspond au mouvement F si sens = droite / F' si sens = gauche""" if sens=="droite": cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7], cube[face][6], cube[face][3] \ = cube[face][6], cube[face][3], cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7] pos=posRel(face) cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \ cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \ cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \ cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]] \ = cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]], \ cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \ cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \ cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]] elif sens=="gauche": cube[face][0], cube[face][1], cube[face][2], cube[face][5], cube[face][8], cube[face][7], cube[face][6], cube[face][3] \ = cube[face][2],cube[face][5],cube[face][8],cube[face][7],cube[face][6],cube[face][3],cube[face][0],cube[face][1] pos=posRel(face) cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]], \ cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \ cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \ cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]] \ = cube[pos[1][0]][pos[1][1]], cube[pos[1][0]][pos[1][2]], cube[pos[1][0]][pos[1][3]], \ cube[pos[2][0]][pos[2][3]], cube[pos[2][0]][pos[2][2]], cube[pos[2][0]][pos[2][1]], \ cube[pos[3][0]][pos[3][3]], cube[pos[3][0]][pos[3][2]], cube[pos[3][0]][pos[3][1]], \ cube[pos[0][0]][pos[0][1]], cube[pos[0][0]][pos[0][2]], cube[pos[0][0]][pos[0][3]] 
actualise(cube,can) def rotationHaut(cube,can,face,faceSup,sens): """Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement. Correspond au mouvement U si sens = gauche / U' si sens = droite""" if sens=="gauche": rotation(cube,can,faceSup,"droite") elif sens=="droite": rotation(cube,can,faceSup,"gauche") def rotationBas(cube,can,face,faceSup,sens): """Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement. Correspond au mouvement D si sens = droite / D' si sens = gauche""" faceBas=posRel(faceSup)[4] rotation(cube,can,faceBas,sens) def rotationDroite(cube,can,face,faceSup,sens): """Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement. Correspond au mouvement R si sens = haut / R' si sens = bas""" pos=posRel(face) for i in range(4): if pos[i][0]==faceSup: supChiffre=i faceDroite=posRel(face)[boussole(supChiffre+1)][0] if sens=="haut": rotation(cube,can,faceDroite,"droite") elif sens=="bas": rotation(cube,can,faceDroite,"gauche") def rotationGauche(cube,can,face,faceSup,sens): """Fonction se basant sur la fonction 'rotation' afin de permettre un autre mouvement. 
Correspond au mouvement L si sens = bas / L' si sens = haut""" pos=posRel(face) for i in range(4): if pos[i][0]==faceSup: supChiffre=i faceDroite=posRel(face)[boussole(supChiffre-1)][0] if sens=="haut": rotation(cube,can,faceDroite,"gauche") elif sens=="bas": rotation(cube,can,faceDroite,"droite") def axe(cube,can,face,faceSup,sens): sensGlobal=[] pos=posRel(face) for i in range(4): if pos[i][0]==faceSup: sensChiffre=i sens=int(deReconnaissanceDirection(sens)) sensGlobal=reconnaissanceDirection(sensChiffre+sens) if sensGlobal=="haut": cube[face][1], cube[face][4], cube[face][7], \ cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \ cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \ cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]] \ = cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \ cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \ cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]], \ cube[face][1], cube[face][4], cube[face][7] elif sensGlobal=="droite": cube[face][3], cube[face][4], cube[face][5], \ cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \ cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \ cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]] \ = cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]], \ cube[face][3], cube[face][4], cube[face][5], \ cube[posRel(face)[1][0]][posRel(face)[1][2]], 
cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \ cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5] elif sensGlobal=="gauche": cube[face][3], cube[face][4], cube[face][5], \ cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \ cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \ cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]] \ = cube[posRel(face)[1][0]][posRel(face)[1][2]], cube[posRel(face)[1][0]][4], cube[posRel(face)[1][0]][arreteOppose(posRel(face)[1][2])], \ cube[posRel(face)[4]][3], cube[posRel(face)[4]][4], cube[posRel(face)[4]][5], \ cube[posRel(face)[3][0]][arreteOppose(posRel(face)[3][2])], cube[posRel(face)[3][0]][4], cube[posRel(face)[3][0]][posRel(face)[3][2]], \ cube[face][3], cube[face][4], cube[face][5] elif sensGlobal=="bas": cube[face][1], cube[face][4], cube[face][7], \ cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \ cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1], \ cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]] \ = cube[posRel(face)[0][0]][arreteOppose(posRel(face)[0][2])], cube[posRel(face)[0][0]][4], cube[posRel(face)[0][0]][posRel(face)[0][2]], \ cube[face][1], cube[face][4], cube[face][7], \ cube[posRel(face)[2][0]][posRel(face)[2][2]], cube[posRel(face)[2][0]][4], cube[posRel(face)[2][0]][arreteOppose(posRel(face)[2][2])], \ cube[posRel(face)[4]][7], cube[posRel(face)[4]][4], cube[posRel(face)[4]][1] actualise(cube,can)
[ "lou-theo.laurent@laposte.net" ]
lou-theo.laurent@laposte.net
cee5b7b2b75e484c426ea06dd60947850dfaaf8a
510517e2f9399a6590035e23e1db89dde2ee00f5
/workflow/workflow.py
e04e741e44275bd44f7330ea2fc4267ba22712a6
[]
no_license
fractaledmind/alfred_corpora
686ddbcfd1df92928738efdea716647ab4539380
15e7c3b047a4958dfe0299ee6d5ddc1a32f25de6
refs/heads/master
2021-01-15T14:18:31.475051
2014-09-25T03:18:02
2014-09-25T03:18:02
24,428,371
3
0
null
null
null
null
UTF-8
Python
false
false
74,281
py
# encoding: utf-8 # # Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2014-02-15 # """ The :class:`Workflow` object is the main interface to this library. """ from __future__ import print_function, unicode_literals import os import sys import string import re import plistlib import subprocess import unicodedata import shutil import json import cPickle import pickle import time import logging import logging.handlers try: import xml.etree.cElementTree as ET except ImportError: # pragma: no cover import xml.etree.ElementTree as ET #################################################################### # Standard system icons #################################################################### # These icons are default OS X icons. They are super-high quality, and # will be familiar to users. # This library uses `ICON_ERROR` when a workflow dies in flames, so # in my own workflows, I use `ICON_WARNING` for less fatal errors # (e.g. bad user input, no results etc.) # The system icons are all in this directory. 
There are many more than # are listed here ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources' ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns') ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns') ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns') ICON_COLOUR = ICON_COLOR # Queen's English, if you please # Shown when a workflow throws an error ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns') ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns') ICON_FAVOURITE = ICON_FAVORITE ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns') ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns') ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns') ICON_MUSIC = os.path.join(ICON_ROOT, 'ToolbarMusicFolderIcon.icns') ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns') ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns') ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns') ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns') ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns') ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns') ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns') ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns') #################################################################### # non-ASCII to ASCII diacritic folding. 
# Used by `fold_to_ascii` method #################################################################### ASCII_REPLACEMENTS = { 'À': 'A', 'Á': 'A', 'Â': 'A', 'Ã': 'A', 'Ä': 'A', 'Å': 'A', 'Æ': 'AE', 'Ç': 'C', 'È': 'E', 'É': 'E', 'Ê': 'E', 'Ë': 'E', 'Ì': 'I', 'Í': 'I', 'Î': 'I', 'Ï': 'I', 'Ð': 'D', 'Ñ': 'N', 'Ò': 'O', 'Ó': 'O', 'Ô': 'O', 'Õ': 'O', 'Ö': 'O', 'Ø': 'O', 'Ù': 'U', 'Ú': 'U', 'Û': 'U', 'Ü': 'U', 'Ý': 'Y', 'Þ': 'Th', 'ß': 'ss', 'à': 'a', 'á': 'a', 'â': 'a', 'ã': 'a', 'ä': 'a', 'å': 'a', 'æ': 'ae', 'ç': 'c', 'è': 'e', 'é': 'e', 'ê': 'e', 'ë': 'e', 'ì': 'i', 'í': 'i', 'î': 'i', 'ï': 'i', 'ð': 'd', 'ñ': 'n', 'ò': 'o', 'ó': 'o', 'ô': 'o', 'õ': 'o', 'ö': 'o', 'ø': 'o', 'ù': 'u', 'ú': 'u', 'û': 'u', 'ü': 'u', 'ý': 'y', 'þ': 'th', 'ÿ': 'y', 'Ł': 'L', 'ł': 'l', 'Ń': 'N', 'ń': 'n', 'Ņ': 'N', 'ņ': 'n', 'Ň': 'N', 'ň': 'n', 'Ŋ': 'ng', 'ŋ': 'NG', 'Ō': 'O', 'ō': 'o', 'Ŏ': 'O', 'ŏ': 'o', 'Ő': 'O', 'ő': 'o', 'Œ': 'OE', 'œ': 'oe', 'Ŕ': 'R', 'ŕ': 'r', 'Ŗ': 'R', 'ŗ': 'r', 'Ř': 'R', 'ř': 'r', 'Ś': 'S', 'ś': 's', 'Ŝ': 'S', 'ŝ': 's', 'Ş': 'S', 'ş': 's', 'Š': 'S', 'š': 's', 'Ţ': 'T', 'ţ': 't', 'Ť': 'T', 'ť': 't', 'Ŧ': 'T', 'ŧ': 't', 'Ũ': 'U', 'ũ': 'u', 'Ū': 'U', 'ū': 'u', 'Ŭ': 'U', 'ŭ': 'u', 'Ů': 'U', 'ů': 'u', 'Ű': 'U', 'ű': 'u', 'Ŵ': 'W', 'ŵ': 'w', 'Ŷ': 'Y', 'ŷ': 'y', 'Ÿ': 'Y', 'Ź': 'Z', 'ź': 'z', 'Ż': 'Z', 'ż': 'z', 'Ž': 'Z', 'ž': 'z', 'ſ': 's', 'Α': 'A', 'Β': 'B', 'Γ': 'G', 'Δ': 'D', 'Ε': 'E', 'Ζ': 'Z', 'Η': 'E', 'Θ': 'Th', 'Ι': 'I', 'Κ': 'K', 'Λ': 'L', 'Μ': 'M', 'Ν': 'N', 'Ξ': 'Ks', 'Ο': 'O', 'Π': 'P', 'Ρ': 'R', 'Σ': 'S', 'Τ': 'T', 'Υ': 'U', 'Φ': 'Ph', 'Χ': 'Kh', 'Ψ': 'Ps', 'Ω': 'O', 'α': 'a', 'β': 'b', 'γ': 'g', 'δ': 'd', 'ε': 'e', 'ζ': 'z', 'η': 'e', 'θ': 'th', 'ι': 'i', 'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': 'x', 'ο': 'o', 'π': 'p', 'ρ': 'r', 'ς': 's', 'σ': 's', 'τ': 't', 'υ': 'u', 'φ': 'ph', 'χ': 'kh', 'ψ': 'ps', 'ω': 'o', 'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E', 'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'I', 'К': 'K', 'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 
'O', 'П': 'P', 'Р': 'R', 'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'Kh', 'Ц': 'Ts', 'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Shch', 'Ъ': "'", 'Ы': 'Y', 'Ь': "'", 'Э': 'E', 'Ю': 'Iu', 'Я': 'Ia', 'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e', 'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'i', 'к': 'k', 'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r', 'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'kh', 'ц': 'ts', 'ч': 'ch', 'ш': 'sh', 'щ': 'shch', 'ъ': "'", 'ы': 'y', 'ь': "'", 'э': 'e', 'ю': 'iu', 'я': 'ia', # 'ᴀ': '', # 'ᴁ': '', # 'ᴂ': '', # 'ᴃ': '', # 'ᴄ': '', # 'ᴅ': '', # 'ᴆ': '', # 'ᴇ': '', # 'ᴈ': '', # 'ᴉ': '', # 'ᴊ': '', # 'ᴋ': '', # 'ᴌ': '', # 'ᴍ': '', # 'ᴎ': '', # 'ᴏ': '', # 'ᴐ': '', # 'ᴑ': '', # 'ᴒ': '', # 'ᴓ': '', # 'ᴔ': '', # 'ᴕ': '', # 'ᴖ': '', # 'ᴗ': '', # 'ᴘ': '', # 'ᴙ': '', # 'ᴚ': '', # 'ᴛ': '', # 'ᴜ': '', # 'ᴝ': '', # 'ᴞ': '', # 'ᴟ': '', # 'ᴠ': '', # 'ᴡ': '', # 'ᴢ': '', # 'ᴣ': '', # 'ᴤ': '', # 'ᴥ': '', 'ᴦ': 'G', 'ᴧ': 'L', 'ᴨ': 'P', 'ᴩ': 'R', 'ᴪ': 'PS', 'ẞ': 'Ss', 'Ỳ': 'Y', 'ỳ': 'y', 'Ỵ': 'Y', 'ỵ': 'y', 'Ỹ': 'Y', 'ỹ': 'y', } #################################################################### # Used by `Workflow.filter` #################################################################### # Anchor characters in a name #: Characters that indicate the beginning of a "word" in CamelCase INITIALS = string.ascii_uppercase + string.digits #: Split on non-letters, numbers split_on_delimiters = re.compile('[^a-zA-Z0-9]').split # Match filter flags #: Match items that start with ``query`` MATCH_STARTSWITH = 1 #: Match items whose capital letters start with ``query`` MATCH_CAPITALS = 2 #: Match items with a component "word" that matches ``query`` MATCH_ATOM = 4 #: Match items whose initials (based on atoms) start with ``query`` MATCH_INITIALS_STARTSWITH = 8 #: Match items whose initials (based on atoms) contain ``query`` MATCH_INITIALS_CONTAIN = 16 #: Combination of :const:`MATCH_INITIALS_STARTSWITH` and #: :const:`MATCH_INITIALS_CONTAIN` MATCH_INITIALS = 24 #: 
Match items if ``query`` is a substring MATCH_SUBSTRING = 32 #: Match items if all characters in ``query`` appear in the item in order MATCH_ALLCHARS = 64 #: Combination of all other ``MATCH_*`` constants MATCH_ALL = 127 #################################################################### # Used by `Workflow.check_update` #################################################################### # Number of days to wait between checking for updates to the workflow DEFAULT_UPDATE_FREQUENCY = 1 #################################################################### # Keychain access errors #################################################################### class KeychainError(Exception): """Raised by methods :meth:`Workflow.save_password`, :meth:`Workflow.get_password` and :meth:`Workflow.delete_password` when ``security`` CLI app returns an unknown code. """ class PasswordNotFound(KeychainError): """Raised by method :meth:`Workflow.get_password` when ``account`` is unknown to the Keychain. """ class PasswordExists(KeychainError): """Raised when trying to overwrite an existing account password. The API user should never receive this error: it is used internally by the :meth:`Workflow.save_password` method. """ #################################################################### # Helper functions #################################################################### def isascii(text): """Test if ``text`` contains only ASCII characters :param text: text to test for ASCII-ness :type text: ``unicode`` :returns: ``True`` if ``text`` contains only ASCII characters :rtype: ``Boolean`` """ try: text.encode('ascii') except UnicodeEncodeError: return False return True #################################################################### # Implementation classes #################################################################### class SerializerManager(object): """Contains registered serializers. .. 
versionadded:: 1.8 A configured instance of this class is available at ``workflow.manager``. Use :meth:`register()` to register new (or replace existing) serializers, which you can specify by name when calling :class:`Workflow` data storage methods. See `serialization` and `persistent-data` for further information. """ def __init__(self): self._serializers = {} def register(self, name, serializer): """Register ``serializer`` object under ``name``. Raises :class:`AttributeError` if ``serializer`` in invalid. .. note:: ``name`` will be used as the file extension of the saved files. :param name: Name to register ``serializer`` under :type name: ``unicode`` or ``str`` :param serializer: object with ``load()`` and ``dump()`` methods """ # Basic validation getattr(serializer, 'load') getattr(serializer, 'dump') self._serializers[name] = serializer def serializer(self, name): """Return serializer object for ``name`` or ``None`` if no such serializer is registered :param name: Name of serializer to return :type name: ``unicode`` or ``str`` :returns: serializer object or ``None`` """ return self._serializers.get(name) def unregister(self, name): """Remove registered serializer with ``name`` Raises a :class:`ValueError` if there is no such registered serializer. :param name: Name of serializer to remove :type name: ``unicode`` or ``str`` :returns: serializer object """ if name not in self._serializers: raise ValueError('No such serializer registered : {}'.format(name)) serializer = self._serializers[name] del self._serializers[name] return serializer @property def serializers(self): """Return names of registered serializers""" return sorted(self._serializers.keys()) class JSONSerializer(object): """Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``. .. versionadded:: 1.8 Use this serializer if you need readable data files. JSON doesn't support Python objects as well as ``cPickle``/``pickle``, so be careful which data you try to serialize as JSON. 
""" @classmethod def load(cls, file_obj): """Load serialized object from open JSON file. .. versionadded:: 1.8 :param file_obj: file handle :type file_obj: ``file`` object :returns: object loaded from JSON file :rtype: object """ return json.load(file_obj) @classmethod def dump(cls, obj, file_obj): """Serialize object ``obj`` to open JSON file. .. versionadded:: 1.8 :param obj: Python object to serialize :type obj: JSON-serializable data structure :param file_obj: file handle :type file_obj: ``file`` object """ return json.dump(obj, file_obj, indent=2, encoding='utf-8') class CPickleSerializer(object): """Wrapper around :mod:`cPickle`. Sets ``protocol``. .. versionadded:: 1.8 This is the default serializer and the best combination of speed and flexibility. """ @classmethod def load(cls, file_obj): """Load serialized object from open pickle file. .. versionadded:: 1.8 :param file_obj: file handle :type file_obj: ``file`` object :returns: object loaded from pickle file :rtype: object """ return cPickle.load(file_obj) @classmethod def dump(cls, obj, file_obj): """Serialize object ``obj`` to open pickle file. .. versionadded:: 1.8 :param obj: Python object to serialize :type obj: Python object :param file_obj: file handle :type file_obj: ``file`` object """ return cPickle.dump(obj, file_obj, protocol=-1) class PickleSerializer(object): """Wrapper around :mod:`pickle`. Sets ``protocol``. .. versionadded:: 1.8 Use this serializer if you need to add custom pickling. """ @classmethod def load(cls, file_obj): """Load serialized object from open pickle file. .. versionadded:: 1.8 :param file_obj: file handle :type file_obj: ``file`` object :returns: object loaded from pickle file :rtype: object """ return pickle.load(file_obj) @classmethod def dump(cls, obj, file_obj): """Serialize object ``obj`` to open pickle file. .. 
versionadded:: 1.8 :param obj: Python object to serialize :type obj: Python object :param file_obj: file handle :type file_obj: ``file`` object """ return pickle.dump(obj, file_obj, protocol=-1) # Set up default manager and register built-in serializers manager = SerializerManager() manager.register('cpickle', CPickleSerializer) manager.register('pickle', PickleSerializer) manager.register('json', JSONSerializer) class Item(object): """Represents a feedback item for Alfred. Generates Alfred-compliant XML for a single item. You probably shouldn't use this class directly, but via :meth:`Workflow.add_item`. See :meth:`~Workflow.add_item` for details of arguments. """ def __init__(self, title, subtitle='', modifier_subtitles=None, arg=None, autocomplete=None, valid=False, uid=None, icon=None, icontype=None, type=None, largetext=None, copytext=None): """Arguments the same as for :meth:`Workflow.add_item`. """ self.title = title self.subtitle = subtitle self.modifier_subtitles = modifier_subtitles or {} self.arg = arg self.autocomplete = autocomplete self.valid = valid self.uid = uid self.icon = icon self.icontype = icontype self.type = type self.largetext = largetext self.copytext = copytext @property def elem(self): """Create and return feedback item for Alfred. :returns: :class:`ElementTree.Element <xml.etree.ElementTree.Element>` instance for this :class:`Item` instance. 
""" attr = {} if self.valid: attr['valid'] = 'yes' else: attr['valid'] = 'no' # Optional attributes for name in ('uid', 'type', 'autocomplete'): value = getattr(self, name, None) if value: attr[name] = value root = ET.Element('item', attr) ET.SubElement(root, 'title').text = self.title ET.SubElement(root, 'subtitle').text = self.subtitle # Add modifier subtitles for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'): if mod in self.modifier_subtitles: ET.SubElement(root, 'subtitle', {'mod': mod}).text = self.modifier_subtitles[mod] if self.arg: ET.SubElement(root, 'arg').text = self.arg # Add icon if there is one if self.icon: if self.icontype: attr = dict(type=self.icontype) else: attr = {} ET.SubElement(root, 'icon', attr).text = self.icon if self.largetext: ET.SubElement(root, 'text', {'type': 'largetype'}).text = self.largetext if self.copytext: ET.SubElement(root, 'text', {'type': 'copy'}).text = self.copytext return root class Settings(dict): """A dictionary that saves itself when changed. Dictionary keys & values will be saved as a JSON file at ``filepath``. If the file does not exist, the dictionary (and settings file) will be initialised with ``defaults``. :param filepath: where to save the settings :type filepath: :class:`unicode` :param defaults: dict of default settings :type defaults: :class:`dict` An appropriate instance is provided by :class:`Workflow` instances at :attr:`Workflow.settings`. 
""" def __init__(self, filepath, defaults=None): super(Settings, self).__init__() self._filepath = filepath self._nosave = False if os.path.exists(self._filepath): self._load() elif defaults: for key, val in defaults.items(): self[key] = val self.save() # save default settings def _load(self): """Load cached settings from JSON file `self._filepath`""" self._nosave = True with open(self._filepath, 'rb') as file_obj: for key, value in json.load(file_obj, encoding='utf-8').items(): self[key] = value self._nosave = False def save(self): """Save settings to JSON file specified in ``self._filepath`` If you're using this class via :attr:`Workflow.settings`, which you probably are, ``self._filepath`` will be ``settings.json`` in your workflow's data directory (see :attr:`~Workflow.datadir`). """ if self._nosave: return data = {} for key, value in self.items(): data[key] = value with open(self._filepath, 'wb') as file_obj: json.dump(data, file_obj, sort_keys=True, indent=2, encoding='utf-8') # dict methods def __setitem__(self, key, value): super(Settings, self).__setitem__(key, value) self.save() def __delitem__(self, key): super(Settings, self).__delitem__(key) self.save() def update(self, *args, **kwargs): """Override :class:`dict` method to save on update.""" super(Settings, self).update(*args, **kwargs) self.save() def setdefault(self, key, value=None): """Override :class:`dict` method to save on update.""" ret = super(Settings, self).setdefault(key, value) self.save() return ret class Workflow(object): """Create new :class:`Workflow` instance. :param default_settings: default workflow settings. If no settings file exists, :class:`Workflow.settings` will be pre-populated with ``default_settings``. :type default_settings: :class:`dict` :param update_settings: settings for updating your workflow from GitHub. This must be a :class:`dict` that contains ``github_slug`` and ``version`` keys. 
``github_slug`` is of the form ``username/repo`` and ``version`` **must** correspond to the tag of a release. See :ref:`updates` for more information. :type update_settings: :class:`dict` :param input_encoding: encoding of command line arguments :type input_encoding: :class:`unicode` :param normalization: normalisation to apply to CLI args. See :meth:`Workflow.decode` for more details. :type normalization: :class:`unicode` :param capture_args: capture and act on ``workflow:*`` arguments. See :ref:`Magic arguments <magic-arguments>` for details. :type capture_args: :class:`Boolean` :param libraries: sequence of paths to directories containing libraries. These paths will be prepended to ``sys.path``. :type libraries: :class:`tuple` or :class:`list` """ # Which class to use to generate feedback items. You probably # won't want to change this item_class = Item def __init__(self, default_settings=None, update_settings=None, input_encoding='utf-8', normalization='NFC', capture_args=True, libraries=None): self._default_settings = default_settings or {} self._update_settings = update_settings or {} self._input_encoding = input_encoding self._normalizsation = normalization self._capture_args = capture_args self._workflowdir = None self._settings_path = None self._settings = None self._bundleid = None self._name = None self._cache_serializer = 'cpickle' self._data_serializer = 'cpickle' # info.plist should be in the directory above this one self._info_plist = self.workflowfile('info.plist') self._info = None self._info_loaded = False self._logger = None self._items = [] self._alfred_env = None self._search_pattern_cache = {} if libraries: sys.path = libraries + sys.path if update_settings: self.check_update() #################################################################### # API methods #################################################################### # info.plist contents and alfred_* environment variables ---------- @property def alfred_env(self): """Alfred's 
environmental variables minus the ``alfred_`` prefix. .. versionadded:: 1.7 The variables Alfred 2.4+ exports are: ============================ ========================================= Variable Description ============================ ========================================= alfred_preferences Path to Alfred.alfredpreferences (where your workflows and settings are stored). alfred_preferences_localhash Machine-specific preferences are stored in ``Alfred.alfredpreferences/preferences/local/<hash>`` (see ``alfred_preferences`` above for the path to ``Alfred.alfredpreferences``) alfred_theme ID of selected theme alfred_theme_background Background colour of selected theme in format ``rgba(r,g,b,a)`` alfred_theme_subtext Show result subtext. ``0`` = Always, ``1`` = Alternative actions only, ``2`` = Selected result only, ``3`` = Never alfred_version Alfred version number, e.g. ``'2.4'`` alfred_version_build Alfred build number, e.g. ``277`` alfred_workflow_bundleid Bundle ID, e.g. ``net.deanishe.alfred-mailto`` alfred_workflow_cache Path to workflow's cache directory alfred_workflow_data Path to workflow's data directory alfred_workflow_name Name of current workflow alfred_workflow_uid UID of workflow ============================ ========================================= **Note:** all values are Unicode strings except ``version_build`` and ``theme_subtext``, which are integers. :returns: ``dict`` of Alfred's environmental variables without the ``alfred_`` prefix, e.g. ``preferences``, ``workflow_data``. 
""" if self._alfred_env is not None: return self._alfred_env data = {} for key in ( 'alfred_preferences', 'alfred_preferences_localhash', 'alfred_theme', 'alfred_theme_background', 'alfred_theme_subtext', 'alfred_version', 'alfred_version_build', 'alfred_workflow_bundleid', 'alfred_workflow_cache', 'alfred_workflow_data', 'alfred_workflow_name', 'alfred_workflow_uid'): value = os.getenv(key) if isinstance(value, str): if key in ('alfred_version_build', 'alfred_theme_subtext'): value = int(value) else: value = self.decode(value) data[key[7:]] = value self._alfred_env = data return self._alfred_env @property def info(self): """:class:`dict` of ``info.plist`` contents.""" if not self._info_loaded: self._load_info_plist() return self._info @property def bundleid(self): """Workflow bundle ID from Alfred's environmental vars or ``info.plist``. :returns: bundle ID :rtype: ``unicode`` """ if not self._bundleid: if self.alfred_env.get('workflow_bundleid'): self._bundleid = self.alfred_env.get('workflow_bundleid') else: self._bundleid = unicode(self.info['bundleid'], 'utf-8') return self._bundleid @property def name(self): """Workflow name from Alfred's environmental vars or ``info.plist``. :returns: workflow name :rtype: ``unicode`` """ if not self._name: if self.alfred_env.get('workflow_name'): self._name = self.decode(self.alfred_env.get('workflow_name')) else: self._name = self.decode(self.info['name']) return self._name # Workflow utility methods ----------------------------------------- @property def args(self): """Return command line args as normalised unicode. Args are decoded and normalised via :meth:`~Workflow.decode`. The encoding and normalisation are the ``input_encoding`` and ``normalization`` arguments passed to :class:`Workflow` (``UTF-8`` and ``NFC`` are the defaults). 
If :class:`Workflow` is called with ``capture_args=True`` (the default), :class:`Workflow` will look for certain ``workflow:*`` args and, if found, perform the corresponding actions and exit the workflow. See :ref:`Magic arguments <magic-arguments>` for details. """ msg = None args = [self.decode(arg) for arg in sys.argv[1:]] if len(args) and self._capture_args: if 'workflow:openlog' in args: msg = 'Opening workflow log file' self.open_log() elif 'workflow:reset' in args: self.reset() msg = 'Reset workflow' elif 'workflow:delcache' in args: self.clear_cache() msg = 'Deleted workflow cache' elif 'workflow:deldata' in args: self.clear_data() msg = 'Deleted workflow data' elif 'workflow:delsettings' in args: self.clear_settings() msg = 'Deleted workflow settings' elif 'workflow:openworkflow' in args: msg = 'Opening workflow directory' self.open_workflowdir() elif 'workflow:opendata' in args: msg = 'Opening workflow data directory' self.open_datadir() elif 'workflow:opencache' in args: msg = 'Opening workflow cache directory' self.open_cachedir() elif 'workflow:openterm' in args: msg = 'Opening workflow root directory in Terminal' self.open_terminal() elif 'workflow:foldingon' in args: msg = 'Diacritics will always be folded' self.settings['__workflow_diacritic_folding'] = True elif 'workflow:foldingoff' in args: msg = 'Diacritics will never be folded' self.settings['__workflow_diacritic_folding'] = False elif 'workflow:foldingdefault' in args: msg = 'Diacritics folding reset' if '__workflow_diacritic_folding' in self.settings: del self.settings['__workflow_diacritic_folding'] elif 'workflow:update' in args: msg = 'Updating workflow' self.start_update() if msg: self.logger.debug(msg) if not sys.stdout.isatty(): # Show message in Alfred self.add_item(msg, valid=False, icon=ICON_INFO) self.send_feedback() sys.exit(0) return args @property def cachedir(self): """Path to workflow's cache directory. 
The cache directory is a subdirectory of Alfred's own cache directory in ``~/Library/Caches``. The full path is: ``~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/<bundle id>`` :returns: full path to workflow's cache directory :rtype: ``unicode`` """ if self.alfred_env.get('workflow_cache'): dirpath = self.alfred_env.get('workflow_cache') else: dirpath = os.path.join( os.path.expanduser( '~/Library/Caches/com.runningwithcrayons.Alfred-2/' 'Workflow Data/'), self.bundleid) return self._create(dirpath) @property def datadir(self): """Path to workflow's data directory. The data directory is a subdirectory of Alfred's own data directory in ``~/Library/Application Support``. The full path is: ``~/Library/Application Support/Alfred 2/Workflow Data/<bundle id>`` :returns: full path to workflow data directory :rtype: ``unicode`` """ if self.alfred_env.get('workflow_data'): dirpath = self.alfred_env.get('workflow_data') else: dirpath = os.path.join(os.path.expanduser( '~/Library/Application Support/Alfred 2/Workflow Data/'), self.bundleid) return self._create(dirpath) @property def workflowdir(self): """Path to workflow's root directory (where ``info.plist`` is). :returns: full path to workflow root directory :rtype: ``unicode`` """ if not self._workflowdir: # Try the working directory first, then the directory # the library is in. 
CWD will be the workflow root if # a workflow is being run in Alfred candidates = [ os.path.abspath(os.getcwdu()), os.path.dirname(os.path.abspath(os.path.dirname(__file__)))] # climb the directory tree until we find `info.plist` for dirpath in candidates: # Ensure directory path is Unicode dirpath = self.decode(dirpath) while True: if os.path.exists(os.path.join(dirpath, 'info.plist')): self._workflowdir = dirpath break elif dirpath == '/': # no `info.plist` found break # Check the parent directory dirpath = os.path.dirname(dirpath) # No need to check other candidates if self._workflowdir: break if not self._workflowdir: raise IOError("'info.plist' not found in directory tree") return self._workflowdir def cachefile(self, filename): """Return full path to ``filename`` within your workflow's :attr:`cache directory <Workflow.cachedir>`. :param filename: basename of file :type filename: ``unicode`` :returns: full path to file within cache directory :rtype: ``unicode`` """ return os.path.join(self.cachedir, filename) def datafile(self, filename): """Return full path to ``filename`` within your workflow's :attr:`data directory <Workflow.datadir>`. :param filename: basename of file :type filename: ``unicode`` :returns: full path to file within data directory :rtype: ``unicode`` """ return os.path.join(self.datadir, filename) def workflowfile(self, filename): """Return full path to ``filename`` in workflow's root dir (where ``info.plist`` is). :param filename: basename of file :type filename: ``unicode`` :returns: full path to file within data directory :rtype: ``unicode`` """ return os.path.join(self.workflowdir, filename) @property def logfile(self): """Return path to logfile :returns: path to logfile within workflow's cache directory :rtype: ``unicode`` """ return self.cachefile('%s.log' % self.bundleid) @property def logger(self): """Create and return a logger that logs to both console and a log file. Use `~Workflow.openlog` to open the log file in Console. 
:returns: an initialised logger :rtype: `~logging.Logger` instance """ if self._logger: return self._logger # Initialise new logger and optionally handlers logger = logging.getLogger('workflow') if not len(logger.handlers): # Only add one set of handlers logfile = logging.handlers.RotatingFileHandler( self.logfile, maxBytes=1024*1024, backupCount=0) console = logging.StreamHandler() fmt = logging.Formatter( '%(asctime)s %(filename)s:%(lineno)s' ' %(levelname)-8s %(message)s', datefmt='%H:%M:%S') logfile.setFormatter(fmt) console.setFormatter(fmt) logger.addHandler(logfile) logger.addHandler(console) logger.setLevel(logging.DEBUG) self._logger = logger return self._logger @logger.setter def logger(self, logger): """Set a custom logger. :param logger: The logger to use :type logger: `~logging.Logger` instance """ self._logger = logger @property def settings_path(self): """Path to settings file within workflow's data directory. :returns: path to ``settings.json`` file :rtype: ``unicode`` """ if not self._settings_path: self._settings_path = self.datafile('settings.json') return self._settings_path @property def settings(self): """Return a dictionary subclass that saves itself when changed. :returns: :class:`Settings` instance initialised from the data in JSON file at :attr:`settings_path` or if that doesn't exist, with the ``default_settings`` ``dict`` passed to :class:`Workflow`. :rtype: :class:`Settings` instance """ if not self._settings: self._settings = Settings(self.settings_path, self._default_settings) return self._settings @property def cache_serializer(self): """Name of default cache serializer. .. versionadded:: 1.8 This serializer is used by :meth:`cache_data()` and :meth:`cached_data()` See :class:`SerializerManager` for details. :returns: serializer name :rtype: ``unicode`` """ return self._cache_serializer @cache_serializer.setter def cache_serializer(self, serializer_name): """Set the default cache serialization format. .. 
versionadded:: 1.8 This serializer is used by :meth:`cache_data()` and :meth:`cached_data()` The specified serializer must already by registered with the :class:`SerializerManager` at `~workflow.workflow.manager`, otherwise a :class:`ValueError` will be raised. :param serializer_name: Name of default serializer to use. :type serializer_name: """ if manager.serializer(serializer_name) is None: raise ValueError( 'Unknown serializer : `{}`. Register your serializer ' 'with `manager` first.'.format(serializer_name)) self.logger.debug( 'default cache serializer set to `{}`'.format(serializer_name)) self._cache_serializer = serializer_name @property def data_serializer(self): """Name of default data serializer. .. versionadded:: 1.8 This serializer is used by :meth:`store_data()` and :meth:`stored_data()` See :class:`SerializerManager` for details. :returns: serializer name :rtype: ``unicode`` """ return self._data_serializer @data_serializer.setter def data_serializer(self, serializer_name): """Set the default cache serialization format. .. versionadded:: 1.8 This serializer is used by :meth:`store_data()` and :meth:`stored_data()` The specified serializer must already by registered with the :class:`SerializerManager` at `~workflow.workflow.manager`, otherwise a :class:`ValueError` will be raised. :param serializer_name: Name of default serializer to use. :type serializer_name: """ if manager.serializer(serializer_name) is None: raise ValueError( 'Unknown serializer : `{}`. Register your serializer ' 'with `manager` first.'.format(serializer_name)) self.logger.debug( 'default data serializer set to `{}`'.format(serializer_name)) self._data_serializer = serializer_name def stored_data(self, name): """Retrieve data from data directory. Returns ``None`` if there is no data stored. .. 
versionadded:: 1.8 :param name: name of datastore :type name: ``unicode`` """ metadata_path = self.datafile('.{}.alfred-workflow'.format(name)) if not os.path.exists(metadata_path): self.logger.debug('No data stored for `{}`'.format(name)) return None with open(metadata_path, 'rb') as file_obj: serializer_name = file_obj.read().strip() serializer = manager.serializer(serializer_name) if serializer is None: raise ValueError( 'Unknown serializer `{}`. Register a corresponding serializer ' 'with `manager.register()` to load this data.'.format( serializer_name)) self.logger.debug('Data `{}` stored in `{}` format'.format( name, serializer_name)) filename = '{}.{}'.format(name, serializer_name) data_path = self.datafile(filename) if not os.path.exists(data_path): self.logger.debug('No data stored for `{}`'.format(name)) if os.path.exists(metadata_path): os.unlink(metadata_path) return None with open(data_path, 'rb') as file_obj: data = serializer.load(file_obj) self.logger.debug('Stored data loaded from : {}'.format(data_path)) return data def store_data(self, name, data, serializer=None): """Save data to data directory. .. versionadded:: 1.8 If ``data`` is ``None``, the datastore will be deleted. :param name: name of datastore :type name: ``unicode`` :param data: object(s) to store :type data: artibrary Python objects. **Note:** some serializers can only handled certain types of data. :param serializer: name of serializer to use. See :class:`SerializerManager` for more information. :type serializer: ``unicode`` :returns: data in datastore or ``None`` """ serializer_name = serializer or self.data_serializer if serializer_name == 'json' and name == 'settings': raise ValueError( 'Cannot save data to `settings` with format `json`. ' "This would overwrite Alfred-Workflow's settings file.") serializer = manager.serializer(serializer_name) if serializer is None: raise ValueError( 'Invalid serializer `{}`. 
Register your serializer with ' '`manager.register()` first.'.format(serializer_name)) # In order for `stored_data()` to be able to load data stored with # an arbitrary serializer, yet still have meaningful file extensions, # the format (i.e. extension) is saved to an accompanying file metadata_path = self.datafile('.{}.alfred-workflow'.format(name)) filename = '{}.{}'.format(name, serializer_name) data_path = self.datafile(filename) if data is None: # Delete cached data for path in (metadata_path, data_path): if os.path.exists(path): os.unlink(path) self.logger.debug('Deleted data file : {}'.format(path)) return # Save file extension with open(metadata_path, 'wb') as file_obj: file_obj.write(serializer_name) with open(data_path, 'wb') as file_obj: serializer.dump(data, file_obj) self.logger.debug('Stored data saved at : {}'.format(data_path)) def cached_data(self, name, data_func=None, max_age=60): """Retrieve data from cache or re-generate and re-cache data if stale/non-existant. If ``max_age`` is 0, return cached data no matter how old. :param name: name of datastore :type name: ``unicode`` :param data_func: function to (re-)generate data. :type data_func: ``callable`` :param max_age: maximum age of cached data in seconds :type max_age: ``int`` :returns: cached data, return value of ``data_func`` or ``None`` if ``data_func`` is not set :rtype: whatever ``data_func`` returns or ``None`` """ serializer = manager.serializer(self.cache_serializer) cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer)) age = self.cached_data_age(name) if (age < max_age or max_age == 0) and os.path.exists(cache_path): with open(cache_path, 'rb') as file_obj: self.logger.debug('Loading cached data from : %s', cache_path) return serializer.load(file_obj) if not data_func: return None data = data_func() self.cache_data(name, data) return data def cache_data(self, name, data): """Save ``data`` to cache under ``name``. 
        If ``data`` is ``None``, the corresponding cache file will be
        deleted.

        :param name: name of datastore
        :type name: ``unicode``
        :param data: data to store
        :type data: any object supported by :mod:`pickle`

        """
        # Serializer is resolved by name so the format can be swapped via
        # the ``cache_serializer`` property
        serializer = manager.serializer(self.cache_serializer)
        # Cache filename carries the serializer name as its extension
        cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))

        if data is None:  # ``None`` means "delete this cache entry"
            if os.path.exists(cache_path):
                os.unlink(cache_path)
                self.logger.debug('Deleted cache file : %s', cache_path)
            return

        with open(cache_path, 'wb') as file_obj:
            serializer.dump(data, file_obj)

        self.logger.debug('Cached data saved at : %s', cache_path)

    def cached_data_fresh(self, name, max_age):
        """Is data cached at `name` less than `max_age` old?

        :param name: name of datastore
        :type name: ``unicode``
        :param max_age: maximum age of data in seconds
        :type max_age: ``int``
        :returns: ``True`` if data is less than ``max_age`` old, else
            ``False``
        :rtype: ``Boolean``

        """
        age = self.cached_data_age(name)
        # NOTE(review): ``cached_data_age()`` returns 0 both when the cache
        # file is missing and (in theory) when it was written this very
        # instant; both cases are treated as "not fresh" here. In practice
        # the age of an existing file is a non-zero float, so only the
        # missing-cache case is hit.
        if not age:
            return False
        return age < max_age

    def cached_data_age(self, name):
        """Return age of data cached at `name` in seconds or 0 if
        cache doesn't exist

        :param name: name of datastore
        :type name: ``unicode``
        :returns: age of datastore in seconds
        :rtype: ``int``

        """
        cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
        if not os.path.exists(cache_path):
            return 0  # no cache file at all
        # Age = now minus the file's last-modified time
        return time.time() - os.stat(cache_path).st_mtime

    def filter(self, query, items, key=lambda x: x, ascending=False,
               include_score=False, min_score=0, max_results=0,
               match_on=MATCH_ALL, fold_diacritics=True):
        """Fuzzy search filter. Returns list of ``items`` that match ``query``.

        ``query`` is case-insensitive. Any item that does not contain the
        entirety of ``query`` is rejected.

        .. warning::

            If ``query`` is an empty string or contains only whitespace,
            a :class:`ValueError` will be raised.
:param query: query to test items against :type query: ``unicode`` :param items: iterable of items to test :type items: ``list`` or ``tuple`` :param key: function to get comparison key from ``items``. Must return a ``unicode`` string. The default simply returns the item. :type key: ``callable`` :param ascending: set to ``True`` to get worst matches first :type ascending: ``Boolean`` :param include_score: Useful for debugging the scoring algorithm. If ``True``, results will be a list of tuples ``(item, score, rule)``. :type include_score: ``Boolean`` :param min_score: If non-zero, ignore results with a score lower than this. :type min_score: ``int`` :param max_results: If non-zero, prune results list to this length. :type max_results: ``int`` :param match_on: Filter option flags. Bitwise-combined list of ``MATCH_*`` constants (see below). :type match_on: ``int`` :param fold_diacritics: Convert search keys to ASCII-only characters if ``query`` only contains ASCII characters. :type fold_diacritics: ``Boolean`` :returns: list of ``items`` matching ``query`` or list of ``(item, score, rule)`` `tuples` if ``include_score`` is ``True``. ``rule`` is the ``MATCH_*`` rule that matched the item. :rtype: ``list`` **Matching rules** By default, :meth:`filter` uses all of the following flags (i.e. :const:`MATCH_ALL`). The tests are always run in the given order: 1. :const:`MATCH_STARTSWITH` : Item search key startswith ``query`` (case-insensitive). 2. :const:`MATCH_CAPITALS` : The list of capital letters in item search key starts with ``query`` (``query`` may be lower-case). E.g., ``of`` would match ``OmniFocus``, ``gc`` would match ``Google Chrome`` 3. :const:`MATCH_ATOM` : Search key is split into "atoms" on non-word characters (.,-,' etc.). Matches if ``query`` is one of these atoms (case-insensitive). 4. :const:`MATCH_INITIALS_STARTSWITH` : Initials are the first characters of the above-described "atoms" (case-insensitive). 5. 
:const:`MATCH_INITIALS_CONTAIN` : ``query`` is a substring of the above-described initials. 6. :const:`MATCH_INITIALS` : Combination of (4) and (5). 7. :const:`MATCH_SUBSTRING` : Match if ``query`` is a substring of item search key (case-insensitive). 8. :const:`MATCH_ALLCHARS` : Matches if all characters in ``query`` appear in item search key in the same order (case-insensitive). 9. :const:`MATCH_ALL` : Combination of all the above. :const:`MATCH_ALLCHARS` is considerably slower than the other tests and provides much less accurate results. **Examples:** To ignore :const:`MATCH_ALLCHARS` (tends to provide the worst matches and is expensive to run), use ``match_on=MATCH_ALL ^ MATCH_ALLCHARS``. To match only on capitals, use ``match_on=MATCH_CAPITALS``. To match only on startswith and substring, use ``match_on=MATCH_STARTSWITH | MATCH_SUBSTRING``. **Diacritic folding** .. versionadded:: 1.3 If ``fold_diacritics`` is ``True`` (the default), and ``query`` contains only ASCII characters, non-ASCII characters in search keys will be converted to ASCII equivalents (e.g. **ü** -> **u**, **ß** -> **ss**, **é** -> **e**). See :const:`ASCII_REPLACEMENTS` for all replacements. If ``query`` contains non-ASCII characters, search keys will not be altered. """ if not query: raise ValueError('Empty `query`') # Remove preceding/trailing spaces query = query.strip() if not query: raise ValueError('`query` contains only whitespace') # Use user override if there is one fold_diacritics = self.settings.get('__workflow_diacritic_folding', fold_diacritics) results = [] for item in items: skip = False score = 0 words = [s.strip() for s in query.split(' ')] value = key(item).strip() if value == '': continue for word in words: if word == '': continue s, r = self._filter_item(value, word, match_on, fold_diacritics) if not s: # Skip items that don't match part of the query skip = True score += s if skip: continue if score: # use "reversed" `score` (i.e. 
highest becomes lowest) and # `value` as sort key. This means items with the same score # will be sorted in alphabetical not reverse alphabetical order results.append(((100.0 / score, value.lower(), score), (item, score, r))) # sort on keys, then discard the keys results.sort(reverse=ascending) results = [t[1] for t in results] if max_results and len(results) > max_results: results = results[:max_results] if min_score: results = [r for r in results if r[1] > min_score] # return list of ``(item, score, rule)`` if include_score: return results # just return list of items return [t[0] for t in results] def _filter_item(self, value, query, match_on, fold_diacritics): """Filter ``value`` against ``query`` using rules ``match_on`` :returns: ``(score, rule)`` """ query = query.lower() queryset = set(query) if not isascii(query): fold_diacritics = False rule = None score = 0 if fold_diacritics: value = self.fold_to_ascii(value) # pre-filter any items that do not contain all characters # of ``query`` to save on running several more expensive tests if not queryset <= set(value.lower()): return (0, None) # item starts with query if (match_on & MATCH_STARTSWITH and value.lower().startswith(query)): score = 100.0 - (len(value) / len(query)) rule = MATCH_STARTSWITH if not score and match_on & MATCH_CAPITALS: # query matches capitalised letters in item, # e.g. of = OmniFocus initials = ''.join([c for c in value if c in INITIALS]) if initials.lower().startswith(query): score = 100.0 - (len(initials) / len(query)) rule = MATCH_CAPITALS if not score: if (match_on & MATCH_ATOM or match_on & MATCH_INITIALS_CONTAIN or match_on & MATCH_INITIALS_STARTSWITH): # split the item into "atoms", i.e. words separated by # spaces or other non-word characters atoms = [s.lower() for s in split_on_delimiters(value)] # print('atoms : %s --> %s' % (value, atoms)) # initials of the atoms initials = ''.join([s[0] for s in atoms if s]) if match_on & MATCH_ATOM: # is `query` one of the atoms in item? 
# similar to substring, but scores more highly, as it's # a word within the item if query in atoms: score = 100.0 - (len(value) / len(query)) rule = MATCH_ATOM if not score: # `query` matches start (or all) of the initials of the # atoms, e.g. ``himym`` matches "How I Met Your Mother" # *and* "how i met your mother" (the ``capitals`` rule only # matches the former) if (match_on & MATCH_INITIALS_STARTSWITH and initials.startswith(query)): score = 100.0 - (len(initials) / len(query)) rule = MATCH_INITIALS_STARTSWITH # `query` is a substring of initials, e.g. ``doh`` matches # "The Dukes of Hazzard" elif (match_on & MATCH_INITIALS_CONTAIN and query in initials): score = 95.0 - (len(initials) / len(query)) rule = MATCH_INITIALS_CONTAIN if not score: # `query` is a substring of item if match_on & MATCH_SUBSTRING and query in value.lower(): score = 90.0 - (len(value) / len(query)) rule = MATCH_SUBSTRING if not score: # finally, assign a score based on how close together the # characters in `query` are in item. if match_on & MATCH_ALLCHARS: search = self._search_for_query(query) match = search(value) if match: score = 100.0 / ((1 + match.start()) * (match.end() - match.start() + 1)) rule = MATCH_ALLCHARS if score > 0: return (score, rule) return (0, None) def _search_for_query(self, query): if query in self._search_pattern_cache: return self._search_pattern_cache[query] # Build pattern: include all characters pattern = [] for c in query: # pattern.append('[^{0}]*{0}'.format(re.escape(c))) pattern.append('.*?{0}'.format(re.escape(c))) pattern = ''.join(pattern) search = re.compile(pattern, re.IGNORECASE).search self._search_pattern_cache[query] = search return search def run(self, func): """Call ``func`` to run your workflow :param func: Callable to call with ``self`` (i.e. the :class:`Workflow` instance) as first argument. ``func`` will be called with :class:`Workflow` instance as first argument. ``func`` should be the main entry point to your workflow. 
Any exceptions raised will be logged and an error message will be output to Alfred. """ try: func(self) except Exception as err: self.logger.exception(err) if not sys.stdout.isatty(): # Show error in Alfred self._items = [] if self._name: name = self._name elif self._bundleid: name = self._bundleid else: # pragma: no cover name = os.path.dirname(__file__) self.add_item("Error in workflow '%s'" % name, unicode(err), icon=ICON_ERROR) self.send_feedback() return 1 return 0 # Alfred feedback methods ------------------------------------------ def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None, autocomplete=None, valid=False, uid=None, icon=None, icontype=None, type=None, largetext=None, copytext=None): """Add an item to be output to Alfred :param title: Title shown in Alfred :type title: ``unicode`` :param subtitle: Subtitle shown in Alfred :type subtitle: ``unicode`` :param modifier_subtitles: Subtitles shown when modifier (CMD, OPT etc.) is pressed. Use a ``dict`` with the lowercase keys ``cmd``, ``ctrl``, ``shift``, ``alt`` and ``fn`` :type modifier_subtitles: ``dict`` :param arg: Argument passed by Alfred as ``{query}`` when item is actioned :type arg: ``unicode`` :param autocomplete: Text expanded in Alfred when item is TABbed :type autocomplete: ``unicode`` :param valid: Whether or not item can be actioned :type valid: ``Boolean`` :param uid: Used by Alfred to remember/sort items :type uid: ``unicode`` :param icon: Filename of icon to use :type icon: ``unicode`` :param icontype: Type of icon. Must be one of ``None`` , ``'filetype'`` or ``'fileicon'``. Use ``'filetype'`` when ``icon`` is a filetype such as ``'public.folder'``. Use ``'fileicon'`` when you wish to use the icon of the file specified as ``icon``, e.g. ``icon='/Applications/Safari.app', icontype='fileicon'``. Leave as `None` if ``icon`` points to an actual icon file. :type icontype: ``unicode`` :param type: Result type. Currently only ``'file'`` is supported (by Alfred). 
This will tell Alfred to enable file actions for this item. :type type: ``unicode`` :param largetext: Text to be displayed in Alfred's large text box if user presses CMD+L on item. :type largetext: ``unicode`` :param copytext: Text to be copied to pasteboard if user presses CMD+C on item. :type copytext: ``unicode`` :returns: :class:`Item` instance See the :ref:`script-filter-results` section of the documentation for a detailed description of what the various parameters do and how they interact with one another. See :ref:`icons` for a list of the supported system icons. .. note:: Although this method returns an :class:`Item` instance, you don't need to hold onto it or worry about it. All generated :class:`Item` instances are also collected internally and sent to Alfred when :meth:`send_feedback` is called. The generated :class:`Item` is only returned in case you want to edit it or do something with it other than send it to Alfred. """ item = self.item_class(title, subtitle, modifier_subtitles, arg, autocomplete, valid, uid, icon, icontype, type, largetext, copytext) self._items.append(item) return item def send_feedback(self): """Print stored items to console/Alfred as XML.""" root = ET.Element('items') for item in self._items: root.append(item.elem) sys.stdout.write('<?xml version="1.0" encoding="utf-8"?>\n') sys.stdout.write(ET.tostring(root).encode('utf-8')) sys.stdout.flush() #################################################################### # Updating methods #################################################################### @property def update_available(self): """Is an update available? :returns: ``True`` if an update is available, else ``False`` """ update_data = self.cached_data('__workflow_update_status') if not update_data or not update_data.get('available'): return False return update_data['available'] def check_update(self, force=False): """Check if it's time to update and call update script if it is. 
:param force: Force update check :type force: ``Boolean`` """ frequency = self._update_settings.get('frequency', DEFAULT_UPDATE_FREQUENCY) if (force or not self.cached_data_fresh( '__workflow_update_status', frequency * 86400)): github_slug = self._update_settings['github_slug'] version = self._update_settings['version'] from background import run_in_background # update.py is adjacent to this file update_script = os.path.join(os.path.dirname(__file__), b'update.py') cmd = ['/usr/bin/python', update_script, github_slug, version] self.logger.info('Checking for update ...') run_in_background('__workflow_update', cmd) else: self.logger.debug('Update not due') def start_update(self): """Check for update and download and install new workflow file :returns: ``True`` if an update is available, else ``False`` """ import update github_slug = self._update_settings['github_slug'] version = self._update_settings['version'] if not update.check_update(github_slug, version): return False update_data = self.cached_data('__workflow_update_status') if (update_data is None or not update_data.get('available')): return False # pragma: no cover local_file = update.download_workflow(update_data['download_url']) self.logger.debug('Installing updated workflow ...') subprocess.call(['open', local_file]) update_data['available'] = False self.cache_data('__workflow_update_status', update_data) return True #################################################################### # Keychain password storage methods #################################################################### def save_password(self, account, password, service=None): """Save account credentials. If the account exists, the old password will first be deleted (Keychain throws an error otherwise). If something goes wrong, a `KeychainError` exception will be raised. :param account: name of the account the password is for, e.g. 
"Pinboard" :type account: ``unicode`` :param password: the password to secure :type password: ``unicode`` :param service: Name of the service. By default, this is the workflow's bundle ID :type service: ``unicode`` """ if not service: service = self.bundleid try: retcode, output = self._call_security('add-generic-password', service, account, '-w', password) self.logger.debug('Saved password : %s:%s', service, account) except PasswordExists: self.logger.debug('Password exists : %s:%s', service, account) current_password = self.get_password(account, service) if current_password == password: self.logger.debug('Password unchanged') else: self.delete_password(account, service) retcode, output = self._call_security('add-generic-password', service, account, '-w', password) self.logger.debug('save_password : %s:%s', service, account) def get_password(self, account, service=None): """Retrieve the password saved at ``service/account``. Raise :class:`PasswordNotFound` exception if password doesn't exist. :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param service: Name of the service. By default, this is the workflow's bundle ID :type service: ``unicode`` :returns: account password :rtype: ``unicode`` """ if not service: service = self.bundleid retcode, password = self._call_security('find-generic-password', service, account, '-w') self.logger.debug('get_password : %s:%s', service, account) return password def delete_password(self, account, service=None): """Delete the password stored at ``service/account``. Raises :class:`PasswordNotFound` if account is unknown. :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param service: Name of the service. 
        By default, this is the workflow's bundle ID
        :type service: ``unicode``

        """
        if not service:
            # default to this workflow's own Keychain service
            service = self.bundleid
        retcode, output = self._call_security('delete-generic-password',
                                              service, account)

        self.logger.debug('delete_password : %s:%s', service, account)

    ####################################################################
    # Methods for workflow:* magic args
    ####################################################################

    def clear_cache(self):
        """Delete all files in workflow cache directory."""
        self._delete_directory_contents(self.cachedir)

    def clear_data(self):
        """Delete all files in workflow data directory."""
        self._delete_directory_contents(self.datadir)

    def clear_settings(self):
        """Delete settings file."""
        if os.path.exists(self.settings_path):
            os.unlink(self.settings_path)
            self.logger.debug('Deleted : %r', self.settings_path)

    def reset(self):
        """Delete settings, cache and data"""
        # full teardown: combines the three clear_* methods above
        self.clear_cache()
        self.clear_data()
        self.clear_settings()

    def open_log(self):
        """Open log file in standard application (usually Console.app)."""
        # `open` is the OS X launcher command
        subprocess.call(['open', self.logfile])

    def open_cachedir(self):
        """Open the workflow cache directory in Finder."""
        subprocess.call(['open', self.cachedir])

    def open_datadir(self):
        """Open the workflow data directory in Finder."""
        subprocess.call(['open', self.datadir])

    def open_workflowdir(self):
        """Open the workflow directory in Finder."""
        subprocess.call(['open', self.workflowdir])

    def open_terminal(self):
        """Open a Terminal window at workflow directory."""
        subprocess.call(['open', '-a', 'Terminal', self.workflowdir])

    ####################################################################
    # Helper methods
    ####################################################################

    def decode(self, text, encoding=None, normalization=None):
        """Return ``text`` as normalised unicode.

        If ``encoding`` and/or ``normalization`` is ``None``, the
        ``input_encoding`` and ``normalization`` parameters passed to
        :class:`Workflow` are used.
:param text: string :type text: encoded or Unicode string. If ``text`` is already a Unicode string, it will only be normalised. :param encoding: The text encoding to use to decode ``text`` to Unicode. :type encoding: ``unicode`` or ``None`` :param normalization: The nomalisation form to apply to ``text``. :type normalization: ``unicode`` or ``None`` :returns: decoded and normalised ``unicode`` :class:`Workflow` uses "NFC" normalisation by default. This is the standard for Python and will work well with data from the web (via :mod:`~workflow.web` or :mod:`json`). OS X, on the other hand, uses "NFD" normalisation (nearly), so data coming from the system (e.g. via :mod:`subprocess` or :func:`os.listdir`/:mod:`os.path`) may not match. You should either normalise this data, too, or change the default normalisation used by :class:`Workflow`. """ encoding = encoding or self._input_encoding normalization = normalization or self._normalizsation if not isinstance(text, unicode): text = unicode(text, encoding) return unicodedata.normalize(normalization, text) def fold_to_ascii(self, text): """Convert non-ASCII characters to closest ASCII equivalent. .. versionadded:: 1.3 .. note:: This only works for a subset of European languages. 
:param text: text to convert :type text: ``unicode`` :returns: text containing only ASCII characters :rtype: ``unicode`` """ if isascii(text): return text text = ''.join([ASCII_REPLACEMENTS.get(c, c) for c in text]) return unicode(unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')) def _delete_directory_contents(self, dirpath): """Delete all files in a directory :param dirpath: path to directory to clear :type dirpath: ``unicode`` or ``str`` """ if os.path.exists(dirpath): for filename in os.listdir(dirpath): path = os.path.join(dirpath, filename) if os.path.isdir(path): shutil.rmtree(path) else: os.unlink(path) self.logger.debug('Deleted : %r', path) def _load_info_plist(self): """Load workflow info from ``info.plist`` """ self._info = plistlib.readPlist(self._info_plist) self._info_loaded = True def _create(self, dirpath): """Create directory `dirpath` if it doesn't exist :param dirpath: path to directory :type dirpath: ``unicode`` :returns: ``dirpath`` argument :rtype: ``unicode`` """ if not os.path.exists(dirpath): os.makedirs(dirpath) return dirpath def _call_security(self, action, service, account, *args): """Call the ``security`` CLI app that provides access to keychains. May raise `PasswordNotFound`, `PasswordExists` or `KeychainError` exceptions (the first two are subclasses of `KeychainError`). :param action: The ``security`` action to call, e.g. ``add-generic-password`` :type action: ``unicode`` :param service: Name of the service. :type service: ``unicode`` :param account: name of the account the password is for, e.g. "Pinboard" :type account: ``unicode`` :param password: the password to secure :type password: ``unicode`` :param *args: list of command line arguments to be passed to ``security`` :type *args: `list` or `tuple` :returns: ``(retcode, output)``. ``retcode`` is an `int`, ``output`` a ``unicode`` string. 
:rtype: `tuple` (`int`, ``unicode``) """ cmd = ['security', action, '-s', service, '-a', account] + list(args) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) retcode, output = p.wait(), p.stdout.read().strip().decode('utf-8') if retcode == 44: # password does not exist raise PasswordNotFound() elif retcode == 45: # password already exists raise PasswordExists() elif retcode > 0: err = KeychainError('Unknown Keychain error : %s' % output) err.retcode = retcode raise err return (retcode, output)
[ "margheim@sas.upenn.edu" ]
margheim@sas.upenn.edu
6ffa9e58676b1f6471c34d935594e1e7e506b321
3278b356a37bcfd689480f6a75b7890ef53f7821
/文字型/No.22.py
c1ed893aaaef1252ff6bb0b58c0a462773de8221
[]
no_license
kawasaki2254/Python_100problem_practice_problem
bb83fe3953f46178a0bb8870c315945bf3f1e18e
7ee39e965e2b32cf52345635834529f337aaca8c
refs/heads/main
2023-02-11T13:41:46.778199
2021-01-11T05:55:48
2021-01-11T05:55:48
326,548,979
1
0
null
null
null
null
UTF-8
Python
false
false
395
py
"""Problem 22: read two strings and print only the characters that
appear in both of them (in first-string order, without duplicates)."""


def common_characters(word1, word2):
    """Return the characters of *word1* that also occur in *word2*.

    Each shared character appears at most once, in the order of its
    first occurrence in *word1*.
    """
    shared = ''
    for ch in word1:
        # the "not in shared" test removes duplicates while keeping order
        if ch in word2 and ch not in shared:
            shared += ch
    return shared


if __name__ == '__main__':
    # Prompts and the output format are the original (Japanese) UI strings.
    word1 = input('1つ目の文字列を入力してください > ')
    word2 = input('2つ目の文字列を入力してください > ')
    r = common_characters(word1, word2)
    print(f'重複する文字列 : {r}')
[ "kawasakitota2254@gmail.com" ]
kawasakitota2254@gmail.com
2ae484c93d1a05fcf1465498bc32931f6f58f9be
3135f95ab49c7ed9e6e93741748467cfd692f7d5
/quad_pid.py
dff43de662d268fb89541dd99d0d1d22e516a9c2
[]
no_license
Jaeyoung-Lim/mujocoquad
b674c45aa11a0ec7e5a8fcab2f03e534561cfc22
ca4fb25fc0dd105bf7dd20114b54b82c0da49318
refs/heads/master
2020-04-18T22:08:12.949718
2019-01-26T16:38:55
2019-01-26T16:38:55
167,785,118
0
0
null
2019-01-27T08:05:48
2019-01-27T08:05:48
null
UTF-8
Python
false
false
4,237
py
"""Cascaded PID position/attitude controller for a MuJoCo quadrotor gym env."""
import gym
import numpy as np

from mujocoquad_gym.envs.mujocoquad_force import MujocoQuadForceEnv


class Trajectory:
    """Parameters of the circular reference trajectory."""
    R = 0.5  # trajectory radius
    w = 1.0  # trajectory angular speed (rad/s)


class CtrlParam:
    """PID gain matrices: 4x4 for [z, roll, pitch, yaw], 2x2 for [x, y]."""
    # attitude
    kpz = 2.
    kpphi = 0.1
    kptheta = 0.1
    kppsi = 0.3
    # proportional gains on [z, phi, theta, psi]
    Kx_p = np.array([
        [kpz, 0, 0, 0],
        [0, kpphi, 0, 0],
        [0, 0, kptheta, 0],
        [0, 0, 0, kppsi],
    ])
    kdz = 0.5
    kdphi = 0.1
    kdtheta = 0.1
    kdpsi = 0.1
    # derivative gains
    Kx_d = np.array([
        [kdz, 0, 0, 0],
        [0, kdphi, 0, 0],
        [0, 0, kdtheta, 0],
        [0, 0, 0, kdpsi],
    ])
    kiz = 0.01
    kiphi = 0.01
    kitheta = 0.01
    kipsi = 0.01
    # integral gains
    Kx_i = np.array([
        [kiz, 0, 0, 0],
        [0, kiphi, 0, 0],
        [0, 0, kitheta, 0],
        [0, 0, 0, kipsi],
    ])
    # position control matrix
    kpx = 0.6
    kpy = 0.6
    Ks_p = np.array([
        [kpx, 0],
        [0, kpy],
    ])
    kdx = 0.2
    kdy = 0.2
    Ks_d = np.array([
        [kdx, 0],
        [0, kdy],
    ])


class MotorParam:
    """Mixer: maps [thrust, roll, pitch, yaw] commands to 4 motor forces."""
    C = 0.1  # constant factor
    L = 0.1  # moment arm (L_arm cos 45)
    a = 0.25
    b = 1 / (4*L)
    c = 1 / (4*C)
    # control allocation matrix (rows = motors, cols = [T, phi, theta, psi])
    C_R = np.array([
        [a, b, -b, -c],
        [a, -b, -b, c],
        [a, -b, b, -c],
        [a, b, b, c],
    ])


def main():
    """Run 1000 control steps tracking a 0.5 m-radius circle at 1 m height."""
    dt = 0.01
    mass = 0.3
    gravity = 9.81
    # previous-step errors / integral state (start at 0)
    ex = 0
    es = 0
    ex_int = 0
    env = gym.make('MujocoQuadForce-v0')
    # [x, y, z, q0, q1, q2, q3]
    observation = env.reset()
    for t in range(1000):
        env.render()
        #################################
        # desired position state (x, y, z)
        # circle trajectory on 1 m height
        s_d = np.array([
            Trajectory.R * np.cos(Trajectory.w * dt * t),
            Trajectory.R * np.sin(Trajectory.w * dt * t),
            1.0
        ])
        ################################
        # quat -> rpy
        quat = observation[3:]
        # body-to-world rotation matrix built from the quaternion
        rotmat_WB = np.array([
            [1 - 2*(quat[2]**2 + quat[3]**2), 2*(quat[1]*quat[2] - quat[3]*quat[0]), 2*(quat[1]*quat[3] + quat[2]*quat[0])],
            [2 * (quat[1]*quat[2] + quat[3]*quat[0]), 1 - 2*(quat[1]**2 + quat[3]**2), 2*(quat[2]*quat[3] - quat[1]*quat[0])],
            [2 * (quat[1]*quat[3] - quat[2]*quat[0]), 2*(quat[2]*quat[3] + quat[1]*quat[0]), 1 - 2*(quat[1]**2 + quat[2]**2)],
        ])
        # standard quaternion -> Euler (roll/pitch/yaw) conversion
        roll = np.arctan2(2*(quat[0] * quat[1] + quat[2] * quat[3]), 1 - 2*(quat[1]**2 + quat[2]**2))
        pitch = np.arcsin(2*(quat[0] * quat[2] - quat[3] * quat[1]))
        yaw = np.arctan2(2*(quat[0] * quat[3] + quat[1] * quat[2]), 1 - 2*(quat[2]**2 + quat[3]**2))
        ################################
        # state
        # position
        s = np.array([
            observation[0],
            observation[1],
        ])
        # attitude
        x = np.array([
            observation[2],
            roll,
            pitch,
            yaw,
        ])
        ################################
        # error
        # position
        es_last = es
        es = s_d[0:2] - s
        es_dot = (es - es_last) / dt  # differentiation
        # position input
        us = np.matmul(CtrlParam.Ks_p, es) \
            + np.matmul(CtrlParam.Ks_d, es_dot)
        us = np.append(us, 0)
        # attitude
        # outer position loop output becomes attitude setpoint (body frame)
        rotmat_BW = np.linalg.inv(rotmat_WB)
        x_d = np.array([
            s_d[2],  # +z
            -np.matmul(rotmat_BW, us)[1],  # -y -> roll,
            np.matmul(rotmat_BW, us)[0],  # +x -> pitch,
            # yaw setpoint wrapped to [-pi, pi)
            (Trajectory.w * dt * t + np.pi) % (2 * np.pi) - np.pi,
        ])
        ex_last = ex
        ex = x_d - x
        ex_dot = (ex - ex_last) / dt  # differentiation
        ex_int += ex * dt  # integration
        # attitude input
        u = np.matmul(CtrlParam.Kx_p, ex) \
            + np.matmul(CtrlParam.Kx_d, ex_dot) \
            + np.matmul(CtrlParam.Kx_i, ex_int)
        # hover-thrust feed-forward, compensated for tilt
        u[0] += mass * gravity / (np.cos(pitch) * np.cos(roll))
        # actuator input
        # +,+
        # +,-
        # -,-
        # -,+
        F = np.matmul(MotorParam.C_R, u)
        observation, reward, done, info = env.step(F)
        if done:
            break


if __name__ == "__main__":
    main()
[ "east0822@gmail.com" ]
east0822@gmail.com
2e24e4d2a766d658e872865bd2ce68adc082c893
61734be18935dd5ecfd9103373188c6eb870673c
/shopping.py
100f67643120cd2b2c8c5bddf130a291d52088dd
[]
no_license
bojone/analytical-classification
751780f8aae21018be23440d2acaa6ba34554ba6
e9333fde0eb76fcdc25482209e6b80bc8a21929b
refs/heads/main
2023-06-24T22:25:33.036612
2021-07-29T07:24:00
2021-07-29T07:24:00
388,304,001
11
0
null
null
null
null
UTF-8
Python
false
false
3,744
py
#! -*- coding: utf-8 -*-
# Compare the closed-form ("fully-connected + Softmax") classifier against
# gradient descent. Blog post: https://kexue.fm/archives/8578

import numpy as np
import pandas as pd
from bert4keras.tokenizers import Tokenizer
from bert4keras.models import build_transformer_model
from bert4keras.snippets import sequence_padding
from bert4keras.snippets import open
from bert4keras.optimizers import Adam
from keras.layers import *
from keras.models import Model
from tqdm import tqdm

num_classes = 10
maxlen = 128

# BERT configuration (local checkpoint paths)
config_path = '/root/kg/bert/chinese_roformer-sim-char-ft_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_roformer-sim-char-ft_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_roformer-sim-char-ft_L-12_H-768_A-12/vocab.txt'

# Build the tokenizer
tokenizer = Tokenizer(dict_path, do_lower_case=True)  # build the tokenizer

# Build and load the sentence encoder
encoder = build_transformer_model(
    config_path, checkpoint_path, model='roformer', with_pool='linear'
)


def load_data(filename):
    """Load the dataset.

    One sample per record: (text, label_id). Also sets the global
    num_classes to the number of distinct labels found.
    """
    data = pd.read_csv(filename).dropna()
    labels = list(set(data['cat']))
    global num_classes
    num_classes = len(labels)
    D = []
    for text, label in zip(data['review'], data['cat']):
        D.append((text, labels.index(label)))
    return D


# Load the dataset and split 75/25 into train/validation
data = load_data('/root/online_shopping_10_cats.csv')
idxs = np.random.permutation(len(data))
train_data = [data[i] for i in idxs if i % 4 != 0]
valid_data = [data[i] for i in idxs if i % 4 == 0]


def convert(data):
    """Vectorize the data: tokenize, pad, and encode with the BERT encoder."""
    X, S, Y = [], [], []
    for t, l in tqdm(data):
        x, s = tokenizer.encode(t, maxlen=maxlen)
        X.append(x)
        S.append(s)
        Y.append([l])
    X = sequence_padding(X)
    S = sequence_padding(S)
    X = encoder.predict([X, S], verbose=True)
    Y = np.array(Y)
    return X, Y


train_x, train_y = convert(train_data)
valid_x, valid_y = convert(valid_data)


def compute_kernel_bias(vecs):
    """Compute the whitening kernel and bias.

    vecs.shape = [num_samples, embedding_size]; the final transform is
    y = (x + bias).dot(kernel).
    """
    mu = vecs.mean(axis=0, keepdims=True)
    cov = np.cov(vecs.T)
    u, s, vh = np.linalg.svd(cov)
    W = u.dot(np.diag(1 / np.sqrt(s)))
    return W, -mu


# Whiten the embeddings
kernel, bias = compute_kernel_bias(train_x)
train_x = (train_x + bias).dot(kernel)
valid_x = (valid_x + bias).dot(kernel)

# ============== Solve by gradient descent ==============
x = Input(shape=(train_x.shape[1],))
y = Dense(num_classes, activation='softmax')(x)

model = Model(x, y)
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=Adam(1e-3),
    metrics=['accuracy']
)
model.summary()

model.fit(
    train_x, train_y, epochs=10, batch_size=32, validation_data=(valid_x, valid_y)
)

train_y_pred = model.predict(train_x, verbose=True)
valid_y_pred = model.predict(valid_x, verbose=True)
train_acc = np.mean(train_y[:, 0] == train_y_pred.argmax(1))
valid_acc = np.mean(valid_y[:, 0] == valid_y_pred.argmax(1))
print(train_acc, valid_acc)

# ============== Solve analytically (closed form) ==============
# class priors, class means, and the shared covariance-derived matrix
ps = np.array([(train_y == i).mean() for i in range(num_classes)])
mus = [train_x[train_y[:, 0] == i].mean(axis=0) for i in range(num_classes)]
cov = np.eye(len(mus[0])) - np.einsum('nd,nc,n->dc', mus, mus, ps)
cov_inv = np.linalg.inv(cov)
w = np.einsum('nd,dc->cn', mus, cov_inv)
b = np.log(ps) - np.einsum('nd,dc,nc->n', mus, cov_inv, mus) / 2

train_y_pred = train_x.dot(w) + b
valid_y_pred = valid_x.dot(w) + b
train_acc = np.mean(train_y[:, 0] == train_y_pred.argmax(1))
valid_acc = np.mean(valid_y[:, 0] == valid_y_pred.argmax(1))
print(train_acc, valid_acc)
[ "noreply@github.com" ]
noreply@github.com
2e69bb3c653c7a6fe8fb3e43c5da74f7ad961e0b
0cbd649f9a2a31c0ff50b8a13b40bba17e58640d
/lesson6_step5.py
c085962592591136da570dfb53ce5fa9e03821f6
[]
no_license
Senkool/selenium_course
5b79d76fb9a50a64410668337a24f278ba9c05eb
a5e4dc098b8f1507f0a2c0f9643c3c56ccfd6979
refs/heads/master
2022-06-24T15:36:27.408102
2020-05-10T16:25:19
2020-05-10T16:25:19
262,828,053
0
0
null
null
null
null
UTF-8
Python
false
false
406
py
# Selenium exercise: fill every <input> on the huge-form page with the
# answer string, then submit. The finally block keeps the browser open
# for 30 s (so the result page can be inspected) before quitting.
from selenium import webdriver
import time

try:
    browser = webdriver.Chrome()
    browser.get("http://suninjuly.github.io/huge_form.html")
    for field in browser.find_elements_by_css_selector("input"):
        field.send_keys("ответ")
    browser.find_element_by_css_selector("button.btn").click()
finally:
    time.sleep(30)
    browser.quit()
[ "12denis1998@gmail.com" ]
12denis1998@gmail.com
393ef0ee8e6ca3b958d075bdb3bbd1e68260b9b2
41c660d5cd7b8dcc1a0c0abbc81b5c3e7e80fbd3
/raspy/handle_motion.py
d9c9a0df603ab4855b034242677008dcb4559284
[]
no_license
rconjaerts/nrt
8bb1406a12af5bb60a54e1c374e6e8ba1d78ad1a
016f723c944f095876ea96c739882b37bec98d3d
refs/heads/master
2020-12-26T18:03:00.274341
2014-07-01T14:15:22
2014-07-01T14:15:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,935
py
#!/usr/bin/python from __future__ import division from PIL import Image import numpy as np import sys import requests import json import time import blinker def handle_motion(img_path): print "handling motion" # We load it and calculate a single scalar value for the motion ('activity') # in this image img = np.asarray(Image.open(img_path).convert('L')) img = 1 * (img == 25) m,n = img.shape motion_pixels = img.sum() total_pixels = m*n motion_value = motion_pixels / total_pixels * 100 # see if we should do some shit with the baby try: amplitude_value = float(open('/home/pi/last_amplitude').readline()) except Error: amplitude_value = 0 if amplitude_value > 0.20 and motion_value > 0.20: print "BABY IS AWAKE." # Send the event to our server url = 'http://192.168.137.1:8080/BigSisterReboot/webresources/entities.event' payload = {'timestamp': int(time.time()), 'value': motion_value, 'accountId': 1, 'typeId': 2, } headers = {'content-type': 'application/json'} r = requests.post(url, data=json.dumps(payload), headers=headers) def handle_snapshot(img_path): pass # Send the raw image to our server # url = 'http://192.168.137.1:8080/BigSister/webresources/entities.eventvideo' # payload = {'timestamp': int(time.time()), # 'filename': img_path, # 'value': value, # 'accountId': 1,} # headers = {'content-type': 'application/json'} # # r = requests.post(url, data=json.dumps(payload), headers=headers) # Our first arguement passed is an image file name img_path = str(sys.argv[1]).strip() # motion pictures end with m.jpg, so here we differentiate between motion pics and snapshots if img_path[-5:] == 'm.jpg': handle_motion(img_path) else: handle_snapshot(img_path)
[ "tomjaspers@gmail.com" ]
tomjaspers@gmail.com
43fa9cc363b25fbbb658601647398ac8dbfe41aa
a25b26b3b27b59325915c10dfafd607faae4ed01
/pickfrombothsides.py
879435386092602b6015b9cf8eab00d3d4514243
[]
no_license
helplearnhome/Coding-Interview-in-Python
3e3d9f42b993b9ea8e86e25d8a8e36c1610c45f2
52a76689a5808a89ebb8399bf3e6d4922c4c190b
refs/heads/master
2023-07-17T06:30:09.207235
2021-09-08T03:28:33
2021-09-08T03:28:33
397,350,111
0
0
null
null
null
null
UTF-8
Python
false
false
1,823
py
# Pick B elements from either end of a list so that their sum is maximal.


def func(A, B, i, j, sum):
    """Exponential recursion: at each step take the left or right end."""
    if B == 0:
        return sum
    take_left = func(A, B - 1, i + 1, j, sum + A[i])
    take_right = func(A, B - 1, i, j - 1, sum + A[j])
    return max(take_left, take_right)


class Solution:
    # Recursive variant (shadowed by the sliding-window class below).
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        return func(A, B, 0, -1, 0)


class Solution:
    # O(B) sliding window: start with the sum of the first B elements,
    # then repeatedly trade the last element of the left prefix for the
    # next element of the right suffix, tracking the best sum seen.
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def solve(self, A, B):
        window = sum(A[:B])
        best = window
        for k in range(1, B + 1):
            window += A[-k] - A[B - k]
            best = max(best, window)
        return best


x = Solution()
print(x.solve([0, 0, 0, 1, 1, 7], 3))
[ "noreply@github.com" ]
noreply@github.com
93c4453f26512207811cdba404053b9a07b2e9c1
b68887f55cfcd0225d732acfbfcc7f3724e49d5d
/pages/factories.py
a59431ddc5e91da1966fb1ba58c8d6ad49dcbfb0
[ "MIT" ]
permissive
rds0751/nhsuk-content-store
0ac7eb06f85cc97cd57e58a3f24e19db9991a8a2
7bd6a386e3583779ddba2347a4b3a80fdf75b368
refs/heads/master
2020-04-19T08:53:54.273378
2019-01-29T05:08:18
2019-01-29T05:08:18
168,092,530
0
0
null
2019-01-29T05:05:33
2019-01-29T05:05:33
null
UTF-8
Python
false
false
1,515
py
"""factory_boy factories for the condition/symptom page tree.

Paths and depths follow Wagtail's treebeard materialised-path scheme:
each 4-digit segment is one tree level, so depth-3 nodes sit directly
under the home page and depth-4 nodes under a folder.
"""
import factory

from home.factories import HomePageFactory, ParentBasedFactory

from . import models


class ConditionsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    # Singleton "Conditions" folder under the home page.
    title = 'Conditions'
    slug = 'conditions'
    path = '000100010002'
    depth = 3
    _ParentFactory = HomePageFactory
    _unique = True

    class Meta:
        model = models.FolderPage


class ConditionPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    # Editorial pages numbered sequentially under the Conditions folder.
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4
    _ParentFactory = ConditionsPageFactory
    _unique = False

    class Meta:
        model = models.EditorialPage


class SymptomsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    # Singleton "Symptoms" folder under the home page.
    title = 'Symptoms'
    slug = 'symptoms'
    path = '000100010001'
    depth = 3
    _ParentFactory = HomePageFactory
    _unique = True

    class Meta:
        model = models.FolderPage


class SymptomPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    # Editorial pages numbered sequentially under the Symptoms folder.
    path = factory.Sequence(lambda n: '000100010001%04d' % (n + 1))
    depth = 4
    _ParentFactory = SymptomsPageFactory
    _unique = False

    class Meta:
        model = models.EditorialPage


class ConditionFolderPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    # Sub-folders under Conditions; shares the Condition page numbering.
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4
    _ParentFactory = ConditionsPageFactory
    _unique = False

    class Meta:
        model = models.FolderPage
[ "marcofucci@gmail.com" ]
marcofucci@gmail.com
1ebd1dd0aa7d1f4a4140c09684866a9a7eccdcb8
d685a23ff883f216f5ab8ea6ba257eb660e6a931
/laboratorios/lab01/codigo/rectangle.py
c50f4713939745bab2bd9d972bd8ca6c78d7b997
[]
no_license
varisti7/ST0245-001
2abde80a13d6139018e6e7ac9b99fd476f572c98
3582eb0a06f22af496d63458db55d8d47b797c79
refs/heads/master
2023-01-12T20:29:19.816894
2020-11-17T21:27:02
2020-11-17T21:27:02
283,341,706
0
1
null
null
null
null
UTF-8
Python
false
false
1,299
py
"""Lab exercise: time a deliberately exponential tiling recursion and
plot its running time against n."""
import time, string, random
import matplotlib.pyplot as plt


def rectangle(size2n):
    """
    Count the ways a 2 x size2n board can be tiled with 2x1 dominoes.

    Fibonacci-style recurrence f(1)=1, f(2)=2, f(n)=f(n-1)+f(n-2),
    left intentionally naive (O(2^n)) so the growth can be plotted.
    """
    if size2n in (1, 2):  # C1
        return size2n
    return rectangle(size2n - 1) + rectangle(size2n - 2)  # T(n-1) + T(n-2)


# Worst-case asymptotic complexity: T(n) = C1 + T(n-1) + T(n-2) => O(2^n)
# (solved in "Laboratory practice No. 1: Recursion", point 4.4.1).


def plotTimes():
    """
    Plot the rectangle algorithm: x axis is n (20..39), y axis is the
    wall-clock time needed to compute rectangle(n).
    """
    sizes = []
    elapsed = []
    for n in range(20, 40):
        start = time.time()
        rectangle(n)
        delta = time.time() - start
        elapsed.append(delta)
        print(delta)
        sizes.append(n)
    plt.plot(sizes, elapsed, 'rs')
    plt.show()


def main():
    plotTimes()


main()

# Sample timings observed for n = 20..39 (seconds):
# 6.20E-03, 4.55E-03, 1.29E-02, 2.36E-02, 0.04935503, 0.079976082,
# 0.100703001, 0.176604033, 0.24488306, 0.347311974, 0.278718948,
# 0.402431965, 0.634913206, 1.017865181, 1.632277012, 2.63162899,
# 4.297087908, 6.946293831, 11.17777205, 18.06480503
[ "varisti6@gmail.com" ]
varisti6@gmail.com
47f0abfaceb11e660d4f305e745db7fe9fee819f
da84fa23cc4cf2e81e50892085ac162508bff155
/nestris_ocr/capturing/linux/linux_mgr.py
6381209b8dd85f1880aca2b7eb9cbc653ec9f0cd
[]
no_license
alex-ong/NESTrisOCR
83ddaba55b100f0ee20e924731459e547e321887
488beeb30e596ccd0548152e241e1c6f772e717b
refs/heads/master
2023-01-10T08:02:41.702538
2021-07-07T06:33:39
2021-07-07T06:33:39
169,196,192
25
8
null
2022-12-27T15:37:40
2019-02-05T05:44:34
Python
UTF-8
Python
false
false
1,202
py
import Xlib
import Xlib.display
from Xlib import X


class WindowMgr:
    """Encapsulates some calls for window management"""

    def __init__(self, hwnd=None):
        # stored X window handle (id), if any
        self.handle = hwnd

    def checkWindow(self, hwnd):
        """checks if a window still exists"""
        # NOTE(review): simply echoes the handle back; no liveness check
        # is actually performed here — confirm whether that is intended.
        return hwnd

    def getWindows(self):
        """
        Return a list of tuples (handler, window name) for each real window.
        """
        windows = []

        def getWindowHierarchy(window, windows):
            # Walk the X window tree depth-first, collecting windows.
            children = window.query_tree().children
            for w in children:
                try:
                    # Probe: grabbing a 1x1 image fails with BadMatch for
                    # unviewable windows, which filters out "unreal" ones.
                    w.get_image(0, 0, 1, 1, X.ZPixmap, 0xFFFFFFFF)
                    windows.append(
                        (
                            w.id,
                            w.get_wm_class()[1]
                            if w.get_wm_class() is not None
                            else "",
                        )
                    )
                except Xlib.error.BadMatch:
                    # window is not viewable; skip it but still recurse
                    pass
                finally:
                    # recurse into children regardless of the probe result
                    windows = getWindowHierarchy(w, windows)
            return windows

        root = Xlib.display.Display().screen().root
        windows = getWindowHierarchy(root, windows)
        return windows
[ "the.onga@gmail.com" ]
the.onga@gmail.com
10252106e1b1114e8e4adf56f12d670ac5aee1e0
397c9e2743c41cf591692c4fc37f43a9070119bd
/build/env/lib/python2.7/site-packages/SQLAlchemy-1.2.0b3-py2.7-linux-x86_64.egg/sqlalchemy/cutils.py
a62e8adc17fa043f78d6b8b32d3c703fd2682408
[ "Apache-2.0" ]
permissive
bopopescu/myhue
cf41238c782d12b3a1a0ee9ef70196359bb67894
5f566970a5a1fa5af9f01832c9e9808c47634bc7
refs/heads/master
2022-11-18T05:37:24.467150
2019-11-23T16:16:22
2019-11-23T16:16:22
282,390,507
0
0
Apache-2.0
2020-07-25T07:03:40
2020-07-25T07:03:39
null
UTF-8
Python
false
false
282
py
# setuptools-generated bootstrap stub: on first import it replaces this
# pure-Python module with the compiled C extension (cutils.so) shipped
# inside the egg.
def __bootstrap__():
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the .so next to this module (extracting it from the egg
    # cache if necessary), then load it under this module's name.
    __file__ = pkg_resources.resource_filename(__name__, 'cutils.so')
    # Drop the stub machinery so the loaded extension fully takes over.
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
[ "352322399@qq.com" ]
352322399@qq.com
e0cf8c5298a8ee4e8a3b21eb3b1fe65504c3047e
204ec78fcebcea9e1e1da4905cf3fad0a514b01f
/test/unit/test_timeout.py
4990b87aad1b2d40888f75acc3481c349d3eb4e0
[ "Apache-2.0" ]
permissive
ARMmbed/pyOCD
659340bf8753aa8e15a72890b8bea64dff2c2f42
d4cdcf7e532cae17caad866839287bbe1e0d952b
refs/heads/master
2023-05-31T13:45:15.797588
2020-10-12T13:55:47
2020-10-12T13:55:47
190,203,829
3
1
Apache-2.0
2019-07-05T11:05:40
2019-06-04T13:09:56
Python
UTF-8
Python
false
false
1,942
py
# pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from time import (time, sleep)

import pytest

from pyocd.utility.timeout import Timeout


class TestTimeout:
    """Unit tests for pyocd.utility.timeout.Timeout."""

    def test_no_timeout(self):
        # Leave the block well before the 50 ms deadline: check() must
        # stay true for all four iterations and did_time_out stay False.
        with Timeout(0.05) as to:
            for _ in range(4):
                assert to.check()
                sleep(0.01)
        assert not to.did_time_out

    def test_timeout_a(self):
        # Spin until check() reports expiry; at least 50 ms must elapse.
        started = time()
        with Timeout(0.05) as to:
            while to.check():
                sleep(0.01)
        assert to.did_time_out
        assert (time() - started) >= 0.05

    def test_timeout_b(self):
        # Poll did_time_out directly (without check()) while sleeping
        # past the deadline: it must flip to True within the loop.
        fired = False
        started = time()
        with Timeout(0.05) as to:
            for _ in range(10):
                if to.did_time_out:
                    fired = True
                sleep(0.02)
        assert fired
        assert to.did_time_out
        assert (time() - started) >= 0.05

    def test_timeout_c(self):
        # With no sleeping at all the deadline is never reached, so
        # did_time_out must remain False throughout.
        fired = False
        with Timeout(0.05) as to:
            for _ in range(10):
                if to.did_time_out:
                    fired = True
        assert not fired
        assert not to.did_time_out
[ "flit@me.com" ]
flit@me.com
4e36d68ce3ba6c25adc4cbac052979ce95b8b4c2
4494d201fef442f6546919acd685561ba2bd0ec3
/lookup/views.py
0d074c1c3cde4853656fa056e992d3cdf480a210
[]
no_license
santoshikaruturi/djangoweather
de1d8bef593f6bb32f342f2867e345f67dd63c32
4fb465ff4167a99fb0dfea92212f055c66568994
refs/heads/master
2020-12-10T08:55:36.893164
2020-01-13T08:49:58
2020-01-13T08:49:58
233,549,908
0
0
null
null
null
null
UTF-8
Python
false
false
208
py
# This is my views.py file
from django.shortcuts import render


# Create your views here.
def home(request):
    """Render the home page with an empty template context."""
    context = {}
    return render(request, 'home.html', context)


def about(request):
    """Render the about page with an empty template context."""
    context = {}
    return render(request, 'about.html', context)
[ "karuturisantoshi9@gmail.com" ]
karuturisantoshi9@gmail.com
49da684394e0562e287537b0ace76cdd6969645c
ec649dc247e5229f78c2bcf51fdef1c57f3cdbff
/OCR/tesseract-invoice.py
0a8f8a6159e2642f490c1a13260c8f55f28faaf0
[]
no_license
dainv1989/machine-learning
0f1686a9bf921bfcf665a786c63ec68b189942a0
e44b14041f577ebe5d5ce785af45e4226e0d2756
refs/heads/master
2021-07-11T20:39:39.423568
2020-03-22T23:55:10
2020-03-22T23:55:10
93,155,384
0
0
null
null
null
null
UTF-8
Python
false
false
1,415
py
# Locate date and e-mail tokens on an invoice image with Tesseract OCR
# and draw green boxes around them.
import cv2
import re
import pytesseract as pts
from pytesseract import Output


def show_textboxes(image):
    """Draw a green box around every OCR token with confidence > 60."""
    data = pts.image_to_data(image, output_type=Output.DICT)
    boxes = zip(data['conf'], data['left'], data['top'],
                data['width'], data['height'])
    for conf, left, top, width, height in boxes:
        if int(conf) > 60:
            # cv2.rectangle draws on `image` in place
            cv2.rectangle(image, (left, top),
                          (left + width, top + height), (0, 255, 0), 2)
    return image


def find_pattern(image, data_pattern):
    """Box every confident OCR token whose text matches *data_pattern*."""
    data = pts.image_to_data(image, output_type=Output.DICT)
    for i, text in enumerate(data['text']):
        if int(data['conf'][i]) > 60 and re.match(data_pattern, text):
            x, y = data['left'][i], data['top'][i]
            w, h = data['width'][i], data['height'][i]
            image = cv2.rectangle(image, (x, y), (x + w, y + h),
                                  (0, 255, 0), 2)
    return image


img = cv2.imread('invoice.jpg')
#text = pts.image_to_string(img)
#print(text)

# dd/mm/yyyy dates
date_pattern = '^(0[1-9]|[12][0-9]|3[01])/(0[1-9]|1[012])/(19|20)\d\d$'
img_datebox = find_pattern(img, date_pattern)
#cv2.imshow('img', img_datebox)

# simple lowercase e-mail addresses
email_pattern = '^[a-z0-9]+@[a-z0-9]+\.[a-z]+$'
img_emailbox = find_pattern(img_datebox, email_pattern)
cv2.imshow('img', img_emailbox)

#img_textboxes = show_textboxes(img)
#cv2.imshow('img', img_textboxes)
cv2.waitKey(0)
[ "dainv1989@hotmail.com" ]
dainv1989@hotmail.com
6a3039fdc7a6b0b344c3bf5b42fdec14cefec348
d76ed914106e5ec6c5f1a64b843fb27e43256893
/yugioh_cardDB/admin.py
3724c94ca4b77cb44dcc9bc0188d8d6b1de00014
[]
no_license
ss6987/yugioh_DB
7de17227bbeecfc295eef0bd02bb7583c4c30466
352f54f1302d80879c67f9cd05aae01818f4ee7a
refs/heads/master
2020-04-07T19:19:15.315196
2019-04-10T00:36:32
2019-04-10T00:36:32
157,318,534
0
0
null
null
null
null
UTF-8
Python
false
false
821
py
"""Django admin registrations for the Yu-Gi-Oh card database models."""
from django.contrib import admin

from .models import *


class PackAdmin(admin.ModelAdmin):
    # recording_card is shown but not editable in the admin form
    readonly_fields = ["recording_card"]


class ShopURLAdmin(admin.ModelAdmin):
    # card is shown but not editable in the admin form
    readonly_fields = ["card"]


class PriceAdmin(admin.ModelAdmin):
    # shop_url is shown but not editable in the admin form
    readonly_fields = ["shop_url"]


# Plain registrations (default ModelAdmin)
admin.site.register(Card)
admin.site.register(Monster)
admin.site.register(PendulumMonster)
admin.site.register(LinkMonster)
admin.site.register(CardClassification)
admin.site.register(Attribute)
admin.site.register(Type)
admin.site.register(LinkMarker)
admin.site.register(CardId)
# Registrations with customized admin classes
admin.site.register(Pack, PackAdmin)
admin.site.register(PackClassification)
admin.site.register(PackOfficialName)
admin.site.register(SearchPage)
admin.site.register(ShopURL, ShopURLAdmin)
admin.site.register(PriceLog, PriceAdmin)
admin.site.register(Rarity)
[ "angel.blossom.vivid@gmail.com" ]
angel.blossom.vivid@gmail.com
7222be1cddcf38ef35b6da54356de01d6646981a
fab3523d77e3641a7239818591a16a4ca29ba8c3
/pymanetsim/plot_plane.py
ceb365f916e8793a68f3d054cfd6821437a6d5da
[]
no_license
joninvski/pymanetsim
c26cbe2256add2fefdf3097264575ca9fe7be3ce
c19e8d2324dc5325a2ba31aa692fc674c658725e
refs/heads/master
2016-09-06T15:15:18.235956
2014-10-07T14:23:24
2014-10-07T14:23:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
721
py
import pdb
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt


def plot_plane(plane, all_nodes, name):
    """Render a heat map of node positions on the plane and save it
    to /tmp/<name>.

    :param plane: object with x_size / y_size attributes
    :param all_nodes: mapping of node id -> node; each node has a
        .location with integer x / y coordinates
    :param name: output file name (saved under /tmp)
    """
    # NOTE(review): np.meshgrid returns a list of coordinate arrays, so
    # only X[0] is used as the 2-D grid below; presumably a plain
    # np.zeros((y_size, x_size)) was intended — confirm.
    x = [0]*plane.x_size
    y = [0]*plane.y_size
    X = np.meshgrid(x, y)

    #For each node
    for node in all_nodes.values():
        #Get its heat value
        heat = 100 #TODO - This is fake

        #Put that value on the heat map
        # (reuses the names x/y from above for the node coordinates)
        x = node.location.x
        y = node.location.y
        # NOTE(review): indexes rows by x and columns by y, which may be
        # transposed relative to the imshow extent below — verify.
        X[0][x][y] = heat

    plt.imshow(X[0], interpolation='nearest', origin='lower', \
            extent=[0, plane.x_size, 0, plane.y_size])
    plt.savefig('/tmp/' + str(name))
#    plt.show()
#    plt.savefig('../results/images/plots/' + str(name))
[ "trindade.joao@gmail.com" ]
trindade.joao@gmail.com
14b05aaebd43d60f8b353474ac5f6a16ec15f2f2
607d5300e1c55cd194b581174f13965f73971eb6
/oss/testdata/python_examples/get_object.py
588f638b58d92c3c43dfbe1974e3bf2d0eae0d2b
[ "Apache-2.0" ]
permissive
aliyun-beta/aliyun-oss-go-sdk
a577a45c3ec445c0a8a2f73538077943f0bb537e
b9a019776ad68c252acb92a67a09be59162de4ab
refs/heads/master
2021-01-17T14:37:13.127610
2015-12-15T08:35:49
2015-12-15T08:35:49
46,700,470
3
1
null
null
null
null
UTF-8
Python
false
false
263
py
from oss.oss_api import *
from oss import oss_xml_handler

# The original assigned a local test endpoint and immediately overwrote it;
# the unused alternative is kept as a comment instead of dead code.
# endpoint = "hostlocal:9999"
endpoint = "oss-cn-hangzhou.aliyuncs.com:9999"

# Example (fake) credentials; `access_id` avoids shadowing the builtin `id`.
access_id, secret = "ayahghai0juiSie", "quitie*ph3Lah{F"

oss = OssAPI(endpoint, access_id, secret)
res = oss.get_object("bucket-name", "object/name")
[ "w@h12.me" ]
w@h12.me
66a53d2b71a6f7420dc7a93f85b662fb08440da4
f6a24e544fe48cb13fa38fcde11ce9b57d119eba
/api.py
7e4075258107e12abd359e2ec3feba410554f86c
[ "MIT", "BSD-3-Clause", "Apache-2.0" ]
permissive
postsai/postsai
b8afeb44d87ff16cd0526ba593f70446751affc6
c2b8363c34ff28a8c54b04548ff1c72c8a98e2c3
refs/heads/master
2023-08-16T16:50:30.671405
2023-08-10T15:08:17
2023-08-10T15:08:17
52,112,371
5
5
NOASSERTION
2023-09-14T14:19:35
2016-02-19T19:49:22
Python
UTF-8
Python
false
false
1,985
py
#! /usr/bin/python3

# The MIT License (MIT)
# Copyright (c) 2016-2021 Postsai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import cgi
import json
import sys

from os import environ

import config
from backend.cvs import PostsaiCommitViewer
from backend.query import Postsai
from backend.importer import PostsaiImporter


def _read_post_body():
    """Read the POST body from stdin and return it decoded to text.

    The original code called ``sys.stdin.read()`` (which already yields
    ``str`` under Python 3) and then tried ``data.decode("iso-8859-15")``
    in the fallback branch -- that raises ``AttributeError`` because
    ``str`` has no ``decode``, and ``json.loads`` on a ``str`` never
    raises ``UnicodeDecodeError`` in the first place.  Reading raw bytes
    from ``sys.stdin.buffer`` preserves the intended behavior: try UTF-8,
    fall back to ISO-8859-15 for legacy senders.
    """
    raw = sys.stdin.buffer.read()
    try:
        return raw.decode("utf-8")
    except UnicodeDecodeError:
        return raw.decode("iso-8859-15")


if __name__ == "__main__":
    if "REQUEST_METHOD" in environ and environ['REQUEST_METHOD'] == "POST":
        # Webhook import: the request body is a JSON payload.
        parsed = json.loads(_read_post_body(), strict=False)
        PostsaiImporter(vars(config), parsed).import_from_webhook()
    else:
        # Plain GET request: either view a single commit or run a query.
        form = cgi.FieldStorage()
        if form.getfirst("method", "") == "commit":
            PostsaiCommitViewer(vars(config)).process()
        else:
            Postsai(vars(config)).process()
[ "nhnb@users.sourceforge.net" ]
nhnb@users.sourceforge.net
837640dec4290cdc6c2b69db42c87b60beb2a646
327a6d74d22fb06d76f5da814452e76aa8999692
/stanCode_projects/Photoshop/green_screen.py
ff846ff20b3d35e8ea5a12d8952923732dfd0a2b
[ "MIT" ]
permissive
siyuban/stanCode
6a5c2114a3bc949c1d6e5a70b987dcda74161874
5cca179a7542abdf1fe3a9e1da256945241249cc
refs/heads/main
2023-02-11T16:20:08.707138
2021-01-09T15:44:55
2021-01-09T15:44:55
324,043,306
0
0
null
null
null
null
UTF-8
Python
false
false
1,464
py
""" File: green_screen.py Name:萬思妤 ------------------------------- This file creates a new image that uses MillenniumFalcon.png as background and replace the green pixels in "ReyGreenScreen.png". """ from simpleimage import SimpleImage def combine(background_img, figure_img): """ :param background_img: SimpleImage, the background image. :param figure_img: SimpleImage, green screen figure image :return: SimpleImage, figure image with the green screen pixels replaced by pixels of background. """ for x in range(background_img.width): for y in range(background_img.height): figure_pixel = figure_img.get_pixel(x, y) bigger = max(figure_pixel.red, figure_pixel.blue) if figure_pixel.green > bigger*2: background_img_pixel = background_img.get_pixel(x, y) figure_pixel.red = background_img_pixel.red figure_pixel.blue = background_img_pixel.blue figure_pixel.green = background_img_pixel.green return figure_img def main(): """ Create a new image that uses MillenniumFalcon.png as background and replace the green pixels in "ReyGreenScreen.png". """ space_ship = SimpleImage("images/MillenniumFalcon.png") figure = SimpleImage("images/ReyGreenScreen.png") space_ship.make_as_big_as(figure) result = combine(space_ship, figure) result.show() if __name__ == '__main__': main()
[ "noreply@github.com" ]
noreply@github.com