Dataset schema (one record per source file):

| column | dtype | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
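
Each record below follows this schema, one row per source file. As a minimal sketch of how the metadata columns can be used to pre-filter rows before touching the (potentially multi-megabyte) `content` field, the snippet below defines a hypothetical `keep()` helper and checks it against values taken from the `/foods/models.py` record further down; neither the helper nor the inline dict is part of the dataset or its tooling:

```python
# Minimal sketch: filter records that follow the schema above.
# The keep() helper and the inline sample record are illustrative only.
def keep(row: dict) -> bool:
    """Keep small, human-written, permissively licensed Python files."""
    return (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] <= 10_000
    )

sample = {
    "path": "/foods/models.py",
    "license_type": "permissive",
    "language": "Python",
    "is_vendor": False,
    "is_generated": False,
    "length_bytes": 890,
}
print(keep(sample))  # True
```

Filtering on the small metadata fields first is cheap; `content` only needs to be decoded for rows that survive.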

blob_id: da1c774acc5561d55c44153d527b7870d3edd240 | directory_id: 7b710b60437eb4fbde32c616492f9a0466a899cf
path: /saludo.py | content_id: 3c2d9e45eec02617bb8a184a3a74b364a155c399
detected_licenses: [] | license_type: no_license | repo_name: 1000VIA/Platzi---Curso-de-Python
snapshot_id: 04586ab57f69868384f683e1aea7be850432acef | revision_id: 7b7d4dd759e2785f7de4fd090b7459c5f71e7326 | branch_name: refs/heads/master
visit_date: 2020-07-13T21:14:30.596897 | revision_date: 2019-12-13T00:55:31 | committer_date: 2019-12-13T00:55:31
github_id: 205,156,148 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2019-12-13T00:55:32 | gha_created_at: 2019-08-29T12:13:01 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 71 | extension: py
content:
name = str(input('Cuál es tú nombre?'))
print('Hola: ' + name + '!')
authors: ["milvampa1@gmail.com"] | author_id: milvampa1@gmail.com

blob_id: b2e2db887089fa40f37121da6294b059dbddd07e | directory_id: d873d529a24a30e2a9fc3e5a15ef6fe595c1423e
path: /blockchain-env/Lib/site-packages/Cryptodome/Signature/PKCS1_v1_5.pyi | content_id: 5851e5b47145e14378defa599a936c27b33312d2
detected_licenses: ["MIT"] | license_type: permissive | repo_name: AdityaStark7/blockchain_backend
snapshot_id: 2731d733a0029614565b484ec5820d04f615bfe7 | revision_id: a019dda3bd4160ae962ced5bd95d060ad33f7f07 | branch_name: refs/heads/main
visit_date: 2023-08-19T12:26:38.211892 | revision_date: 2021-10-15T05:31:18 | committer_date: 2021-10-15T05:31:18
github_id: 385,313,460 | star_events_count: 8 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2021-10-11T17:28:22 | gha_created_at: 2021-07-12T16:31:53 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 157 | extension: pyi
content:
from Cryptodome.PublicKey.RSA import RsaKey
from Cryptodome.Signature.pkcs1_15 import PKCS115_SigScheme
def new(rsa_key: RsaKey) -> PKCS115_SigScheme: ...
authors: ["ak511046@gmail.com"] | author_id: ak511046@gmail.com

blob_id: 6d149975d5e7fc54cb3bb401ac3053f567a095d5 | directory_id: e029672997da3c81d756bd66108c33322184ee3b
path: /data_convert.py | content_id: 2330bda6d670b43da88063637651d5cd6b5a051a
detected_licenses: [] | license_type: no_license | repo_name: jisangyu/K-Anonymity
snapshot_id: 3bc6d29592b62b49df883eec46c7d59995620bf6 | revision_id: bec6a2afed2de46e6b4ea6e217563ff5131fc0cc | branch_name: refs/heads/master
visit_date: 2020-05-18T12:29:54.818318 | revision_date: 2019-06-05T13:28:22 | committer_date: 2019-06-05T13:28:22
github_id: 184,410,448 | star_events_count: 1 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2019-06-05T13:26:52 | gha_created_at: 2019-05-01T12:05:48 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,635 | extension: py
content:
import pandas as pd
import csv
def load_distance(coloum_name):
    distance_dict = dict()
    with open('./data_distance/distance_' + coloum_name + ".csv", newline='') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            distance_dict.update({row[1]: row[2]})
    return distance_dict

def load_data(file_name):
    distance_age = load_distance('AGE')
    distance_race = load_distance('RACE')
    distance_ethnicity = load_distance('ETHNICITY')
    distance_gender = load_distance('GENDER')
    distance_birthplace = load_distance('BIRTHPLACE')
    distance_condition = load_distance('CONDITION')
    columns = {}
    with open('./data/' + file_name + '.csv') as f:
        reader = csv.reader(f, dialect='excel', delimiter='\t')
        headers = next(reader, None)
        for h in headers:
            columns[h] = []
        for row in reader:
            for h, v in zip(headers, row):
                if h == 'CONDITION':
                    v = distance_condition.get(v)
                elif h == 'BIRTHPLACE':
                    v = distance_birthplace.get(v)
                elif h == 'GENDER':
                    v = distance_gender.get(v)
                elif h == 'ETHNICITY':
                    v = distance_ethnicity.get(v)
                elif h == 'RACE':
                    v = distance_race.get(v)
                elif h == 'AGE':
                    v = distance_age.get(str(v))
                columns[h].append(v)
    return pd.DataFrame(columns)
df = load_data("finalPatientDataSet")
df.to_csv('./data_convert/data_convert.csv', encoding='utf-8', index=False)
authors: ["ssinzo@gmail.com"] | author_id: ssinzo@gmail.com

blob_id: 6c47582b4db897b64b9c851989f2ebaf2c0c1813 | directory_id: c804db17b29c5929c87768b7fa6ebb28f937fc36
path: /eu135.py | content_id: 2d8087d06adc1d916ec3c880789f9bf2b1d6f41f
detected_licenses: [] | license_type: no_license | repo_name: ishandutta2007/ProjectEuler-2
snapshot_id: a1f07329fd895828e740d0cf71b0b0a4de261b4c | revision_id: 29265e4e3dccc67d05ef8d6129363cea1705d970 | branch_name: refs/heads/master
visit_date: 2023-03-17T03:19:35.220761 | revision_date: 2019-04-18T13:39:40 | committer_date: 2019-04-18T13:39:40
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 2,445 | extension: py
content:
# ------------------------------------------------------------ Same differences --------------------------------------------------------------- #
# #
# Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, #
# n, for which the equation, x^2 − y^2 − z^2 = n, has exactly two solutions is n = 27: #
# #
# 34^2 − 27^2 − 20^2 = 12^2 − 9^2 − 6^2 = 27 #
# #
# It turns out that n = 1155 is the least value which has exactly ten solutions. #
# #
# How many values of n less than one million have exactly ten distinct solutions? #
# --------------------------------------------------------------------------------------------------------------------------------------------- #
import time
def eu135():
    TOP = 10 ** 6
    TARGET = 10
    # (z + 2d)^2 - (z + d)^2 - z^2 = n
    # -z^2 + 2zd + 3d^2 = n
    # -z^2 - zd + 3zd + 3d^2 = n
    # -z(z + d) + 3d(z + d) = n
    # (3d - z)(d + z) = n --> u = 3d - z, v = d + z --> uv = n
    # d = (u + v) / 4
    # z = (3v - u) / 4
    solutions = [0 for i in range(TOP + 1)]
    for u in range(1, TOP):
        for v in range(u // 3 + 1, TOP // u + 1):
            if (u + v) % 4 == 0 and \
               (3*v - u) % 4 == 0:
                solutions[u * v] += 1
    s = sum([1 for i in range(TOP) if solutions[i] == TARGET])
    return s

if __name__ == "__main__":
    startTime = time.clock()
    print (eu135())
    elapsedTime = time.clock() - startTime
    print ("Time spent in (", __name__, ") is: ", elapsedTime, " sec")
authors: ["roee.sefi@gmail.com"] | author_id: roee.sefi@gmail.com

blob_id: 780eef6e690917e25c77c3a334d9886de47514aa | directory_id: 358d5fcb1e9709b946097b9b291573a0a83a7b94
path: /src/gui.py | content_id: 9d9d8450475b41f1f2442529785192254dcf8011
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: MakeNTU/2021_team06_
snapshot_id: 9b452e652da44b7dafc8abe7519c5f64ebaab900 | revision_id: 4fecd8651227d2fe3cc2b3e5ab31d182fafe368c | branch_name: refs/heads/main
visit_date: 2023-05-01T00:42:26.017576 | revision_date: 2021-05-25T01:56:07 | committer_date: 2021-05-25T01:56:07
github_id: 351,734,420 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,595 | extension: py
content:
from vpython import *
import time, serial
COM_PORT = '/dev/cu.usbserial-0001'
BAUD_RATES = 9600
ser = serial.Serial(COM_PORT, BAUD_RATES)
ser.readline()
scene = canvas(align='left', width=0, height=0, background=vec(1, 0, 1))
v_graph = graph(align='left', width='700', xtitle='t', ytitle='時速(km/h)', background=vector(0, 0, 0), ymin=0, ymax=255)
v_t = gcurve(color=color.blue, graph=v_graph, width=5)
T_graph = graph(align='right', width='700', xtitle='t', ytitle='引擎溫度(°C)', background=vector(0, 0, 0), ymin=0, ymax=255)
T_t = gcurve(color=color.red, graph=T_graph, width=5)
scene1 = canvas(align='left', width=0, height=0, background=vec(1, 0, 1))
t0 = time.time()
train = 1
speed_limit = 150
invasion = 0
v = [0, 0]
T = [0, 0]
brakeState = [0, 0]
overspeed = [0, 0]
overtemp = [0, 0]
def train_select(m):
    global train
    train_dict = {'普悠瑪號': 0, '太魯閣號': 1}
    train = train_dict[m.selected]
    v_t.delete()
    T_t.delete()
    # print(train)

def reset_button(b):
    global t0
    t0 = time.time()
    v_t.delete()
    T_t.delete()
menu(choices=['普悠瑪號', '太魯閣號'], bind=train_select, pos=scene.caption_anchor, selected='太魯閣號')
button(text='Reset', bind=reset_button, pos=scene.caption_anchor)
def set_speed(s):
    global speed_limit
    speed_limit = s.number
    # print(s.number)
    out = str(speed_limit) + ',' + str(int(invasion)) + '\n'
    ser.write(out.encode('ascii'))
    print(out)

def set_invasion(r):
    global invasion
    invasion = r.checked
    out = str(speed_limit) + ',' + str(int(invasion)) + '\n'
    ser.write(out.encode('ascii'))
    print(out)
scene1.append_to_caption('速限:')
speed_control = winput(bind=set_speed, pos=scene1.caption_anchor, text=str(speed_limit), width=50)
scene1.append_to_caption('\n')
checkbox(bind=set_invasion, pos=scene1.caption_anchor, text='軌道異物入侵', width=50)
scene1.append_to_caption('\n')
def f(b):
    pass
brake_display0 = button(background=color.white, bind=f, text='普悠瑪號煞車')
brake_display1 = button(background=color.white, bind=f, text='太魯閣號煞車')
scene1.append_to_caption('\n')
overspeed_display0 = button(background=color.white, bind=f, text='普悠瑪號超速')
overspeed_display1 = button(background=color.white, bind=f, text='太魯閣號超速')
scene1.append_to_caption('\n')
overtemp_display0 = button(background=color.white, bind=f, text='普悠瑪號過熱')
overtemp_display1 = button(background=color.white, bind=f, text='太魯閣號過熱')
scene1.append_to_caption('\n')
def state_to_color(state):
    if state:
        return color.red
    else:
        return color.green
while 1:
    rate(20)
    data = ser.readline().decode().strip('\n').split(',')
    # data = [0, 0, 0, 0, 0, 0]
    i = int(data[0])
    v[i] = int(data[1])
    T[i] = int(data[2])
    brakeState[i] = int(data[3])
    overspeed[i] = int(data[4])
    overtemp[i] = int(data[5])
    v_t.plot(time.time() - t0, v[train])
    T_t.plot(time.time() - t0, T[train])
    out = str(speed_limit) + ',' + str(int(invasion)) + '\n'
    print(i, v, T, brakeState, overspeed, overtemp)
    # print(train, out)
    # ser.write(out.encode('ascii'))
    brake_display0.background = state_to_color(brakeState[0])
    brake_display1.background = state_to_color(brakeState[1])
    overspeed_display0.background = state_to_color(overspeed[0])
    overspeed_display1.background = state_to_color(overspeed[1])
    overtemp_display0.background = state_to_color(overtemp[0])
    overtemp_display1.background = state_to_color(overtemp[1])
authors: ["noreply@github.com"] | author_id: MakeNTU.noreply@github.com

blob_id: 2b6f8dff268c3503dbf74f2003923095f66d1922 | directory_id: c5d5c36636c4ea7b05bbb65aa9981ab1749baf43
path: /foods/models.py | content_id: f9ae3584600a81da95db5a5531c73360a360482e
detected_licenses: ["MIT"] | license_type: permissive | repo_name: akuprik/foodgram-project
snapshot_id: fd770bed208ee828b62e3e38b040e95c05403301 | revision_id: 4eb1c79fc9147a04fb675a74df86e67c95ae6c68 | branch_name: refs/heads/master
visit_date: 2023-04-14T04:43:06.508649 | revision_date: 2021-04-24T05:45:57 | committer_date: 2021-04-24T05:45:57
github_id: 346,740,662 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 890 | extension: py
content:
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Ingredient(models.Model):
    name = models.CharField(max_length=256)
    unit = models.CharField(max_length=64)

    def __str__(self):
        return '{}, {}'.format(self.name, self.unit)


class Recipe(models.Model):
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=256)
    description = models.TextField()
    cooking_time = models.IntegerField()
    ingredients = models.ManyToManyField(Ingredient, through='IngredientRecipe')

    def __str__(self):
        return '{} ({})'.format(self.title, self.author)


class IngredientRecipe(models.Model):
    ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)
    recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE)
    value = models.IntegerField()
authors: ["akupr@yandex.ru"] | author_id: akupr@yandex.ru

blob_id: d2553312d3639d7b24781b350bb59dc90989953b | directory_id: 2cce9bca5cbe600ce87a8dc34dc9222704bbc98a
path: /training/train_cfm_512_deeplabv3_mobilenetv2.py | content_id: c052a3caf064c88835cec80b350bd6d919db5535
detected_licenses: ["MIT"] | license_type: permissive | repo_name: daniel-cheng/CALFIN
snapshot_id: ea335ed0799ceeafc148c54cb748fbed364aeb4d | revision_id: fbe5090d0ab4d48016a4f66bcb441a7c8faa56a2 | branch_name: refs/heads/master
visit_date: 2023-05-15T07:05:27.451758 | revision_date: 2021-04-20T23:03:37 | committer_date: 2021-04-20T23:03:37
github_id: 185,285,507 | star_events_count: 17 | fork_events_count: 7
gha_license_id: MIT | gha_event_created_at: 2022-06-03T15:48:39 | gha_created_at: 2019-05-06T23:22:54 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,200 | extension: py
content:
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
from keras.models import Model, Input, load_model
from keras.layers import Concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, Dropout, UpSampling2D, BatchNormalization, RepeatVector, Reshape, Permute, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.regularizers import l1, l2, l1_l2
from keras.activations import relu, sigmoid
from keras.layers import Activation
from keras import backend as K
from tensorflow.python.client import device_lib
from keras.applications import imagenet_utils
from segmentation_models.losses import bce_jaccard_loss, jaccard_loss, binary_crossentropy
from segmentation_models.metrics import iou_score
import sys
sys.path.insert(0, 'keras-deeplab-v3-plus')
from model import Deeplabv3
from clr_callback import CyclicLR
from AdamAccumulate import AdamAccumulate
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import cv2, glob
from skimage.io import imsave, imread
from skimage.transform import resize, rotate, rescale
from random import shuffle
from data_cfm_512 import load_validation_data
from albumentations import *
from aug_generators import aug_daniel, imgaug_generator
img_size = 512
data_path = 'data/'
pred_path = 'preds/'
temp_path = 'temp/'
K.set_image_data_format('channels_last') # TF dimension ordering in this code
if __name__ == '__main__':
    print('-'*30)
    print('Loading validation data...')
    print('-'*30)
    validation_data = load_validation_data(img_size)
    model_checkpoint = ModelCheckpoint('cfm_weights_mobilenetv2_' + str(img_size) + '_e{epoch:02d}_iou{val_iou_score:.4f}.h5', monitor='val_iou_score', save_best_only=False)
    clr_triangular = CyclicLR(mode='triangular2', step_size=4000, base_lr=6e-4, max_lr=6e-5)
    callbacks_list = [
        #EarlyStopping(patience=6, verbose=1, restore_best_weights=False),
        # clr_triangular,
        model_checkpoint
    ]
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    img_shape = (img_size, img_size, 1)
    # flatten_shape = (img_size * img_size,)
    # target_shape = (img_size, img_size, 3)
    inputs = Input(shape=img_shape)
    # r1 = Reshape(flatten_shape)(inputs)
    # r2 = RepeatVector(3)(r1)
    # r3 = Reshape(target_shape)(r2)
    base_model = Deeplabv3(input_shape=img_shape, classes=1, alpha = 1.4, backbone='mobilenetv2', weights=None)
    last_linear = base_model(inputs)
    out = Activation('sigmoid')(last_linear)
    model = Model(inputs, out)
    model.compile(optimizer=AdamAccumulate(lr=1e-4, accum_iters=4), loss=bce_jaccard_loss, metrics=['binary_crossentropy', iou_score, 'accuracy'])
    model.summary()
    # model.load_weights('cfm_weights_512_e07_iou0.0139.h5')
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    train_generator = imgaug_generator(4, img_size)
    history = model.fit_generator(train_generator,
        steps_per_epoch=2000,
        epochs=80,
        validation_data=validation_data,
        verbose=1,
        # max_queue_size=64,
        # use_multiprocessing=True,
        # workers=2,
        callbacks=callbacks_list)
    print(history.history)
authors: ["dcheng334@gmail.com"] | author_id: dcheng334@gmail.com

blob_id: 1a20152a4e9f3a965e26b539ee8d244aba84e05e | directory_id: 854c71acb6696d0ba6ff957aeb8f5db63ad47567
path: /pr4/settings.py | content_id: 7a76ba83f811a6ee9c5f96b5f4f7013b4cc32444
detected_licenses: [] | license_type: no_license | repo_name: ivan-emtsov/ind9
snapshot_id: 87d1ca68ff85fe55882c1c1f040b8a915cf7d2ae | revision_id: 9ca9a9c568e44d4b1d3f74afdbcdd1937fec3fa7 | branch_name: refs/heads/master
visit_date: 2020-11-27T18:03:35.965163 | revision_date: 2019-12-22T11:10:36 | committer_date: 2019-12-22T11:10:36
github_id: 229,554,114 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 3,413 | extension: py
content:
"""
Django settings for pr4 project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd0338511-d112-4010-8a41-b36d11d03c1f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app',
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pr4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media'
],
},
},
]
WSGI_APPLICATION = 'pr4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
authors: ["nikita.mihaylov@waltex.ru"] | author_id: nikita.mihaylov@waltex.ru

blob_id: 2657dd3f3e69d9a929e71eb62f1987ad44ad513f | directory_id: 6f3790b00ddf3a90fa8a8310fb1257be0d685e7a
path: /socialhub/useraccounts/views.py | content_id: 4a1faaa1034a032042cc928bb1af2d8497b45ade
detected_licenses: [] | license_type: no_license | repo_name: Erikfj/socialhub
snapshot_id: 76954c17a9378d5b2eb3bf4c281c60ec9aaee355 | revision_id: fb3bd1799ff0d32ea1dbd1f7d67266b3762284c2 | branch_name: refs/heads/master
visit_date: 2021-01-23T02:28:49.728538 | revision_date: 2015-06-03T09:13:23 | committer_date: 2015-06-03T09:13:23
github_id: 34,781,390 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 1,480 | extension: py
content:
from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
# Create your views here.
def user_login(request):
    context = {}
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('frontpage')
        else:
            context['login_failed'] = True
    return render(request, 'useraccounts/login.html', context)


def user_logout(request):
    logout(request)
    return redirect('frontpage')


def user_register(request):
    context = {}
    if request.method == "POST":
        user = User()
        user.first_name = request.POST.get("firstname")
        user.last_name = request.POST.get("lastname")
        user.username = request.POST.get("username")
        user.email = request.POST.get("email")
        user.set_password(request.POST.get("password"))
        user.save()
        context['user_saved_successfully'] = True
    return render(request, 'useraccounts/register.html', context)


def user_settings(request):
    context = {}
    if request.method == "POST":
        user = request.user
        user.first_name = request.POST.get("firstname")
        user.last_name = request.POST.get("lastname")
        user.email = request.POST.get("email")
        user.save()
        context['user_updated_successfully'] = True
    return render(request, 'useraccounts/settings.html', context)
authors: ["filip@adminisatorsmbp.lan"] | author_id: filip@adminisatorsmbp.lan

blob_id: c768e9f572a0fb4718a5cd16dcba759dba8ba8e0 | directory_id: 8dc4724260d097d7bd785d45dbab0c95e91e95ff
path: /manage.py | content_id: c14cf40bcd00bc7c338736b75ce3530a3c080916
detected_licenses: [] | license_type: no_license | repo_name: ManuDIY/Learn-Django-Authentication-System
snapshot_id: 38cc9dbff08fb34fbccff4a816cd8b0b23fdae55 | revision_id: 4af6776cea41c072a9703b04aa454054ac705f46 | branch_name: refs/heads/master
visit_date: 2022-09-20T16:13:14.787540 | revision_date: 2019-06-23T06:51:47 | committer_date: 2019-06-23T06:51:47
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 632 | extension: py
content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webbasedauth.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
authors: ["henrymbuguak@gmail.com"] | author_id: henrymbuguak@gmail.com

blob_id: 2c753b738f062d9b1794b3b800ad4d2abbeb9aeb | directory_id: 64085c1c9c9993048d7de5e56064bc1a7ea6ca22
path: /hash.py | content_id: 8f403e9edbf5ba80102a4537896e963971456fa6
detected_licenses: [] | license_type: no_license | repo_name: 1ckov/Web_A-5_Project_Repo
snapshot_id: f298188de1082356f4d497a4dac450b4e8c22fc4 | revision_id: 432f726aa9f6b7fd693ed7c583ffd817b8b285f5 | branch_name: refs/heads/main
visit_date: 2023-06-10T13:55:47.319251 | revision_date: 2021-07-04T02:18:37 | committer_date: 2021-07-04T02:18:37
github_id: 352,611,082 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 342 | extension: py
content:
import hashlib
password=(" ")
salt=(b"100000")
key = hashlib.pbkdf2_hmac(
'sha256', # The hash digest algorithm for HMAC
password.encode('utf-8'), # Convert the password to bytes
salt, # Provide the salt
100000, # It is recommended to use at least 100,000 iterations of SHA-256
dklen=128 # Get a 128 byte key
)
print()
authors: ["sa6o.hristov96@gmail.com"] | author_id: sa6o.hristov96@gmail.com

blob_id: 2e86b740ae0c926fe248bb6620ce44b4e1e3d9a0 | directory_id: 8e589cd52e92ca1f2c085e435f2e5fe88389b1d6
path: /correlation-master/coffeeVsSleep.py | content_id: 70cb24d987e0732a3f863490a054b5df662c383b
detected_licenses: ["MIT"] | license_type: permissive | repo_name: vmarques339/Cups-of-coffee-vs-hours-of-sleep-Students-marks-vs-Attendance
snapshot_id: 9923290bb48da8ea979c1b92d9c0ca484cd68ce8 | revision_id: 84a7fa4eea79bcf9536c651548eec360f029c9df | branch_name: refs/heads/main
visit_date: 2023-09-02T01:54:47.749960 | revision_date: 2021-11-20T19:32:23 | committer_date: 2021-11-20T19:32:23
github_id: 430,192,313 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 234 | extension: py
content:
import plotly.express as px
import csv
with open("cups of coffee vs hours of sleep.csv") as csv_file:
    df = csv.DictReader(csv_file)
    fig = px.scatter(df, x="Coffee in ml", y="sleep in hours", color="week")
    fig.show()
authors: ["noreply@github.com"] | author_id: vmarques339.noreply@github.com

blob_id: a9257397276aca8e36cb069196d0038845d1f29c | directory_id: f8bf7e9efdc2027f18da77026902a24b2ebb2bc7
path: /test_rc.py | content_id: 99fa579df0d9af8116daa8af35e01bfcfc4cbfd4
detected_licenses: [] | license_type: no_license | repo_name: zhaomr13/TheStandardModelGame
snapshot_id: 5767e0b57c8162655d6e7fae1acc1ed816c0b621 | revision_id: 36a939e97053cb7bd7e235dcc5a125d3ba2d266b | branch_name: refs/heads/master
visit_date: 2021-01-23T03:58:59.162339 | revision_date: 2017-04-01T14:32:25 | committer_date: 2017-04-01T14:32:25
github_id: 86,139,196 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
length_bytes: 37,022 | extension: py
content:
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.6.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
… (0x2219 = 8,729 bytes of hex-escaped PNG image data elided; this is the embedded resource payload referenced by qt_resource_name and qt_resource_struct below) …
"
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x06\
\x06\x8c\x8a\x82\
\x00\x61\
\x00\x76\x00\x61\x00\x74\x00\x61\x00\x72\
\x00\x05\
\x00\x33\x57\x47\
\x00\x30\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x24\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
[
"mingrui.zhao@cern.ch"
] |
mingrui.zhao@cern.ch
|
eb5023ec4fb7c8bc7ec1220eec4973aa2f48faeb
|
0a602f99463e233666d0d95d033da51664bbf31d
|
/Chatbot 4/training.py
|
ede6cded656ab933496bcd8834c22845f5cebaed
|
[] |
no_license
|
MarkoMarcelo/JIRA
|
9a952c736312b7fcbb2966c765e5bbe19cc5fe7d
|
7f8301521a6c68e440930620476348acbc85e94e
|
refs/heads/main
| 2023-06-30T10:56:16.671076
| 2021-07-29T21:21:35
| 2021-07-29T21:21:35
| 367,192,921
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,608
|
py
|
import random,json,pickle
from re import S
from typing import Pattern
from nltk.chunk.util import accuracy
import numpy as np
import nltk
nltk.download('wordnet')
nltk.download('punkt')  # nltk.word_tokenize below needs the punkt tokenizer data
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.python.keras.engine import training
def training():
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('intents.json').read())
words = []
classes = []
documents = []
ignore_letters = ['¿', '?', '!', '¡', '.', ',', ';', ':', '-', '_']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documents.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
#we prevent duplicates from being created or added and order the words
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
training =[]
output_empty = [0] * len(classes)
for document in documents:
bag = []
word_patterns = document[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in words:
bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[classes.index(document[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training)
train_x = list(training[: , 0])
train_y = list(training[: , 1])
#Neural Network
model = Sequential()
model.add(Dense(128, input_shape = (len(train_x[0]),), activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation = 'relu'))
model.add(Dense(len(train_y[0]), activation = 'softmax'))
sgd = SGD(lr=0.01, decay= 1e-6, momentum= 0.9, nesterov= True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=0)
model.save('chatbotmodel.h5', hist)
print("Done")
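# Minimal usage sketch (editorial; assumes intents.json sits next to this
# script): calling training() reads intents.json and writes words.pkl,
# classes.pkl and chatbotmodel.h5 into the working directory.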
|
[
"noreply@github.com"
] |
MarkoMarcelo.noreply@github.com
|
92ea62dd911a2970488e170fdfdb662b2d03f718
|
7caf540d2cf71e27c130f5cc791345b73774af50
|
/failed_attempts/migrations/0003_auto_20180503_1147.py
|
bd3b1cb80a16963d816629d65da30675a82dc165
|
[] |
no_license
|
eyetea-solutions/django-failed-attempts
|
b1e87c003cd82fb6d76e31ca39feb35eee745b38
|
8c1fffbc39c040b0460c227743521a47599962a0
|
refs/heads/master
| 2020-03-17T05:33:06.010389
| 2018-05-15T14:42:16
| 2018-05-15T14:42:16
| 133,321,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-05-03 11:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('failed_attempts', '0002_auto_20151224_0058'),
]
operations = [
migrations.AlterField(
model_name='failedattempt',
name='IP',
field=models.GenericIPAddressField(null=True, verbose_name='IP Address'),
),
migrations.AlterField(
model_name='failedattempt',
name='failures',
field=models.PositiveIntegerField(default=0, verbose_name='Failures'),
),
migrations.AlterField(
model_name='failedattempt',
name='timestamp',
field=models.DateTimeField(auto_now=True, verbose_name='Last failed attempt'),
),
migrations.AlterField(
model_name='failedattempt',
name='username',
field=models.CharField(max_length=255, verbose_name='Username'),
),
]
|
[
"martin.taleski@gmail.com"
] |
martin.taleski@gmail.com
|
8a59c51857c46c7ae343c70e1e3b12a38ff518e7
|
fd6051cd3b4d757083ab7bff376d6cdf654f7202
|
/e2e/Vectors/Generation/Merit/TwoHundredSeventyFour/RespondsWithRequestedCapacity.py
|
516b9a69c935498f84f6fe320f11eaf5ed59b6d1
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
MerosCrypto/Meros
|
aa4532547ba61f06d2560a30a069a57d2c254584
|
5c282509fed18e8b23d466022787b017991e51b9
|
refs/heads/master
| 2023-05-31T02:08:40.653223
| 2023-05-30T21:33:28
| 2023-05-30T21:33:28
| 134,137,513
| 69
| 20
|
NOASSERTION
| 2023-05-30T21:33:29
| 2018-05-20T09:36:33
|
Nim
|
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import json
import e2e.Libs.Ristretto.Ristretto as Ristretto
from e2e.Classes.Transactions.Transactions import Data, Transactions
from e2e.Classes.Consensus.VerificationPacket import VerificationPacket
from e2e.Classes.Consensus.SpamFilter import SpamFilter
from e2e.Vectors.Generation.PrototypeChain import PrototypeChain
edPrivKey: Ristretto.SigningKey = Ristretto.SigningKey(b'\0' * 32)
dataFilter: SpamFilter = SpamFilter(5)
transactions: Transactions = Transactions()
proto: PrototypeChain = PrototypeChain(1)
#Create five Datas.
#Six in total, thanks to the Block Data.
data: Data = Data(bytes(32), edPrivKey.get_verifying_key())
for i in range(5):
data.sign(edPrivKey)
data.beat(dataFilter)
transactions.add(data)
data = Data(data.hash, b"\0")
#Create a Block verifying all of them.
proto.add(0, [VerificationPacket(tx.hash, [0]) for tx in transactions.txs.values()])
with open("e2e/Vectors/Merit/TwoHundredSeventyFour/RespondsWithRequestedCapacity.json", "w") as vectors:
vectors.write(json.dumps({
"blockchain": proto.toJSON(),
"transactions": transactions.toJSON()
}))
|
[
"noreply@github.com"
] |
MerosCrypto.noreply@github.com
|
04e56972a44469997666bf7a0521933ba1f6a34a
|
442dae288109b9b202fecbe728eb61cba6de83d4
|
/tests/testcase.py
|
cfc30dd7551917b6e638cb5e35ba658ea8e4f229
|
[
"MIT"
] |
permissive
|
capy-inc/django-stdnet
|
751e4b28a6cdc1065647056132fcab955c21e250
|
9b8ab02ec73692f867947168ee0b88d41e603d82
|
refs/heads/master
| 2021-01-25T10:05:51.580249
| 2015-10-13T05:13:44
| 2015-10-13T05:13:44
| 24,475,688
| 3
| 1
| null | 2015-10-13T05:13:44
| 2014-09-25T21:19:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
from distutils.version import LooseVersion
from django.test import TestCase
class BaseTestCase(TestCase):
app_label = 'test'
def _setup_redis_db(self):
from djangostdnet import models
from djangostdnet import mapper
from . import redis_server_info
models.mapper = mapper.Mapper(default_backend='redis://%(host)s:%(port)d?db=%(db)d' % redis_server_info,
install_global=True)
def setUp(self):
from django.core.management.color import no_style
from django.db.models import loading
from djangostdnet import DJANGO_VERSION
self._setup_redis_db()
self.seen_models = set()
self.style = no_style()
# HACK
if LooseVersion('1.6') <= DJANGO_VERSION < LooseVersion('1.7'):
pass
elif LooseVersion('1.7') <= DJANGO_VERSION:
from django.apps.config import AppConfig
from django.utils.importlib import import_module
self.app = AppConfig(self.app_label, import_module(__name__))
loading.cache.ready = True
loading.cache.set_installed_apps([self.app])
else:
raise NotImplementedError
def _clear_registered_models(self):
from django.db.models import loading
from djangostdnet import DJANGO_VERSION
# HACK
if LooseVersion('1.6') <= DJANGO_VERSION < LooseVersion('1.7'):
loading.cache.app_models.clear()
elif LooseVersion('1.7') <= DJANGO_VERSION:
loading.cache.unset_installed_apps()
loading.cache.all_models.clear()
else:
raise NotImplementedError
from stdnet.odm import globals
globals._model_dict.clear()
def _clear_redis_db(self):
import redis
from . import redis_server_info
r = redis.from_url('redis://%(host)s:%(port)d?db=%(db)d' % redis_server_info)
r.flushdb()
def tearDown(self):
self._clear_registered_models()
self._clear_redis_db()
def create_table_for_model(self, model):
from django.db import connection
sql = connection.creation.sql_create_model(model, self.style)[0]
cursor = connection.cursor()
for statement in sql:
cursor.execute(statement)
def finish_defining_models(self):
from django.db.models import loading
from djangostdnet import DJANGO_VERSION
if LooseVersion('1.7') <= DJANGO_VERSION:
loading.cache.populate([self.app])
|
[
"yusuke@jbking.org"
] |
yusuke@jbking.org
|
1bc8d97bb3d425a6081356f805a0fe3124198083
|
a80a31418ce85348d886b8b2a6135b3c4d294407
|
/docker-project-run.py
|
93bcf266964c1616c09f69655014d6c1cd561b2d
|
[
"WTFPL"
] |
permissive
|
lku/docker-project-run
|
cc154d3de0760a1a79c555519919d87e09517244
|
d863fba99b19b36ca5e7de146a4abb847538b12e
|
refs/heads/master
| 2021-01-21T03:50:17.204464
| 2015-06-27T11:17:42
| 2015-06-27T11:17:42
| 39,071,180
| 0
| 0
| null | 2015-07-14T11:34:24
| 2015-07-14T11:34:24
| null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
#! /usr/bin/env python
# Copyright 2014 Jan Markup <mhmcze@gmail.com>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
import glob
import os
import sys
if not os.geteuid() == 0:
sys.exit('You must be root to run this application, please use sudo and try again.')
PATHS = ['.']
for path in PATHS:
path = os.path.abspath(path)
projects = glob.glob(path + '/*/docker-compose.yml')
if projects:
print '--- ' + os.path.basename(path) + ' ---'
for project in projects:
print os.path.basename(os.path.dirname(project))
project = raw_input('>>> ')
for path in PATHS:
compose = glob.glob(path + '/' + project + '/docker-compose.yml')
if compose:
projectDir = os.path.dirname(compose[0])
os.system('cd "' + projectDir + '" && docker-compose up')
else:
print 'Project not found.'
|
[
"mhmcze@gmail.com"
] |
mhmcze@gmail.com
|
6ccc7fea6fff8c7c66af04903599608f969a0107
|
4c403ba3d9880e52945083447f9c1bdf8e6dd2c5
|
/openssl_x509_verify_example.py
|
f911cca2b8ab8dbf263d3d16fb064795df943f02
|
[
"MIT"
] |
permissive
|
mk-j/Py_openssl_x509_verify
|
1618f4e23a1de53ca1d584f85b3f7e4894e9951a
|
6d877cc14efb23cff00142640a57850cc9afa442
|
refs/heads/master
| 2021-09-19T08:48:02.482871
| 2018-07-25T20:07:11
| 2018-07-25T20:07:11
| 111,472,562
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
#!/usr/bin/python
import x509tools
import os
def file_read(filename):
content=''
if os.path.exists(filename):
fp = open(filename, "r")
content = fp.read()
fp.close()
return content
def check_openssl_cipher():
v = x509tools.openssl_cipher_iv_length('AES-128-CBC')
print("openssl cipher iv length of aes-128-cbc is %s" % v)
def check_x509_verify_rsa():
ca_pem = file_read('./certs/RSA_DigiCertGlobalRootCA.crt')
cert_pem = file_read('./certs/RSA_DigiCertSHA2SecureServerCA.crt')
x = x509tools.openssl_x509_verify(cert_pem, ca_pem)
print("openssl x509 verify result for an RSA cert is %s" % x)
def check_x509_verify_ecc():
ca_pem = file_read('./certs/ECC_DigiCertGlobalRootCA3.crt')
cert_pem = file_read('./certs/ECC_DigiCertGlobalCAG3.crt')
x = x509tools.openssl_x509_verify(cert_pem, ca_pem)
print("openssl x509 verify result for an ECC cert is %s" % x)
def check_x509_verify_bad():
#ca_pem = file_read('./certs/ECC_DigiCertGlobalRootCA3.crt')
#cert_pem = file_read('./certs/RSA_DigiCertSHA2SecureServerCA.crt')
ca_pem = file_read('./certs/RSA_DigiCertGlobalRootCA.crt')
cert_pem = file_read('./certs/ECC_DigiCertGlobalCAG3.crt')
x = x509tools.openssl_x509_verify(cert_pem, ca_pem)
print("openssl x509 verify result for an RSA/ECC cert is %s" % x)
def main():
check_openssl_cipher()
check_x509_verify_rsa()
check_x509_verify_ecc()
check_x509_verify_bad()
if __name__ == "__main__":
main()
|
[
"mark@zedwood.com"
] |
mark@zedwood.com
|
0a72067d6495c2f7fdd93431093b9b9eb1ade8b5
|
493318707fd161c5a6b8c2be5818dbbce5889f8b
|
/trumptwitterarchive_spider/trumptwitterarchive_spider/settings.py
|
47e1f554f63028547d85257994a9ceee979c6475
|
[] |
no_license
|
marcpre/learning_python_scrapy
|
17999eb781fb30fdd4a6dabfd5b822f0597a0793
|
13a22009fb3f6c95d17d7ece2f7eec4503240af6
|
refs/heads/master
| 2020-03-30T00:30:39.446819
| 2018-10-18T04:16:14
| 2018-10-18T04:16:14
| 150,527,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,289
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for trumptwitterarchive_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'trumptwitterarchive_spider'
SPIDER_MODULES = ['trumptwitterarchive_spider.spiders']
NEWSPIDER_MODULE = 'trumptwitterarchive_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'trumptwitterarchive_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'trumptwitterarchive_spider.middlewares.TrumptwitterarchiveSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'trumptwitterarchive_spider.middlewares.TrumptwitterarchiveSpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'trumptwitterarchive_spider.pipelines.TrumptwitterarchiveSpiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"marcus.presich@gmail.com"
] |
marcus.presich@gmail.com
|
9d1748c309ef8b2c3091a29a960d0bd680b5c3ac
|
06810ff6338306fbb114a20e416d3e891e9db84c
|
/rve_generator/rve_gen.py
|
69a4fd0c55e296830f0c87e7e58c7daee44348e5
|
[
"MIT"
] |
permissive
|
hossen-code/RVE_PY
|
fd88d7c7fed5e484a6ca00335acbabf00498788a
|
09029f0aecbdfef25470e657baa9a103e59a569a
|
refs/heads/master
| 2022-11-24T00:50:03.586085
| 2020-07-24T03:16:17
| 2020-07-24T03:16:17
| 282,079,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# -*- coding: utf-8 -*-
"""
Creating random micromechanical RVE of composites
author Hossein Ghayoor
"""
from rve_generator.point import Point
from rve_generator.utility import is_colliding, calculate_all_distances, sum_n_closest_point_distance, plot_all_points
if __name__ == "__main__":
all_points = []
point_collection = []
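# Editorial note: the loop below is simple rejection sampling - 2000 candidate
# points are drawn and each candidate is kept only when is_colliding() reports
# no overlap with the points accepted so far.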
for i in range(2000):
new_point = Point()
if not is_colliding(point_collection, new_point):
point_collection.append(new_point)
all_distances = calculate_all_distances(point_collection)
three_closest_dist = sum_n_closest_point_distance(all_distances)
plot_all_points(point_collection)
|
[
"hghayoor@gmail.com"
] |
hghayoor@gmail.com
|
5f27d85de8ca8dac144d8acdf4b03212ec2d074c
|
009a6574b3f655c607b19a5d3468dce13cd59bfa
|
/forms.py
|
df907102e628e0a0dbe896809ab585cab1d07d50
|
[] |
no_license
|
ADLIAhmed/JobifyAMOA
|
29e97bec8d799e7c2b2d0e9ea74f4018b50ff2d9
|
0c76c65d4089f0929c1d6b3feb282170f6bb4e0c
|
refs/heads/master
| 2021-05-22T21:33:02.156032
| 2020-04-05T17:17:52
| 2020-04-05T17:17:52
| 253,104,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
from django import forms
from .models import offre
class offre_form(forms.ModelForm):
class Meta:
model = offre
fields = [
'offre_title',
'offre_contenu',
'offre_ville',
'offre_renumeration',
'offre_periode',
]
from django.contrib.auth.models import User
class FormName(forms.Form):
Name=forms.CharField()
Age=forms.IntegerField()
Email=forms.CharField()
PhoneNumber=forms.CharField()
Addresse=forms.CharField()
class FormDescription(forms.Form):
Name=forms.CharField()
desc_text=forms.CharField()
class FormPortfolio(forms.Form):
user=forms.CharField()
Portfolio_name=forms.CharField()
image=forms.ImageField()
Type=forms.CharField()
date=forms.DateField()
|
[
"ahmedadli.etude@gmail.com"
] |
ahmedadli.etude@gmail.com
|
535820d87dd62c3fd0d9dbf0aaf588fb9b1d93a6
|
8ce656578e04369cea75c81b529b977fb1d58d94
|
/clients/apps.py
|
2dcd6c20d3ff5901668f0dd30dd374d114919e48
|
[] |
no_license
|
JJvzd/django_exp
|
f9a08c40a6a7535777a8b5005daafe581d8fe1dc
|
b1df4681e67aad49a1ce6426682df66b81465cb6
|
refs/heads/master
| 2023-05-31T13:21:24.178394
| 2021-06-22T10:19:43
| 2021-06-22T10:19:43
| 379,227,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
from django.apps import AppConfig
class ClientsConfig(AppConfig):
name = 'clients'
verbose_name = 'Компании'
def ready(self):
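# Importing here, rather than at module level, is the standard Django pattern:
# the receivers in clients/signal_handlers.py are registered once the app
# registry is fully loaded.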
import clients.signal_handlers # noqa: F401
|
[
"javad@MacBook-Pro-Namig.local"
] |
javad@MacBook-Pro-Namig.local
|
c52b7a3785f2776a9cde133ad821bd1dc1f8affb
|
9ba474c019baaded3a1918fe8723de7e8ad0ccf5
|
/lga_dict.py
|
add798b83367611f51f293ebf79256db11335830
|
[] |
no_license
|
ait360/query_google_places
|
4039643e765d9ef9f6ffcce1232ee63785240125
|
93e828019c7d47957040395caaa1daa788495ea7
|
refs/heads/master
| 2022-02-08T05:31:27.061705
| 2022-02-02T19:59:40
| 2022-02-02T19:59:40
| 249,086,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,608
|
py
|
from collections import defaultdict
local_govt_area = defaultdict(list,
Abuja= ['Abaj', 'Abaji', 'Abuja+Municipal', 'Bwari', 'Gwagwalada', 'Kuje',
'Kwali', 'Municipal+Area+Council'],
Abia= ['Aba+North', 'Aba+North', 'Aba+South', 'Arochukwu', 'Bende', 'Bende',
'Ikwuano', 'Ikwuano', 'Isiala', 'Isiala+Ngwa+North', 'Isiala+Ngwa+South',
'Isiala+Ngwa+South', 'Isuikwuato', 'Isukwuato', 'Ngwa+North', 'Obi+Ngwa',
'Ohafia', 'Osisioma', 'Ugwunagbo', 'Ukwa+East', 'Ukwa+East', 'Ukwa+West',
'Ukwa+West', 'Umu+Nneochi', 'Umuahia', 'Umuahia+North', 'Umuahia+South'],
Adamawa= ['Hung', 'Demsa', 'Fufore', 'Fufure', 'Ganye', 'Gayuk', 'Girei', 'Gombi',
'Grie', 'Hong', 'Jada', 'Jimeta', 'Lamurde', 'Madagali', 'Maiha',
'Mayo+Belwa', 'Michika', 'Mubi+North', 'Mubi+South', 'Numan', 'Numna',
'Shelleng', 'Song', 'Toungo', 'Yola+North', 'Yola+North', 'Yola+South'],
Anambra = ['Aguata', 'Anambra', 'Anambra+East', 'Anambra+West', 'Anaocha',
'Awka+North', 'Awka+South', 'Ayamelum', 'Dunukofia', 'Ekwusigo',
'Idemili+North', 'Idemili+South', 'Ihiala', 'Njikoka', 'Nnewi+North',
'Nnewi+South', 'Ogbaru', 'Onitsha+North', 'Onitsha+South', 'Orumba+North',
'Orumba+South', 'Oyi'],
Bauchi = ['Alkaleri', 'Ganjuwa', 'hira', 'Bauchi', 'Giade', 'Tafawa+Balewa',
'Bogoro', 'Jama+are', 'Itas+gadau', 'Darazo', 'Katagum', 'Toro', 'Dass',
'Kirfi', 'Warji', 'Gamawa', 'Misau', 'Zaki', 'Ningi', 'Dambam', 'Damban',
'Itas', 'Jamaare', 'Shira', 'Gadau'],
Bayelsa= ['Brass', 'Ekeremor', 'Kolok', 'Kolokuma', 'Membe', 'Nembe',
'Ogbia', 'Opokuma', 'Sagbama', 'Southern+Ijaw', 'Yenagoa'],
Benue= ['Ador', 'Obi', 'Ukum', 'Agatu', 'Kastina+ala', 'Ogbadibo', 'Vandekya',
'Apa', 'Konshisha', 'Ohimini', 'Buruku', 'Kwande', 'Oju', 'Gboko', 'Logo',
'Okpokwu', 'Guma', 'Makurdi', 'Oturkpo', 'Gwer+east', 'Tarka', 'Ado',
'Gwer+West', 'Katsina+Ala', 'Ushongo', 'Vandeikya'],
Bornu= ['Abadam', 'Abadan', 'Askira', 'Balge', 'Bama', 'Bayo', 'Biu', 'Chibok',
'Damboa', 'Dikwa', 'Dikwagubio', 'Gubio', 'Guzamala', 'Gwoza', 'Hawul',
'Jere', 'Kaga', 'Kala', 'Kalka', 'Konduga', 'Kukawa', 'Kwaya+Kusar',
'Kwaya+ku', 'Mafa', 'Magumeri', 'Maiduguri', 'Marte', 'Mobbar', 'Monguno',
'Ngala', 'Nganzai', 'Shani', 'Uba'],
Delta= ['Aniocha+North', 'Aniocha+south', 'Anioha', 'Bomadi', 'Burutu',
'Ethiope+east', 'Ethiope+west', 'Ika+north+east', 'Ika+south', 'Isoko+north',
'Isoko+south', 'Ndokwa+east', 'Ndokwa+west', 'Okpe', 'Oshimili+north',
'Oshimili+south', 'Patani', 'Sapele', 'Udu', 'Ughelli+north', 'Ughelli+south',
'Ukwuani', 'Uviwie', 'Uvwie', 'Warri+central', 'Warri+north', 'Warri+south',
'Warri+South+West'],
Ebonyi= ['Abakaliki', 'Afikpo+north', 'Afikpo+south', 'Ebonyi', 'Edda', 'Ezza',
'Ezza+North', 'Ezza+south', 'Ikwo', 'Ishielu', 'Ivo', 'Izzi', 'Ohaozara',
'Ohaukwu', 'Onicha'],
Edo= ['Akoko+Edo', 'Egor', 'Esan+central', 'Esan+North-East', 'Esan+south+east',
'Esan+west', 'Etsako', 'Etsako+central', 'Etsako+east', 'Etsako+West',
'Igueben', 'Ikpoba+Okha', 'Ivia+north', 'Oredo', 'Orhionmwon', 'Orhionwon',
'Ovia+North-East', 'Ovia+south+west', 'Owan+East', 'Owan+south', 'Owan+west',
'Uhunmwonde', 'Uhunwonde'],
Ekiti= ['Osi', 'Ado+Ekiti', 'Aiyekire', 'Effon+Alaiye', 'Efon', 'Ekiti+east',
'Ekiti+south+west', 'Ekiti+west', 'Emure', 'Emure', 'Gbonyin', 'Ido',
'Ido+Osi', 'Ijero', 'Ikere', 'Ikole', 'Ilejemeje', 'Irepodun', 'Ise',
'ljero', 'llejemejeIrepodun', 'Moba', 'Orun', 'Oye', 'Ifelodun'],
Enugu= ['Aninri', 'Awgu', 'Enugu+east', 'Enugu+north', 'Enugu+south', 'Ezeagu',
'Igbi+etiti', 'Igbo+Etiti', 'Igbo+Eze+north', 'Igbo+Eze+South', 'Isi+Uzo',
'Nkanu+East', 'Nkanu+West', 'Nsukka', 'Oji+river', 'Udenu', 'Udi', 'Undenu',
'Uzo+Uwani'],
Gombe= ['Akko', 'Bajoga', 'Balanga', 'Biliri', 'Billiri', 'Deba', 'Dukku', 'Dunakaye',
'Funakaye', 'Gombe', 'Kaltungo', 'Kwami', 'Nafada', 'Shomgom', 'Shongom', 'Yamaltu'],
Imo= ['Aboh+Mbaise', 'Aguta', 'Ahiazu+Mbaise', 'Ehime+Mbano', 'Ezinhite', 'Ezinihitte',
'Ideato+North', 'Ideato+south', 'Ihitte', 'Ikeduru', 'Isiala', 'Isiala+Mbano', 'Isu',
'Mbaitoli', 'Ngor+Okpala', 'Njaba', 'Nkwere+Obowo', 'Nkwerre', 'Nwangele', 'Obowo',
'Oguta', 'Ohaji+Egbema', 'Egbema', 'Okigwe', 'Onuimo', 'Orlu', 'Orsu', 'Oru', 'Oru+East',
'Oru+west', 'Owerri', 'Owerri+Municipal', 'Owerri+North', 'Owerri+south', 'Owerri+West',
'Uboma', 'Unuimo'],
Jigawa= ['Auyo', 'Babura', 'Biriniwa', 'Birnin+Kudu', 'Birnin+magaji', 'Birniwa', 'Buijiį',
'Buji', 'Dute', 'Dutse', 'Gagarawa', 'Garki', 'Gumel', 'Guri', 'Gwaram', 'Gwiwa',
'Hadeji', 'Hadejia', 'Jahun', 'Kafin+Hausa', 'kaugama', 'Kazaure', 'Kiri+Kasama',
'Kirikisamma', 'Kiyawa', 'Maigatari', 'Malam+Madori', 'Malamaduri', 'Miga', 'Ringim',
'Roni', 'Sule+Tankarka', 'Sule+Tankarkar', 'Taura', 'Yankwashi'],
Kaduna= ['Birnin+Gwari', 'Brnin+Gwari', 'Chikun', 'Chukun', 'Giwa', 'Igabi', 'Ikara', 'Jaba',
'Jemaa', 'Kabau', 'Kachia', 'Kaduna+North', 'Kaduna+south', 'Kagarko', 'Kagarok',
'Kajuru', 'Kaura', 'Kauru', 'Kere', 'Kubau', 'Kudan', 'Lere', 'Makarfi',
'Sabon+Gari', 'Sabongari', 'Sanga', 'Soba', 'Zangon+Kataf', 'Zaria', 'Jema'],
Kano= ['Kunch', 'Kura', 'Ajigi', 'Ajingi', 'Albasu', 'Bagwai', 'Bebeji', 'Bichi',
'Bunkure', 'Dala', 'Dambatta', 'Dawakin+kudu', 'Dawakin+tofa', 'doguwa',
'Fagge', 'Gabasawa', 'Garko', 'Garun+mallam', 'Gaya', 'Gezawa', 'Gwale',
'Gwarzo', 'Kabo', 'Kano', 'Kano+Municipal', 'Karay', 'Karaye', 'Kibiya', 'Kiru',
'Kumbotso', 'Kumbtso', 'Kunchi', 'Kura', 'Madobi', 'Maidobi', 'Makoda', 'Minjibir',
'MInjibir+Nassarawa', 'Nasarawa', 'Rano', 'Rimin+gado', 'Rogo', 'Shanono', 'Sumaila',
'Takai', 'Tarauni', 'Tofa', 'Tsanyawa', 'Tudun+Wada', 'Tudun+wada', 'Ungogo', 'Warawa',
'Wudil'],
Katsina= ['Bakori', 'Batagarawa', 'Batsari', 'Baure', 'Bindawa', 'Charanchi', 'Dan+Musa',
'Dandume', 'Danja', 'Dan-Musa', 'Daura', 'Dutsi', 'Dutsin+Ma', 'Faskar',
'Faskari', 'Funtua', 'Furfi', 'Ingawa', 'Jibia', 'Jibiya', 'Kafur', 'Kaita',
'Kankara', 'Kankia', 'Kankiya', 'Katsina', 'Kurfi', 'Kusada', 'KusadaMai+aduwa',
'Mai+Adua', 'Malumfashi', 'Mani', 'Mash', 'Mashi', 'Matazu', 'Musawa', 'Rimi',
'Sabuwa', 'Safana', 'Sandamu', 'Zango', 'MaiAdua'],
Kebbi= ['Aleiro', 'Aliero', 'Arewa+Dandi', 'Argungu', 'Augie', 'Bagudo', 'Birnin+Kebbi',
'Bunza', 'Dandi', 'Danko', 'Fakai', 'Gwandu', 'Jeda', 'Jega', 'Kalgo', 'Koko',
'Koko+besse', 'Maiyaama', 'Maiyama', 'Ngaski', 'Sakaba', 'Shanga', 'Suru', 'Wasugu',
'Yauri', 'Zuru', 'Besse'],
Kogi= ['Adavi', 'Ajaokuta', 'Ankpa', 'Bassa', 'Dekina', 'Ibaji', 'idah', 'Igalamela',
'Igalamela+Odolu', 'Ijumu', 'Kabba+bunu', 'Kabba', 'Kogi', 'lbaji', 'ljumu',
'Lokoja', 'Mopa+muro', 'Ofu', 'Ogori+magongo', 'Ogori', 'Okehi', 'Okene', 'Olamaboro',
'Omala', 'Yagba+east', 'Yagba+west', 'Bunu', 'Magongo'],
Kwara= ['Asa', 'Baruten', 'Ede', 'Edu', 'Ekiti', 'Ifelodun', 'Ilorin+East', 'Ilorin+south',
'Ilorin+West', 'Irepodun', 'Isin', 'Kaiama', 'Moro', 'Offa', 'Oke+ero', 'Oyun',
'Pategi'],
Lagos= ['Ikorodu', 'Agege', 'Alimosho', 'Alimosho+lfelodun', 'Amuwo+Odofin', 'Apapa',
'Badagry', 'Ejigbo', 'Epe', 'Eti+Osa', 'ljaye', 'fako', 'Ibeju+Lekki',
'Ifako+Ijaiye', 'Ikeja', 'Ikorodu', 'Kosofe', 'Lagos+Island', 'Lagos+Mainland',
'lIbeju+Lekki', 'Mushin', 'Ojo', 'Oshodi+-Isolo', 'Shomolu', 'Surulere'],
Nasarawa= ['Akwanga', 'Awe', 'Doma', 'Karu', 'Keana', 'Keffi', 'Kokona', 'Lafia',
'Nasarawa', 'Nasarawa+Egon', 'Nassarawa', 'Obi', 'Toto', 'Wamba', 'Eggon'],
Niger= ['Agaie', 'Agwara', 'Bida', 'Borgu', 'Bosso', 'Chanchaga', 'Chanchanga', 'Edati',
'Gbako', 'Gurara', 'Katcha', 'Kitcha', 'Kontagora', 'Lapai', 'Lavun', 'Magama',
'Mariga', 'Mashegu', 'Mokwa', 'Moshegu', 'Moya', 'Muya', 'Paiko', 'Paikoro',
'Rafi', 'Rijau', 'Shiroro', 'Suleija', 'Suleja', 'Tafa', 'Tawa+Wushishi',
'Wushishi'],
Ogun= ['Abeokuta+north', 'Abeokuta+south', 'Ado+Odo', 'Agbado+north', 'Agbado+south',
'Ewekoro', 'Idarapo', 'Ifo', 'Ijebu+North+East', 'Ijebu+Ode', 'Ikenne', 'Imeko+afon',
'Ipokia', 'jebu+north', 'ljebu+east', 'owode', 'lkenne', 'llugun+Alaro',
'Obafemi+Owode', 'Obafemi', 'Odeda', 'Odogbolu', 'Ogun+waterside', 'Remo+North',
'Sagamu', 'Shagamu', 'Yewa+North', 'Yewa+South', 'Ota', 'otta'],
Ondo= ['Akoko+north', 'Akoko+north+east', 'Akoko+North-West', 'Akoko+south', 'Akoko+south+east',
'Akoko+South-West', 'Akure', 'Akure+north', 'Akure+South', 'Ese+odo', 'Idanre',
'Ifedore', 'Ilaje', 'Okeigbo', 'Ile+Oluji', 'Irele', 'laje+oke-igbo', 'llaje', 'Odigbo',
'Okitipupa', 'Ondo', 'Ondo+east', 'Ondo+West', 'Ose', 'Owo'],
Osun= ['Aiyedaade', 'Aiyedire', 'Atakumosa+east', 'Atakumosa+west', 'Ayeda+ade', 'Ayedire',
'Bolawaduro', 'Boluwaduro', 'Boripe', 'Ede', 'Ede+north', 'Ede+South', 'Egbedore',
'Ejigbo', 'Ife+central', 'Ife+east', 'Ife+north', 'Ife+south', 'Ifedayo', 'Ifelodun',
'Ila', 'Ilesa+East', 'Ilesa+West', 'Ilesah+east', 'Irepodun', 'Irewole', 'Isokan', 'Iwo',
'lla+orangun', 'llesha+west', 'Obokun', 'Odo+Otin', 'Odo-otin', 'ola+oluwa', 'olorunda',
'Oriade', 'Orolu', 'Osogbo'],
Oyo= ['Afijio', 'Akinyele', 'Atiba', 'Atigbo', 'Atisbo', 'Attba', 'Egbeda', 'Ibadan+central',
'Ibadan+North', 'Ibadan+north+east', 'Ibadan+North-West', 'Ibadan+south+east',
'Ibadan+South-West', 'Ibarapa+Central', 'Ibarapa+East', 'Ibarapa+north', 'Ido',
'Ifedapo', 'Ifeloju', 'Irepo', 'Iseyin', 'Itesiwaju', 'Iwajowa', 'Kajola', 'Lagelu',
'lseyin', 'lwajowa', 'lwajowa+olorunshogo', 'Ogbomosho+north', 'Ogbomosho+south',
'Ogo+oluwa', 'Olorunsogo', 'Oluyole', 'Ona+ara', 'Ore+lope', 'Orelope', 'Ori+Ire',
'Orire', 'Oyo', 'Oyo+east', 'Oyo+west', 'Saki+east', 'Saki+west', 'Surulere'],
Plateau= ['Barkin+Ladi', 'Barkin', 'Bassa', 'Bokkos', 'Jos+east', 'Jos+north', 'Jos+south',
'Kanam', 'Kanke', 'kiyom', 'Langtang+north', 'Langtang+south', 'Mangu', 'Mikang',
'Pankshin', 'Quaan+pan', 'Riyom', 'Shendam', 'Wase', 'ladi'],
Rivers= ['Odial', 'Abua', 'Ahoada+East', 'Ahoada+west', 'Akuku+toru', 'Andoni',
'Asari+toru', 'Akpor', 'Edoni', 'Bonny', 'Degema', 'Eleme', 'Emohua', 'Emuoha',
'Etche', 'Gokana', 'Ikwerre', 'Khana', 'Obio', 'Ogba+east', 'Ogba', 'Ogu', 'Okrika',
'Omuma', 'Omumma', 'Opobo', 'Oyigbo', 'Port+Harcourt', 'Portharcourt', 'Tai', 'yigbo',
'Odual', 'Egbema', 'Ndoni', 'bolo', 'Nkoro'],
Sokoto= ['Binji', 'Bodinga', 'Dange+Shuni', 'Dange', 'Gada', 'Goronyo', 'Gudu',
'Gwadabawa', 'Ilella', 'Illela', 'Isa', 'Kebbe', 'Kware', 'Rabah', 'Sabon+Birni',
'Shagari', 'Silame', 'Sokoto+north', 'Sokoto+south', 'Tambuwal', 'Tangaza', 'Tureta',
'Wamakko', 'Wamako', 'Wurno', 'Yabo', 'shuni'],
Taraba= ['Akdo+kola', 'Ardo+Kola', 'Bali', 'Donga', 'Gashaka', 'Gassol', 'Ibi', 'Jalingo',
'K+Lamido', 'Karim+Lamido', 'Kumi', 'Kurmi', 'lan', 'Lau', 'Sardauna', 'Takum',
'Tarum', 'Ussa', 'Wukari', 'Yorro', 'Zing'],
Yobe= ['Bade', 'Borsari', 'Bursari', 'Damaturu', 'Fika', 'Fune', 'G+ashua', 'Geidam', 'Gogaram',
'Gujba', 'Gulani', 'Jakusko', 'Karasuwa', 'Machina', 'Nagere', 'Nangere', 'Nguru', 'Potiskum',
'Tarmua', 'Tarmuwa', 'Yunusari', 'Yusufari'],
Zamfara= ['Anka', 'bukkuyum', 'Dungudu', 'Chafe', 'Gummi', 'Gusau', 'Isa', 'Kaura', 'Mradun',
'Maru', 'Shinkafi', 'Talata', 'Zumi', 'Bakura', 'Birnin+Magaji', 'Bungudu', 'Kaura+Namoda',
'Maradun', 'Talata+Mafara', 'Zurmi', 'Namoda', 'Mafara', 'Kiyaw'] )
local_govt_area['Akwa+Ibom'] = ['Abak', 'Eastern+Obolo', 'Eket', 'Esit+Eket', 'Essien+Udim', 'Etim+Ekpo',
'Etimekpo', 'Etinan', 'Ibeno', 'Ibesikpo+Asutan', 'Ibiono+lbom', 'Ika',
'Ikono', 'Ikot+Abasi', 'Ikot+Ekpene', 'Ini', 'Itu', 'lkot+Abasi',
'Mbo', 'Mkpat+Enin', 'Nsit', 'Nsit+lbom', 'Nsit-Atai', 'Nsit-Ubium',
'Obot+Akara', 'Okobo', 'Onna', 'Oron', 'Oruk+Anam', 'Oruko+Ete',
'Ubium', 'Udung', 'Udung+Uko', 'Ukanafun', 'Uko', 'Uruan',
'Urue+Offoung', 'Uyo', 'Oruko']
local_govt_area['Cross+River'] = ['Abi', 'Akamkpa', 'Akampa', 'Akpabuyo', 'Bakassi', 'Bekwara',
'Bekwarra', 'Biase', 'Boki', 'Calabar+Municipal', 'Calabar+south',
'Etung', 'Ikom', 'Obanliku', 'Obubra', 'Obudu', 'Odukpani', 'Ogoja',
'Ugep+north', 'Yakuur', 'Yala', 'Yarkur']
|
[
"amaefunatheophilus@gmail.com"
] |
amaefunatheophilus@gmail.com
|
798d67a4c5573a240974eb33442594dac3882564
|
e9b79a80d1eca76a2f430dc6fd63a27a971b1b1d
|
/Algorithms/Frahst_v6_1.py
|
cbac38101ba7c30846dc6ab17484dfcd58ccaa5e
|
[] |
no_license
|
MrKriss/Old-PhD-Code
|
3e4138f34d216b13d1febff41b0966b61ea4d1c1
|
35de436f500d5a1fe09cf4309d7e9b2e6dbddc45
|
refs/heads/master
| 2020-06-08T23:43:40.147486
| 2012-03-12T14:56:40
| 2012-03-12T14:56:40
| 2,950,882
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,541
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 05 23:19:11 2011
THIS IS THE VERSION TO RUN ON MAC, NOT WINDOWS
@author: musselle
"""
from numpy import eye, zeros, dot, sqrt, log10, trace, arccos, nan, arange, ones
import numpy as np
import scipy as sp
import numpy.random as npr
from numpy.linalg import qr, eig, norm, solve
from matplotlib.pyplot import plot, figure, title, step, ylim
from artSigs import genCosSignals_no_rand , genCosSignals
import scipy.io as sio
from utils import analysis, QRsolveA, pltSummary2
from PedrosFrahst import frahst_pedro_original
from Frahst_v3_1 import FRAHST_V3_1
from Frahst_v3_3 import FRAHST_V3_3
from Frahst_v3_4 import FRAHST_V3_4
from Frahst_v4_0 import FRAHST_V4_0
from load_syn_ping_data import load_n_store
from QR_eig_solve import QRsolve_eigV
from create_Abilene_links_data import create_abilene_links_data
from MAfunctions import MA_over_window
def FRAHST_V6_1(data, r=1, alpha=0.96, EW_alpha = 0.1, e_low = 0.96, e_high = 0.98, fix_init_Q = 0, holdOffTime=0,
evalMetrics = 'F', static_r = 0, r_upper_bound = None, L = 5, ignoreUp2 = 0,
data_norm_window = 50):
"""
Fast Rank Adaptive Householder Subspace Tracking Algorithm (FRAHST)
Version 6.1 - basically 6.0 but without the junk func + the actual new eigen (energy) tracking
- Turns out E_dash_t ~ S_trace or sum(eig_val)
E_t ~ EW_var2(zt) discounted by alpha a la covariance matrix
- no need to calculate incremental mean and var anymore
- Thresholding mechanism now uses two thresholds.
- if below the lowest -- > increment r
- if above the higher --> test if (E_dash_t - eig_i) / E_t is above e_high,
if so remove dimensions.
- difference between e_low and e_high acts as a 'safety' buffer, as removing an eig can
result in too much variance being subtracted because eigs are only smoothed estimates
of the true values. Takes time for est_eit to reach true eigs.
- NEXT (maybe) Normalisation of data optional as a preprocessing of data.
Version 6.0 - Aim: Different rank adjusting mechanism
compares sum of r eigenvalues to variance of entire data.
- Performs incremental calculation of data mean and variance. (no longer in later version)
Version 5.0 - No changes of 5.0 incorporated in this version
Version 4.0 - Now also approximates eigenvalues for the approximated tracked basis for the eigenvectors
- Approach uses an orthogonal iteration around X.T
- Note, only a good approximation if alpha ~< 1. Used as its the fastest method
as X.T b --> b must be solved anyway.
- New entries in res
['eig_val'] - estimated eigenvalues
['true_eig_val'] - explicitly calculated eigenvalues (only if evalMetrics = T)
Version 3.4 - input data z is time lagged series up to length l.
- Algorithm is essentially same as 3.3, just adds pre processing to data vector
- input Vector z_t is now of length (N times L) where L is window length
- Use L = 1 for same results as 3.3
- Q is increased accordingly
Version 3.3 - Add decay of S and in the event of vanishing inputs
- Make sure rank of S does not drop (and work out what that means!) - stops S going singular
Version 3.2 - Added ability to fix r to a static value., and also give it an upper bound.
If undefined, defaults to num of data streams.
Version 3.1 - Combines good bits of Pedros version, with my correction of the bugs
Changed how the algorithm deals with sci. only difference, but somehow has a bigish
effect on the output.
"""
# Initialise variables and data structures
#########################################
# Derived Variables
# Length of z or numStreams is now N x L
numStreams = data.shape[1] * L
timeSteps = data.shape[0]
if r_upper_bound == None :
r_upper_bound = numStreams
#for energy test
lastChangeAt = 1
sumYSq = 0.
sumXSq = 0.
# Data Stores
res = {'hidden' : zeros((timeSteps, numStreams)) * nan, # Array for hidden Variables
'E_t' : zeros([timeSteps, 1]), # total energy of data
'E_dash_t' : zeros([timeSteps, 1]), # hidden var energy
'e_ratio' : zeros([timeSteps, 1]), # Energy ratio
'RSRE' : zeros([timeSteps, 1]), # Relative squared Reconstruction error
'recon' : zeros([timeSteps, numStreams]), # reconstructed data
'r_hist' : zeros([timeSteps, 1]), # history of r values
'eig_val': zeros((timeSteps, numStreams)) * nan, # Estimated Eigenvalues
'zt_mean' : zeros((timeSteps, numStreams)), # history of data mean
'zt_var' : zeros((timeSteps, numStreams)), # history of data var
'zt_var2' : zeros((timeSteps, numStreams)), # history of data var
'S_trace' : zeros((timeSteps, 1)), # history of S trace
'skips' : zeros((timeSteps, 1)), # tracks time steps where Z < 0
'Phi' : [],
'S' : [],
'anomalies' : []}
# Initialisations
# Q_0
if fix_init_Q != 0: # fix inital Q as identity
q_0 = eye(numStreams);
Q = q_0
else: # generate random orthonormal matrix N x r
Q = eye(numStreams) # Max size of Q
Q_0, R_0 = qr(npr.rand(numStreams, r))  # npr = numpy.random, imported above
Q[:,:r] = Q_0
# S_0
small_value = 0.0001
S = eye(numStreams) * small_value # Avoids Singularity
# v-1
v = zeros((numStreams,1))
# U(t-1) for eigenvalue estimation
U = eye(numStreams)
# zt mean and var
zt_mean = zeros((numStreams,1))
zt_var = zeros((numStreams,1))
zt_var2 = zeros((numStreams,1))
# NOTE algorithm's state (constant memory), S, Q and v and U are kept at max size
# Use iterable for data
# Now a generator to calculate z_tl
iter_data = lag_inputs(data, L)
# Main Loop #
#############
for t in range(1, timeSteps + 1):
#alias to matrices for current r
Qt = Q[:, :r]
vt = v[:r, :]
St = S[:r, :r]
Ut = U[:r, :r]
zt = iter_data.next()
'''Data Preprocessing'''
# Update zt mean and var
zt_var, zt_mean = EW_mean_var(zt, EW_alpha, zt_var, zt_mean)
zt_var2 = alpha_var(zt, alpha, zt_var2)
# Convert to a column Vector
# Already taken care of in this version
# zt = zt.reshape(zt.shape[0],1)
# Check S remains non-singular
for idx in range(r):
if S[idx, idx] < small_value:
S[idx,idx] = small_value
'''Begin main algorithm'''
ht = dot(Qt.T , zt)
Z = dot(zt.T, zt) - dot(ht.T , ht)
if Z > 0 :
# Refined version, use of extra terms
u_vec = dot(St , vt)
X = (alpha * St) + (2 * alpha * dot(u_vec, vt.T)) + dot(ht , ht.T)
# Estimate eigenValues + Solve Ax = b using QR decomposition
b_vec, e_values, Ut = QRsolve_eigV(X.T, Z, ht, Ut)
beta = 4 * (dot(b_vec.T , b_vec) + 1)
phi_sq = 0.5 + (1.0 / sqrt(beta))
phi = sqrt(phi_sq)
gamma = (1.0 - 2 * phi_sq) / (2 * phi)
delta = phi / sqrt(Z)
vt = gamma * b_vec
St = X - ((1 /delta) * dot(vt , ht.T))
w = (delta * ht) - (vt)
ee = delta * zt - dot(Qt , w)
Qt = Qt - 2 * dot(ee , vt.T)
else: # if Z is not > 0
if norm(zt) > 0 and norm(ht) > 0 : # May be due to zt <= ht
res['skips'][t-1] = 2 # record Skips
else: # or may be due to zt and ht = 0
St = alpha * St # Continue decay of St
res['skips'][t-1] = 1 # record Skips
#restore data structures
Q[:,:r] = Qt
v[:r,:] = vt
S[:r, :r] = St
U[:r,:r] = Ut
''' EVALUATION '''
# Deviations from true dominant subspace
if evalMetrics == 'T' :
if t == 1 :
res['subspace_error'] = zeros((timeSteps,1))
res['orthog_error'] = zeros((timeSteps,1))
res['angle_error'] = zeros((timeSteps,1))
res['true_eig_val'] = ones((timeSteps, numStreams)) * np.NAN
Cov_mat = zeros([numStreams,numStreams])
# Calculate Covariance Matrix of data up to time t
Cov_mat = alpha * Cov_mat + dot(zt, zt.T)
#
res['Phi'].append(Cov_mat)
#
# Get eigenvalues and eigenvectors
W , V = eig(Cov_mat)
# Use this to sort eigenvectors according to descending eigenvalue
eig_idx = W.argsort() # Get sort index
eig_idx = eig_idx[::-1] # Reverse order (default is ascending)
# V_r = highest r eigenvectors (according to their eigenvalue if sorted).
V_r = V[:, eig_idx[:r]]
# Calculate subspace error
C = dot(V_r , V_r.T) - dot(Qt , Qt.T)
res['subspace_error'][t-1,0] = 10 * log10(trace(dot(C.T , C))) #frobenius norm in dB
# Store True r Dominant Eigenvalues
res['true_eig_val'][t-1,:r] = W[eig_idx[:r]]
# Calculate angle between projection matrixes
#D = dot(dot(dot(V_r.T, Qt), Qt.T), V_r)
#eigVal, eigVec = eig(D)
#angle = arccos(sqrt(max(eigVal)))
#res['angle_error'][t-1,0] = angle
# Calculate deviation from orthonormality
F = dot(Qt.T , Qt) - eye(r)
res['orthog_error'][t-1,0] = 10 * log10(trace(dot(F.T , F))) #frobenius norm in dB
'''Store Values'''
# Record data mean and Var
res['zt_mean'][t-1,:] = zt_mean.T[0,:]
res['zt_var'][t-1,:] = zt_var.T[0,:]
res['zt_var2'][t-1,:] = zt_var2.T[0,:]
# Record S
res['S'].append(St)
# Record S trace
res['S_trace'][t-1] = np.trace(St)
# Store eigen values
if 'e_values' not in locals():
e_values = zt_var2
else:
res['eig_val'][t-1,:r] = e_values[:r]
# Record reconstructed z
z_hat = dot(Qt , ht)
res['recon'][t-1,:] = z_hat.T[0,:]
# Record hidden variables
res['hidden'][t-1, :r] = ht.T[0,:]
# Record RSRE
if t == 1:
top = 0.0
bot = 0.0
top = top + (norm(zt - z_hat) ** 2 )
bot = bot + (norm(zt) ** 2)
res['RSRE'][t-1, 0] = top / bot
# Record r
res['r_hist'][t-1, 0] = r
'''Rank Estimation'''
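# Editorial summary of the two-threshold test implemented below (subject to
# holdOffTime, ignoreUp2 and r_upper_bound), with the defaults e_low = 0.96
# and e_high = 0.98:
#   e_ratio = sumYSq / sumXSq        (energy captured by the r hidden variables)
#   e_ratio < e_low   -> grow r by one, extending Q with the normalised residual
#   e_ratio > e_high  -> drop any estimated eigenvalue whose removal still keeps
#                        the ratio above e_high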
# Calculate energies
sumXSq = alpha * sumXSq + np.sum(zt ** 2) # Energy of Data
sumYSq = alpha * sumYSq + np.sum(ht ** 2) # Energy of hidden Variables
res['E_t'][t-1,0] = sumXSq
res['E_dash_t'][t-1,0] = sumYSq
res['e_ratio'][t-1, 0] = sumYSq / sumXSq
if static_r == 0: # optional parameter to keep r unchanged
# Adjust Q_t, St and Ut for change in r
if sumYSq < (e_low * sumXSq) and lastChangeAt < (t - holdOffTime) and r < r_upper_bound and t > ignoreUp2:
"""Note indexing with r works like r + 1 as index is from 0 in python"""
# Extend Q by z_bar
h_dash = dot(Q[:, :r].T, zt)
z_bar = zt - dot(Q[:, :r] , h_dash)
z_bar_norm = norm(z_bar)
z_bar = z_bar / z_bar_norm
Q[:numStreams, r] = z_bar.T[0,:]
s_end = z_bar_norm ** 2
# Set next row and column to zero
S[r, :] = 0.0
S[:, r] = 0.0
S[r, r] = s_end # change last element
# Update Ut_1
# Set next row and column to zero
U[r, :] = 0.0
U[:, r] = 0.0
U[r, r] = 1.0 # change last element
# Update eigenvalue
e_values = sp.r_[e_values, z_bar_norm ** 2]
# This is the bit where the estimate is off? dont really have anything better
# new r, increment
r = r + 1
# Record time step of anomaly
res['anomalies'].append(t-1)
# Reset lastChange
lastChangeAt = t
elif sumYSq > (e_high * sumXSq) and lastChangeAt < t - holdOffTime and r > 1 and t > ignoreUp2:
keeper = ones(r, dtype = bool)
# Sorted in ascending order
sorted_eigs = e_values[e_values.argsort()]
acounted_var = sumYSq
for idx in range(r):
if ((acounted_var - sorted_eigs[idx]) / sumXSq) > e_high:
keeper[idx] = 0
acounted_var = acounted_var - sorted_eigs[idx]
# use keeper as a logical selector for S and Q and U
if not keeper.all():
# Delete rows/cols in Q, S, and U.
newQ = Q[:r,:r].copy()
newQ = newQ[keeper,:][:,keeper] # rows/cols eliminated
Q[:newQ.shape[0], :newQ.shape[1]] = newQ
newS = S[:r,:r].copy()
newS = newS[keeper,:][:,keeper] # rows/cols eliminated
S[:newS.shape[0], :newS.shape[1]] = newS
newU = U[:r,:r].copy()
newU = newU[keeper,:][:,keeper] # rows/cols eliminated
U[:newU.shape[0], :newU.shape[1]] = newU
r = keeper.sum()
if r == 0 :
r = 1
# Reset lastChange
lastChangeAt = t
return res
def lag_inputs(data, L):
"""Generator function to construct an input vector ztl that is the lagged zt
up to time l.
z_tl = [zt, zt-t, zt-2,..., zt-l]
Takes input data as a matrix.
"""
N = data.shape[1]
total_timesteps = data.shape[0]
z_tl = np.zeros((L*N,1))
for i in range(total_timesteps):
#shift values
z_tl[N:] = z_tl[:-N]
# add new one to start of vector
z_tl[:N] = np.atleast_2d(data[i,:]).T
yield z_tl
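# Illustrative check (editorial): with L = 2 and data = np.eye(3) the first two
# yielded vectors are
#   [1, 0, 0, 0, 0, 0]^T   (z_1 with an all-zero history)
#   [0, 1, 0, 1, 0, 0]^T   (z_2 stacked on top of z_1)
# Note that the same array object is yielded (and mutated in place) each step.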
def alpha_var(x, alpha, var):
""" Simple exponential forgetting of Variance """
var = alpha * var + ( np.power(x,2))
return var
def EW_mean_var(x, alpha, var, mean):
""" Work out the exponentially weighted mean and variance of the data """
if alpha > 1 :
alpha = 2.0 / (alpha + 1)
diff = x - mean
incr = alpha * diff
mean = mean + incr
var = (1 - alpha) * (var + diff * incr)
return var, mean
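# Editorial note: EW_mean_var accepts the smoothing factor directly (0 < alpha <= 1)
# or, when alpha > 1, treats it as a span N and converts it to 2 / (N + 1),
# e.g. alpha = 19 is equivalent to a smoothing factor of 0.1.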
def simple_sins(p1,p11, p2,p22, noise_scale, N = 500):
t = arange(N)
z1 = np.sin(2 * np.pi * t / p1) + npr.randn(t.shape[0]) * noise_scale
z2 = np.sin(2 * np.pi * t / p2) + npr.randn(t.shape[0]) * noise_scale
z11 = np.sin(2 * np.pi * t / p11) + npr.randn(t.shape[0]) * noise_scale
z22 = np.sin(2 * np.pi * t / p22) + npr.randn(t.shape[0]) * noise_scale
data = sp.r_['1,2,0', sp.r_[z1, z11], sp.r_[z2, z22]]
return data
def simple_sins_3z(p1,p11, p2,p22, p3, p33, noise_scale, N = 500):
t = arange(N)
z1 = np.sin(2 * np.pi * t / p1) + npr.randn(t.shape[0]) * noise_scale
z2 = np.sin(2 * np.pi * t / p2) + npr.randn(t.shape[0]) * noise_scale
z3 = np.sin(2 * np.pi * t / p3) + npr.randn(t.shape[0]) * noise_scale
z11 = np.sin(2 * np.pi * t / p11) + npr.randn(t.shape[0]) * noise_scale
z22 = np.sin(2 * np.pi * t / p22) + npr.randn(t.shape[0]) * noise_scale
z33 = np.sin(2 * np.pi * t / p33) + npr.randn(t.shape[0]) * noise_scale
data = sp.r_['1,2,0', sp.r_[z1, z11], sp.r_[z2, z22], sp.r_[z3, z33]]
return data
if __name__ == '__main__' :
first = 1
if first:
# data = genCosSignals(0, -3.0)
# data, G = create_abilene_links_data()
#execfile('/Users/chris/Dropbox/Work/MacSpyder/Utils/gen_Anomalous_peakORshift_data.py')
#data = A
#data = simple_sins(10,10,10,25, 0.1)
data = simple_sins_3z(10,10,13,13, 10, 27, 0.0)
# data = genCosSignals_no_rand(timesteps = 10000, N = 3)
# data = array([[0,0,0], [1,2,2], [1,3,4], [3,6,6], [5,6,10], [6,8,11]])
#sig_PN, ant_PN, time_PN = load_n_store('SYN', 'PN')
#data = sig_PN
#AbileneMat = sio.loadmat('/Users/chris/DataSets/Abilene/Abilene.mat')
#data = AbileneMat['P']
# Mean adjust data
#data_mean = MA_over_window(data,50)
#data = data - data_mean
# Fix Nans
whereAreNaNs = np.isnan(data)
data[whereAreNaNs] = 0
e_high = 0.90
e_low = 0.85
alpha = 0.96
EW_alpha = 0.1
ignoreUp2 = 50
holdOFF = 0
L = 1
# Flags
v6_1 = 1
v6_0 = 0
v4_0 = 0
v3_4 = 0
v3_3 = 0
v3_1 = 0
pedro = 0
if v6_1:
'''My Latest version'''
res_v6_1 = FRAHST_V6_1(data, L = L, alpha = alpha, EW_alpha = EW_alpha, e_low=e_low,
e_high = e_high, holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'T',
ignoreUp2 = ignoreUp2, static_r = 0, r_upper_bound = None)
res_v6_1['Alg'] = 'My Implimentation of FRAUST Version 6.0 '
pltSummary2(res_v6_1, data, (e_high, e_low))
ylim(e_low - 0.05 , 1.02)
if v6_0:
'''My Latest version'''
res_v6_0 = FRAHST_V6_0(data, L = L, alpha = alpha, EW_alpha = EW_alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'T',
ignoreUp2 = ignoreUp2, static_r = 0, r_upper_bound = None)
res_v6_0['Alg'] = 'My Implimentation of FRAUST Version 6.0 '
pltSummary2(res_v6_0, data, (e_high, e_low))
if v4_0:
'''My Latest version'''
res_v4_0 = FRAHST_V4_0(data, L = L, alpha=alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'T',
ignoreUp2 = ignoreUp2, static_r = 0, r_upper_bound = None)
res_v4_0['Alg'] = 'My Implimentation of FRAUST Version 4.0 '
pltSummary2(res_v4_0, data, (e_high, e_low))
if v3_4:
'''My Latest version'''
res_new = FRAHST_V3_4(data, L = L, alpha=alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'F',
ignoreUp2 = ignoreUp2)
res_new['Alg'] = 'My other latest Implimentation of FRAUST '
pltSummary2(res_new, data, (e_high, e_low))
if v3_3:
'''My previous version'''
res_old1 = FRAHST_V3_3(data, alpha=alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'F',
ignoreUp2 = ignoreUp2)
res_old1['Alg'] = 'My Previous Implimentation of FRAUST '
pltSummary2(res_old1, data, (e_high, e_low))
if v3_1:
'''My older version'''
res_old2 = FRAHST_V3_1(data, alpha=alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, fix_init_Q = 1, r = 1, evalMetrics = 'F')
res_old2['Alg'] = 'My Older Implimentation of FRAUST '
pltSummary2(res_old2, data, (e_high, e_low))
if pedro:
'''Pedros Version'''
res_ped = frahst_pedro_original(data, r=1, alpha=alpha, e_low=e_low, e_high=e_high,
holdOffTime=holdOFF, evalMetrics='F')
res_ped['Alg'] = 'Pedros Original Implimentation of FRAUST'
pltSummary2(res_ped, data, (e_high, e_low))
first = 0
|
[
"chris.j.musselle@gmail.com"
] |
chris.j.musselle@gmail.com
|
3a17d7c330985b8f290aca777695885ba4773cd1
|
48ade1b3a9f2ce51cebb5e236448c0f977efa71e
|
/tagmaster.py
|
3e761a4f92e4ebdbe3e70d84c0db7767ac8829cc
|
[] |
no_license
|
naliferopoulos/PasteCrawl
|
359f2e266ec12fe80f579dc8c07da9bd8b397421
|
a875547d57822e8a35b7d4ec0c5985a2c221e8d7
|
refs/heads/master
| 2022-03-31T10:44:59.475347
| 2020-01-14T11:31:18
| 2020-01-14T11:31:18
| 232,835,469
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
import os
import importlib
# The list of taggers
taggers = []
def collect_taggers(dir):
files = os.listdir(dir)
for file in files:
if file.endswith(".py"):
module = importlib.import_module(dir + '.' + file[:-3])
tagger = getattr(module, 'Tagger')
taggers.append(tagger)
def reload_taggers(dir):
global taggers
taggers = []
collect_taggers(dir)
def run_taggers(text):
tags = []
for tagger in taggers:
for tag in tagger.tag(text):
tags.append(tag)
return tags
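# Sketch of a tagger module this loader expects (editorial; the file name and
# regex are hypothetical - only a class named `Tagger` exposing a callable
# tag(text) that returns a list of tags is implied by the code above):
#
#   # taggers/email_tagger.py
#   import re
#   class Tagger:
#       @staticmethod
#       def tag(text):
#           return ["email"] if re.search(r"[\w.+-]+@[\w-]+\.[\w.]+", text) else []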
|
[
"aliferopoulos@icloud.com"
] |
aliferopoulos@icloud.com
|
c1ae17639a9bb83b8bf96c38ba86307c480cf464
|
4f8900cb2474282ae355a952901df4bc9b95e81c
|
/mooring/api_tools.py
|
131a9cc627d77ad87064fd16ccd05c55de29e2f5
|
[
"Apache-2.0"
] |
permissive
|
dbca-wa/moorings
|
cb6268c2b7c568b0b34ac007a42210fd767620f7
|
37d2942efcbdaad072f7a06ac876a40e0f69f702
|
refs/heads/master
| 2023-06-09T04:16:57.600541
| 2023-05-31T05:50:55
| 2023-05-31T05:50:55
| 209,494,941
| 0
| 6
|
NOASSERTION
| 2023-05-22T04:56:38
| 2019-09-19T07:53:30
|
Python
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import Group
from mooring import models
from mooring import common_iplookup
from django.db.models import Q
import json
import ipaddress
def create_vessel(request, apikey):
ledger_json = {}
jsondata = {'status': 404, 'message': 'API Key Not Found'}
if models.API.objects.filter(api_key=apikey,active=1).count():
if common_iplookup.api_allow(common.get_client_ip(request),apikey) is True:
jsondata['status'] = 200
jsondata['message'] = 'Groups Retrieved'
else:
jsondata['status'] = 403
jsondata['message'] = 'Access Forbidden'
return HttpResponse(json.dumps(jsondata), content_type='application/json')
|
[
"jason.moore@dbca.wa.gov.au"
] |
jason.moore@dbca.wa.gov.au
|
1c943d111acf300d36712000062dc8efdc38d5a6
|
dddbd1a49f6ecc002e37bb0682ddfc2e61b19e71
|
/src/robogenerator/test/test_random_pairs.py
|
6590b0c6304e2759794a907fa0f06ba723de58be
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
blueenergy/robogenerator
|
1c3ea9af8adc7a74db50ee73a2051154ce5a75d5
|
cd5a72a350e84f8563fa50874e8325993520840e
|
refs/heads/master
| 2021-01-17T17:05:19.021503
| 2015-07-03T09:14:31
| 2015-07-03T09:14:31
| 6,330,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
import unittest
import sys
sys.path.append('../')
from random_pairs import get_random_combination
old_parameters = {'unit_type': [ "DMCU"]
, 'state':[ "WO-EX"]
, 'restart_mode':[ "OPT","TOT","DSK"]
,'restart_level':["master","slave"]
, 'element_type':["RNC"]
, 'sfutype':["SF20H"]
}
class RandomPairsTestCase(unittest.TestCase):
def test_random_pairs_1(self):
parameters_1 = {'unit_type': [ "DMCU"],'state':[ "WO-EX"]}
expected_result = {'unit_type': "DMCU",'state':"WO-EX"}
self.assertEqual(expected_result,get_random_combination(parameters_1))
if __name__ == '__main__':
unittest.main()
|
[
"shuyong.y.lin@nsn.com"
] |
shuyong.y.lin@nsn.com
|
cc461be28775fdbfab839012d2de66d810dc5087
|
a5a5d21751ab6e6dddda703899d891a95859f102
|
/Dust Invaders B02.py
|
338137316fbf89b65c14bed3011cdd8edb4297d8
|
[
"MIT"
] |
permissive
|
ecodallaluna/dustinvaders
|
a787f77cc762f4201eb1c3ca44e454154af10b02
|
bb06d1d6d41c9169f5c35a3f01cd658f5ed0296f
|
refs/heads/master
| 2021-06-26T01:52:40.323130
| 2020-09-24T18:00:22
| 2020-09-24T18:00:22
| 132,319,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,245
|
py
|
#
# ______ _ _ _____ _____ _ _____ _ _ _ _ ___ ______ ___________ _____
# | _ \ | | / ___|_ _/\| |/\_ _| \ | | | | |/ _ \| _ \ ___| ___ \/ ___|
# | | | | | | \ `--. | | \ ` ' / | | | \| | | | / /_\ \ | | | |__ | |_/ /\ `--.
# | | | | | | |`--. \ | ||_ _|| | | . ` | | | | _ | | | | __|| / `--. \
# | |/ /| |_| /\__/ / | | / , . \_| |_| |\ \ \_/ / | | | |/ /| |___| |\ \ /\__/ /
# |___/ \___/\____/ \_/ \/|_|\/\___/\_| \_/\___/\_| |_/___/ \____/\_| \_|\____/
#
#
# Dust Invaders
# by Ilario Jeff Toniello
# https://github.com/ecodallaluna/dustinvaders
#
# Version B.0.2 - Control version to check engagement allure
#
# Description
# =========================================================================
# This version of the app has the purpose of collecting info about the utilisation of a vacuum cleaner
# without interaction with the game, for statistical analysis. This app will silently store the
# amount of usage of the vacuum cleaner by saving the movement of the accelerometer. Each 30 seconds
# of usage will be saved as one "token". Other saved data are: time and kind of movement of the
# vacuum cleaner on axes x and y (left/right up/down). While in the 30 sec cycle, the display will
# show a smiley face. This app will be a control version for the game itself, to simulate the effect
# of an observer on the activity of vacuuming the floor
#
# Version update:
# v.B.0.1 Use of internal clock for missing hardware
# v.B.0.2 Tested RTC Board, fixed function for clock, string for time saving
#
# hardware requirement
# =========================================================================
# - micro:bit board microbit.org/
# - RTC (Real Time Clock) Board, this app is set for a DS3231 RCT Board
#
# software requirement
# =========================================================================
# this app was developed with Mu https://codewith.mu/
# with Mu or from the command line it is possible to interact with the memory of the micro:bit board and save
# a copy of the usage data collected by the app
#
# !! WARNING !!
# =========================================================================
# every time the micro:bit is flashed ALL the files in the micro:bit
# are destroyed. Remember to download the usage data before flashing
#import microbit code library
from microbit import *
# set variables
# =========================================================================
# set sensibility in milli-g
sensibility_sensor = 500
#set initial movement
mov_before = "-"
# initialise the counter of 30 s movement tokens
token = 0
# set image (not needed)
# check_led = Image("00000:00000:00500:00000:00000")
#variables for RTC
addr = 0x68
buf = bytearray(7)
# needed by RTC Board
sleep(1000)
# list of functions
# =========================================================================
# functions to read time from DS3231 RTC (Real Time Clock) Board
# To use the board you need to connect 4 pins from the board to the micro:bit
# - Connect GND on the breakout to GND on the microbit.
# - Connect VCC to 3V.
# - Connect SDA to pin 20.
# - Connect SCL to pin 19.
# functions to convert values for get_time()
# functions for RTC board from http://www.multiwingspan.co.uk
def bcd2dec(bcd):
return (((bcd & 0xf0) >> 4) * 10 + (bcd & 0x0f))
def dec2bcd(dec):
tens, units = divmod(dec, 10)
return (tens << 4) + units
# this function reads time from the RTC board
# get_time() returns an array of ints with information from the RTC Board
# e.g. after calling tm = get_time()
# tm[0] equals hh
def get_time():
i2c.write(addr, b'\x00', repeat=False)
buf = i2c.read(addr, 7, repeat=False)
ss = bcd2dec(buf[0])
mm = bcd2dec(buf[1])
if buf[2] & 0x40:
hh = bcd2dec(buf[2] & 0x1f)
if buf[2] & 0x20:
hh += 12
else:
hh = bcd2dec(buf[2])
wday = buf[3]
DD = bcd2dec(buf[4])
MM = bcd2dec(buf[5] & 0x1f)
YY = bcd2dec(buf[6])+2000
return [hh,mm,ss,YY,MM,DD,wday]
# set_time(0,0,12,5,1,4,2016) can be used to set the time and date of the RTC Board.
# The order of the numbers is seconds, minutes, hours, week day, day, month, year.
# This only needs to be done once; the stand-alone battery keeps the clock running while the micro:bit is turned off
def set_time(s,m,h,w,dd,mm,yy):
t = bytes([s,m,h,w,dd,mm,yy-2000])
for i in range(0,7):
i2c.write(addr, bytes([i,dec2bcd(t[i])]), repeat=False)
return
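# Minimal usage sketch (illustrative values; argument order follows set_time() above:
# seconds, minutes, hours, week day, day, month, year):
#   set_time(0, 30, 14, 3, 25, 9, 2020)        # 14:30:00 on week day 3, 25 Sep 2020
#   hh, mm, ss, YY, MM, DD, wday = get_time()  # unpack the list returned by get_time()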
# check movement and report
def is_moving(sens) :
# read from accelerometer
readx = accelerometer.get_x()
ready = accelerometer.get_y()
if readx > sens :
result = "right"
elif readx < -sens :
result = "left"
elif ready > sens :
result = "up"
elif ready < -sens :
result = "down"
else :
result = "-"
return result
# 30-second capture cycle that saves all new movements
def capture_30s(sens) :
# initialise variables
# set display on
display.show(Image.HAPPY)
time_sec = 0
right_n = 0
left_n = 0
up_n = 0
down_n = 0
mov_before_cicle = "-"
    # start the 30 s data-acquisition cycle
while time_sec <= 30 :
# get movement
mov_into_cicle = is_moving(sens)
        # inner check to be sure only changes are saved
if mov_into_cicle != mov_before_cicle :
mov_before_cicle = mov_into_cicle
print("in cicle "+ str(mov_into_cicle)) # just to check, remove from final
if mov_into_cicle == "right" :
right_n = right_n + 1
elif mov_into_cicle == "left" :
left_n = left_n + 1
elif mov_into_cicle == "up" :
up_n = up_n + 1
elif mov_into_cicle == "down" :
down_n = down_n + 1
sleep(500) # wait 0.5 sec before next reading
time_sec = time_sec + 0.5 # advance time count
    # after 30 s, leave the cycle and return data as an array of ints
display.clear() #turn off the led after 30 sec
return [right_n, left_n, up_n, down_n]
# function to storage data in txt file
def save_data(time2save, token2save, r2save, l2save, u2save, d2save) :
    # build the time string: YYYY-MM-DD <tab> HH:MM:SS <tab> weekday
time2save_string = str(time2save[3]) + "-" + str(time2save[4]) + "-" + str(time2save[5]) + "\t" + str(time2save[0]) + ":" + str(time2save[1]) + ":" + str(time2save[2]) + "\t" + str(time2save[6])
#create string to save
string2save = time2save_string + "\t" + str(token2save) + "\t" + str(r2save) + "\t" + str(l2save) + "\t" + str(u2save) + "\t" + str(d2save) + "\n"
try : # try to open existing file and copy data on saved_data
with open('vacuum_data.txt', 'r') as my_file:
saved_data = my_file.read()
    except OSError : # if the file does not exist, create an empty file and variable
saved_data = ""
with open('vacuum_data.txt', 'w') as my_file:
my_file.write("")
#update data with new reading
saved_data = saved_data + string2save
print("saved data:") #just to chek, remove from final
print(saved_data) #just to chek, remove from final
#save new file with updated data
with open('vacuum_data.txt', 'w') as my_file:
my_file.write(saved_data)
return
# start programme
# ===============================
while True :
mov = is_moving(sensibility_sensor)
if mov != mov_before :
mov_before = mov
token = token + 1 # token counter is updated
print("identified movement! start cicle - " + mov_before) # print to check remove from final
data_30s = capture_30s(sensibility_sensor) #function 30s cicle + save movement
print("token no: "+ str(token) + ", movements: R: " + str(data_30s[0]) + ", L: " + str(data_30s[1]) + ", U: " + str(data_30s[2]) + ", D: " + str(data_30s[3]) ) # print to check remove from final
# read time
time_now = get_time()
#save data in file txt
save_data(time_now, token, data_30s[0], data_30s[1], data_30s[2], data_30s[3])
sleep(1000)
|
[
"noreply@github.com"
] |
ecodallaluna.noreply@github.com
|
2e9d73f47d2c9a9e999c0c451e6fbc6f6b3d80b3
|
debeb32aa1ec296ac67c7b0665e8b8ee6d111efe
|
/combine_preds_and_analyze.py
|
b085a28f2eaa873f6b08d564d1424b0ba050e888
|
[] |
no_license
|
sacohen11/climate-risk-capstone
|
393671098059a5f0204af9c0772ad5c25e8b6860
|
e83a873c0337874464c278a3ccd64b28100c5e66
|
refs/heads/master
| 2023-04-18T11:46:04.824645
| 2021-05-05T20:26:09
| 2021-05-05T20:26:09
| 364,498,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,478
|
py
|
#################################################################
# Title: Combine Predictions and Analyze
# Author: Sam Cohen
# Notes:
# These functions combine all the results of the research pipeline
# and performs joint analysis on all the results.
##################################################################
# Packages
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
def load_ten_k_sents(file_path):
"""
This function loads the 10-K sentences into Python.
:param file_path: the path of the file
:return: the 10-K sentences
"""
ten_k = np.load(file_path, allow_pickle=True)
return ten_k
def combine_predictions():
"""
This function combines multiple text files that have been created as the output of the research pipeline.
Those files are the 10-K sentences, the information on the companies behind the 10-K sentences, the
predictions for specificity, and the predictions for climate.
Some code is obtained from https://stackoverflow.com/questions/39717828/python-combine-two-text-files-into-one-line-by-line.
:return: nothing, but outputs a file with all the above information combined into one text file
"""
# arrays to track metrics
total_climate = []
specificities = []
with open("Domain-Agnostic-Sentence-Specificity-Prediction/dataset/data/ten_k_sentences.txt") as xh:
with open('Domain-Agnostic-Sentence-Specificity-Prediction/dataset/data/ten_k_info.txt') as yh:
with open("Domain-Agnostic-Sentence-Specificity-Prediction/predictions.txt") as zh:
with open("Domain-Agnostic-Sentence-Specificity-Prediction/climate_predictions.txt") as th:
with open("combined.txt", "w") as wh:
#Read first file
xlines = xh.readlines()
#Read second file
ylines = yh.readlines()
#Read third file
zlines = zh.readlines()
#Read fourth file
tlines = th.readlines()
#Print lengths
print('sentence length:', len(xlines))
print('info length:', len(ylines))
print('specificity prediction length:', len(zlines))
print('climate prediction length:', len(tlines))
#Combine content of both lists
#combine = list(zip(ylines,xlines))
#Write to third file
for i in range(len(xlines)):
if i == 0:
print()
else:
# need to add climate predictions
#total_climate.append()
# regex = re.compile(
# r'[0-9]\.[0-9]{3,6}')
#matches = regex.finditer(document['10-K'])
specificities.append(float(zlines[i-1].strip()))
total_climate.append(int(tlines[i]))
line = ylines[i].strip() + '\t' + zlines[i-1].strip() + '\t' + tlines[i].strip() + '\t' + xlines[i]
wh.write(line)
print(75 * '=')
print("Specificity Statistics:")
print('Mean Specificity:', np.mean(np.array(specificities)))
print('Standard Deviation Specificity:', np.std(np.array(specificities)))
print('Max Specificity:', np.max(np.array(specificities)))
print('Min Specificity:', np.min(np.array(specificities)))
print(75*'=')
print("Climate Prediction Statistics:")
print('Climate Related Sentences Sum:', np.sum(np.array(total_climate)))
print('Non Climate Related Sentences Sum:', len(total_climate) - np.sum(np.array(total_climate)))
print('Climate Related Sentences Percent', np.sum(np.array(total_climate))/len(total_climate))
print(75*'=')
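# Note on the indexing above: xlines, ylines and tlines are indexed with i while zlines uses
# i-1, which suggests predictions.txt carries no header row whereas the other three files do
# (i == 0 is skipped as the header line).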
def eda_plots():
"""
This function reads in the file with combined information and outputs some EDA plots on the data.
:return: nothing, but outputs EDA plots
"""
print(os.getcwd())
df = pd.read_csv("combined.txt", delimiter="\t", names=["CIK", "Year", "Stock Ticker", "Company Name", "Industry", "Specificity", "Climate Prediction", "Sentence"])
print(df.info())
print(df.columns)
# drop data sources that have less than 20 sentences
# drop sentences that are less than 10 words
new_df = df.groupby(["Stock Ticker", "Year"])
new_df = new_df.agg({"Sentence": "nunique"})
new_df = new_df.rename(columns={"Sentence":"num_sents"})
df = df.merge(new_df, how='inner', on=["Stock Ticker", "Year"])
df["sentence_length"] = df['Sentence'].str.split().apply(len)
df = df[df["num_sents"]>=20]
df = df[df["sentence_length"]>10]
training_sents()
unique_company_count(df)
plot_companies_by_sector(df, "Count of Companies by Sector", 'output/eda_companies_by_sector')
plot_sents_by_sector(df, "Count of Sentences by Sector", 'output/eda_sentences_by_sector.jpg')
plot_companies_per_year(df, "Count of Companies by Year", 'output/eda_companies_by_year.jpg')
plot_sents_per_year(df, "Count of Sentences by Year", 'output/eda_sentences_by_year.jpg')
num_sents_per_filing(df, "Histogram of Sentences by Filing", 'output/eda_hist_sentences_by_filing')
hist_sent_specificity(df, "Sentence Specificity Distribution", 'output/eda_dist_sentence_specificity')
climate_bar(df, "Climate Predictions", 'output/eda_climate_preds.jpg')
stats_on_sents_per_10k(df)
t_test(df)
print('end')
def unique_company_count(df):
"""
This function prints the number of unique companies in the final dataset.
:param df: dataframe with the combined information
:return: nothing, but prints the number of unique companies
"""
# number of unique companies
print("Number of Unique Companies:", df["Stock Ticker"].nunique())
print("Number of Unique Filings:", df.groupby(["Stock Ticker", "Year"]).agg("nunique"))
def plot_companies_by_sector(df, title, out_path):
"""
This function plots the companies by sector.
The code comes from https://www.kite.com/python/answers/how-to-count-unique-values-in-a-pandas-dataframe-group-in-python.
:param df: dataframe with combined results
:param title: title of the new chart
:param out_path: the output location of the charts
:return: nothing, but outputs charts
"""
# Number of companies by sector
sectored_df = df.groupby("Industry")
sectored_df = sectored_df.agg({"Stock Ticker": "nunique"})
sectored_df = sectored_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.barplot(data=sectored_df, x="Industry", y="Stock Ticker")
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
#plt.xticks(ticks=np.arange(len(sectored_df["Industry"])),labels=sectored_df["Industry"], fontsize=16)
plt.yticks(ticks=range(0,max(sectored_df["Stock Ticker"])+1, 5))
plt.xlabel("Industry Sector")
plt.ylabel("Number of Companies")
sns.despine(left=True)
plt.show()
plt.savefig(out_path)
def plot_sents_by_sector(df, title, out_path):
"""
This function plots the sentences by sector.
The code comes from https://www.kite.com/python/answers/how-to-count-unique-values-in-a-pandas-dataframe-group-in-python.
:param df: dataframe with combined results
:param title: title of the new chart
:param out_path: the output location of the charts
:return: nothing, but outputs charts
"""
# Number of companies by sector
sectored_df = df.groupby("Industry")
sectored_df = sectored_df.agg({"Sentence": "nunique"})
sectored_df = sectored_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.barplot(data=sectored_df, x="Industry", y="Sentence")
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
#plt.xticks(ticks=np.arange(len(sectored_df["Industry"])),labels=sectored_df["Industry"], fontsize=16)
plt.yticks(ticks=range(0,max(sectored_df["Sentence"])+1, 5000))
plt.xlabel("Industry Sector")
plt.ylabel("Number of Sentences")
sns.despine(left=True)
plt.show()
plt.savefig(out_path)
def plot_companies_per_year(df, title, out_path):
"""
This function plots the number of companies per year.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
# Number of companies per year
yeared_df = df.groupby("Year")
yeared_df = yeared_df.agg({"Stock Ticker": "nunique"})
yeared_df = yeared_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.barplot(data=yeared_df, x="Year", y="Stock Ticker")
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
plt.yticks(ticks=range(0,max(yeared_df["Stock Ticker"])+1, 50))
#ax.set_yticklabels(ax.get_yticklabels())
plt.xlabel("Years")
plt.ylabel("Number of Companies")
sns.despine(left=True)
plt.show(ax=ax)
plt.savefig(out_path)
def plot_sents_per_year(df, title, out_path):
"""
This function plots the number of sentences per year.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
# Number of companies per year
yeared_df = df.groupby("Year")
yeared_df = yeared_df.agg({"Sentence": "nunique"})
yeared_df = yeared_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.barplot(data=yeared_df, x="Year", y="Sentence")
ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
plt.yticks(ticks=range(0,max(yeared_df["Sentence"])+1, 5000))
#ax.set_yticklabels(ax.get_yticklabels())
plt.xlabel("Years")
plt.ylabel("Number of Sentences")
sns.despine(left=True)
plt.show(ax=ax)
plt.savefig(out_path)
def num_sents_per_filing(df, title, out_path):
"""
This function plots the number of sentences per filing.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
# Number of companies per year
yeared_df = df.groupby(["Stock Ticker", "Year"])
yeared_df = yeared_df.agg({"Sentence": "nunique"})
yeared_df = yeared_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.histplot(data=yeared_df, x="Sentence", bins=20)
#ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
plt.yticks(ticks=range(0, 250, 50))
plt.xticks(ticks=range(0, 850, 100))
# ax.set_yticklabels(ax.get_yticklabels())
plt.xlabel("Number of Sentences")
plt.ylabel("Count")
sns.despine(left=True)
plt.show(ax=ax)
plt.savefig(out_path)
def hist_sent_specificity(df, title, out_path):
"""
This function plots the distribution of sentences specificity.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
# Plot
ax = sns.displot(df, x="Specificity")
#ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
#plt.yticks(ticks=range(0, 250, 50))
#plt.xticks(ticks=range(0, 850, 100))
# ax.set_yticklabels(ax.get_yticklabels())
sns.set(style="whitegrid")
plt.title(title)
#plt.xlabel("Specificity")
#plt.ylabel("Density")
sns.despine(left=True)
plt.show(ax=ax)
plt.savefig(out_path)
def climate_bar(df, title, out_path):
"""
This function plots the number of climate and non-climate related sentences.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
# Number of companies per year
#yeared_df = df.groupby("Climate Prediction")
#yeared_df = yeared_df.agg({"Sentence": "nunique"})
# yeared_df = yeared_df.reset_index()
# Plot
sns.set(style="whitegrid")
plt.title(title)
ax = sns.countplot(data=df, x="Climate Prediction")
ax.set_xticklabels(ax.get_xticklabels(), rotation=0, ha="right", fontsize=8)
plt.yticks(ticks=range(0,300000, 25000))
#ax.set_yticklabels(ax.get_yticklabels())
plt.xlabel("Climate Prediciton")
plt.ylabel("Count")
sns.despine(left=True)
plt.show(ax=ax)
plt.savefig(out_path)
def t_test(df):
"""
This function plots the distribution of climate prediction.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
climate_df = df[df["Climate Prediction"] == 1]
non_climate_df = df[df["Climate Prediction"] == 0]
print("Mean of Climate Preds:", np.mean(climate_df["Specificity"].values))
print("Var of Climate Preds:", np.var(climate_df["Specificity"].values))
print("Mean of Non-Climate Preds:", np.mean(non_climate_df["Specificity"].values))
print("Var of Non-Climate Preds:", np.var(non_climate_df["Specificity"].values))
t, p = scipy.stats.ttest_ind(climate_df["Specificity"].values, non_climate_df["Specificity"].values)
print("T-Score:", t)
print("P-value:", p)
# Plot
ax = sns.displot(df, x="Specificity", col="Climate Prediction", multiple="dodge")
# ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
# plt.yticks(ticks=range(0, 250, 50))
# plt.xticks(ticks=range(0, 850, 100))
# ax.set_yticklabels(ax.get_yticklabels())
sns.set(style="whitegrid")
#plt.title("Comparison of Climate vs. Non-Climate Specificities")
# plt.xlabel("Specificity")
# plt.ylabel("Density")
sns.despine(left=True)
plt.show(ax=ax)
def anova_year(df):
"""
This function plots the distribution of climate prediction.
:param df: dataframe of combined information
:param title: title of the new chart
:param out_path: the location of the new chart
:return: nothing, but saves a new chart
"""
climate_df = df[df["Climate Prediction"] == 1]
non_climate_df = df[df["Climate Prediction"] == 0]
print("Mean of Climate Preds:", np.mean(climate_df["Specificity"].values))
print("Var of Climate Preds:", np.var(climate_df["Specificity"].values))
print("Mean of Non-Climate Preds:", np.mean(non_climate_df["Specificity"].values))
print("Var of Non-Climate Preds:", np.var(non_climate_df["Specificity"].values))
t, p = scipy.stats.ttest_ind(climate_df["Specificity"].values, non_climate_df["Specificity"].values)
print("T-Score:", t)
print("P-value:", p)
# Plot
ax = sns.displot(df, x="Specificity", col="Climate Prediction", multiple="dodge")
# ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right", fontsize=8)
# plt.yticks(ticks=range(0, 250, 50))
# plt.xticks(ticks=range(0, 850, 100))
# ax.set_yticklabels(ax.get_yticklabels())
sns.set(style="whitegrid")
#plt.title("Comparison of Climate vs. Non-Climate Specificities")
# plt.xlabel("Specificity")
# plt.ylabel("Density")
sns.despine(left=True)
plt.show(ax=ax)
def stats_on_sents_per_10k(df):
"""
This function prints stats on the 10-K sentences.
:param df: dataframe of the combined information
:return: nothing, but printed statistics
"""
new_df = df.groupby(["Stock Ticker", "Year"])
new_df = new_df.agg({"Sentence": "nunique"})
new_df = new_df.reset_index()
print(75 * '=')
print("Specificity Statistics:")
print('Mean Number of Sentences per 10-K:', np.mean(new_df["Sentence"]))
print('Standard Deviation Number of Sentences per 10-K:', np.std(new_df["Sentence"]))
print('Max Number of Sentences per 10-K:', np.max(new_df["Sentence"]))
print('Min Number of Sentences per 10-K:', np.min(new_df["Sentence"]))
def sentences_climate_related(df):
"""
This function finds the percent of sentences that are climate related by sector and prints out statistics.
:param df: dataframe of combined information
:return: nothing, but charts and statistics
"""
# Here we are finding the percent of sentences that are climate related by sector
new_df = df.groupby(["Stock Ticker", "Year"])
new_df = new_df.agg({"Sentence": "nunique"})
new_df = new_df.reset_index()
print(75 * '=')
print("Specificity Statistics:")
print('Mean Number of Sentences per 10-K:', np.mean(new_df["Sentence"]))
print('Standard Deviation Number of Sentences per 10-K:', np.std(new_df["Sentence"]))
print('Max Number of Sentences per 10-K:', np.max(new_df["Sentence"]))
print('Min Number of Sentences per 10-K:', np.min(new_df["Sentence"]))
def training_sents():
import climate_identification_models
climate_identification_models.load_training_data()
|
[
"cohen.samuel@wustl.edu"
] |
cohen.samuel@wustl.edu
|
59ccfb00af43a84ba98ada0362d7440538825d0d
|
32f02062d755d8244b23ab88a729bfc71331e3b3
|
/todolist/migrations/0004_auto_20190219_0837.py
|
b9e772bc1c3635aab8265950aa3b2b4530dd0b2c
|
[] |
no_license
|
malkoG/todoquest-server
|
6a377566146a974494ed55f71ea30a748d4e38df
|
bfdbdd5fcf44cf815b948f952ee1ca3088eb8d9b
|
refs/heads/master
| 2022-01-22T07:48:13.811514
| 2019-02-19T09:25:24
| 2019-02-19T09:25:24
| 156,067,845
| 3
| 2
| null | 2022-01-13T07:04:51
| 2018-11-04T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 816
|
py
|
# Generated by Django 2.1 on 2019-02-19 08:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('todolist', '0003_auto_20181104_0750'),
]
operations = [
migrations.AddField(
model_name='notification',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='todoentry',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"rijgndqw012@gmail.com"
] |
rijgndqw012@gmail.com
|
df88f160d8e3d64b52b0f4ba5bc1542d33dc16f8
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/aio/operations/_route_filter_rules_operations.py
|
d8a897fed9168ffc0bf0bde43c42fdcf66d0eae4
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 21,710
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
**kwargs
) -> "_models.RouteFilterRule":
"""Gets the specified rule from a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilterRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.RouteFilterRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> "_models.RouteFilterRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_filter_name: str,
rule_name: str,
route_filter_rule_parameters: "_models.RouteFilterRule",
**kwargs
) -> AsyncLROPoller["_models.RouteFilterRule"]:
"""Creates or updates a route in the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param rule_name: The name of the route filter rule.
:type rule_name: str
:param route_filter_rule_parameters: Parameters supplied to the create or update route filter
rule operation.
:type route_filter_rule_parameters: ~azure.mgmt.network.v2020_06_01.models.RouteFilterRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_06_01.models.RouteFilterRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
rule_name=rule_name,
route_filter_rule_parameters=route_filter_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilterRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'} # type: ignore
def list_by_route_filter(
self,
resource_group_name: str,
route_filter_name: str,
**kwargs
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
"""Gets all RouteFilterRules in a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_06_01.models.RouteFilterRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_route_filter.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'} # type: ignore
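    # Minimal usage sketch (assumes azure-identity and the aio NetworkManagementClient; the
    # resource names below are placeholders, not part of this module):
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.network.aio import NetworkManagementClient
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   rule = await client.route_filter_rules.get("myResourceGroup", "myRouteFilter", "myRule")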
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
627eadd9d8d814f6afc5bcf1f793fbd777de080d
|
1c0a80b4d366003e0717e53e389b0def90914ffa
|
/BTD_DTB/DTB.py
|
574da28237dfb56dbd52dfd5b0cfeb3737a7f9e9
|
[] |
no_license
|
AbolfazlAslani/BTD_DTB
|
4a6a66cdfd0de93a8a05d3d1160f4e02b34d0eac
|
97e57f4e2fce8a99ea5304c361a29f872123ea70
|
refs/heads/main
| 2023-01-22T21:27:38.428297
| 2020-12-05T20:52:35
| 2020-12-05T20:52:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
def decimal_to_binary(d):
    # kharej_ghesmat = quotient, baghi_mande = remainders (original identifiers kept)
    kharej_ghesmat = 1
    baghi_mande = []
    answer = ""
    # repeatedly divide by 2 and collect the remainders
    while kharej_ghesmat != 0 :
        kharej_ghesmat = d // 2
        baghi_mande.append(d % 2)
        d = kharej_ghesmat
    # the remainders, read in reverse order, form the binary representation
    for i in baghi_mande:
        answer = str(i) + answer
    return answer
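# Quick sanity check (illustrative only; for d > 0 Python's built-in bin(d)[2:] returns the same string):
#   decimal_to_binary(13)  ->  "1101"
#   decimal_to_binary(32)  ->  "100000"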
|
[
"noreply@github.com"
] |
AbolfazlAslani.noreply@github.com
|
6bfedf399151cd596fada9f893a54261433e2c76
|
69577168f63f53d4ca10898e9b18eec14b87c132
|
/song player/guicource.py
|
702654a854da59f86621bb3d42737963458249c0
|
[] |
no_license
|
ved-op/Music-player
|
5ac7f8b66a7bab9203246e9fa40c9954b7963eb8
|
8543bb84bb9e0a1b156393545ee7d89b15bcc700
|
refs/heads/main
| 2023-02-04T09:41:31.613162
| 2020-12-25T07:17:34
| 2020-12-25T07:17:34
| 324,310,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
from tkinter import *
from pygame import mixer
mixer.init()
def play(audio):
mixer.music.load(audio)
mixer.music.play(-1)
#create a window
root=Tk()
root.title("tkinter course")
root.geometry("600x700")
root.config(bg="#262626")
root.resizable(False,False)
lbl_title = Label(root,text="Please Choose the song",font=('arial',35,'bold'),bg='yellow',fg='red').pack(fill=X,pady=15,padx=10)
btn1=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="ghamand kar",bg="orange",command=lambda:play('ghamand.mpeg')).pack(pady=10,padx=10,)
btn2=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="chandigarh me",bg="orange",command=lambda:play('ghar.mpeg')).pack(pady=10,padx=10,)
btn3=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="hanuman chalisa",bg="orange",command=lambda:play('hanuman.mp3')).pack(pady=10,padx=10,)
btn4=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="achutam",bg="orange",command=lambda:play('achutam.mpeg')).pack(pady=10,padx=10,)
btn5=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="delhi de diya",bg="orange",command=lambda:play('mumbai.mpeg')).pack(pady=10,padx=10,)
btn6=Button(root,padx=16,pady=16,fg="black", font=('Times New Roman', 15 ,'bold'),text="shankara",bg="orange",command=lambda:play('shankara.mpeg')).pack(pady=10,padx=10,)
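# Note: Label(...).pack(...) and Button(...).pack(...) return None, so lbl_title and btn1..btn6
# are all None. That is harmless here because they are never used again, but create the widget
# first and call .pack() separately if a widget ever needs to be reconfigured later.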
root.mainloop()
|
[
"noreply@github.com"
] |
ved-op.noreply@github.com
|
893256007045a8503bce3be751d3b018b761938e
|
c17ed27d8862b0adecff9dde55d4a8ad0be545db
|
/system_tests/test_framework.py
|
c1548a0cfdfd4275e037139105180af6947c8101
|
[] |
no_license
|
hussainsultan/mesos-distributed
|
72cb11e7728121139805899e8e1e38691ab69f17
|
88d9e840eed65f8fe26e53c639350049df49a4b2
|
refs/heads/master
| 2021-01-21T13:57:37.155233
| 2016-06-03T21:39:42
| 2016-06-03T21:39:42
| 47,972,700
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
import unittest
import time
from distributed import Executor
from expects import expect, equal
from framework.distributed_scheduler import DistributedScheduler
from framework.scheduler_driver import DistributedDriver
from system_tests.matchers.framework_matchers import have_activated_slaves, have_framework_name
from system_tests.support.mesos_cluster import MesosCluster
class TestSystem(unittest.TestCase):
def test_framework_runs(self):
with MesosCluster() as cluster:
time.sleep(2)
driver = DistributedDriver().create_driver(DistributedScheduler)
driver.start()
time.sleep(5)
expect(cluster).to(have_activated_slaves(1))
expect(cluster).to(have_framework_name('distributed-framework'))
            # distributed test - this probably doesn't belong here
executor = Executor('127.0.0.1:8787')
A = executor.map(lambda x: x**2, range(10))
B = executor.map(lambda x: -x, A)
total = executor.submit(sum, B)
expect(total.result()).to(equal(-285))
driver.stop()
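            # Note: in later releases of the `distributed` package, Executor was renamed Client;
            # this test targets the older API where Executor('host:port') connects to the scheduler.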
|
[
"hussainz@gmail.com"
] |
hussainz@gmail.com
|
3c46b7366cc5ea8f244b9d5f62ceca308f58388f
|
5dd82b41a5d4629d9353e196a43b78b66bbf5201
|
/pycoin/tx/script/Stack.py
|
a9720ca616ee0acad46582f84940bbca8d62c17c
|
[
"MIT"
] |
permissive
|
haobtc/pycoin
|
41009bd152fec29aa38a4e22f85049eb002e06ca
|
cba22aede3e7fc4edb742b8d6fe4d9aa198de556
|
refs/heads/master
| 2022-07-21T00:19:07.143110
| 2017-10-21T23:44:13
| 2017-10-21T23:44:13
| 109,953,071
| 1
| 2
|
MIT
| 2019-12-25T06:13:48
| 2017-11-08T09:12:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
"""
VM Stack data structure
The MIT License (MIT)
Copyright (c) 2017 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import errno
from . import ScriptError
class Stack(list):
def pop(self, *args, **kwargs):
try:
return super(Stack, self).pop(*args, **kwargs)
except IndexError:
raise ScriptError("pop from empty stack", errno.INVALID_STACK_OPERATION)
def __getitem__(self, *args, **kwargs):
try:
return super(Stack, self).__getitem__(*args, **kwargs)
except IndexError:
raise ScriptError("getitem out of range", errno.INVALID_STACK_OPERATION)
|
[
"him@richardkiss.com"
] |
him@richardkiss.com
|
8a833d65aba89aad125270ef26e31edfcd8b121c
|
5be0d8a6cdfa7d5664cb750403ba0522da77967f
|
/app2.py
|
d7d9eae03ad3478fbb6159dbb2e1bbf8860ee5b5
|
[] |
no_license
|
Rajesh-mandal/predict-the-breast-cancer-result
|
f358827e8e4cf1923600220037e6bd31d5116504
|
567bb733a3ca12f9ffc7f599719746cff00aac22
|
refs/heads/master
| 2023-03-16T22:12:02.211324
| 2020-04-21T22:12:32
| 2020-04-21T22:12:32
| 257,723,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('breast_cancer_detector.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
input_features = [float(x) for x in request.form.values()]
features_value = [np.array(input_features)]
features_name = ['mean radius', 'mean texture', 'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness', 'mean concavity',
'mean concave points', 'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error', 'perimeter error', 'area error',
'smoothness error', 'compactness error', 'concavity error',
'concave points error', 'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture', 'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness', 'worst concavity',
'worst concave points', 'worst symmetry', 'worst fractal dimension']
df = pd.DataFrame(features_value, columns=features_name)
output = model.predict(df)
if output == 0:
res_val = "** breast cancer **"
else:
res_val = "no breast cancer"
return render_template('index.html', prediction_text='Patient has {}'.format(res_val))
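# Note (assumption about the pickled model): the 0 -> "breast cancer" mapping above follows
# scikit-learn's load_breast_cancer convention, where target 0 is malignant and 1 is benign;
# verify against the training code for breast_cancer_detector.pkl before relying on it.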
if __name__ == "__main__":
app.run()
|
[
"noreply@github.com"
] |
Rajesh-mandal.noreply@github.com
|
70f0f0e8bc68e514d1a62f1c3ef6de699f93c6f7
|
1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc
|
/venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_availabilityset_facts.py
|
aab729a6cf6ab48539d8af1e4d2915bbc618e22b
|
[
"MIT"
] |
permissive
|
otus-devops-2019-02/devopscourses_infra
|
1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c
|
e42e5deafce395af869084ede245fc6cff6d0b2c
|
refs/heads/master
| 2020-04-29T02:41:49.985889
| 2019-05-21T06:35:19
| 2019-05-21T06:35:19
| 175,780,457
| 0
| 1
|
MIT
| 2019-05-21T06:35:20
| 2019-03-15T08:35:54
|
HCL
|
UTF-8
|
Python
| false
| false
| 4,695
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Julien Stroheker <juliens@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_facts
version_added: "2.4"
short_description: Get availability set facts.
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set
resource_group:
description:
- The resource group to search for the desired availability set
tags:
description:
- List of tags to be matched
extends_documentation_fragment:
- azure
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Get facts for one availability set
azure_rm_availabilityset_facts:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all availability sets in a specific resource group
azure_rm_availabilityset_facts:
resource_group: myResourceGroup
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: list
example: [{
"location": "eastus2",
"name": "myavailabilityset",
"properties": {
"platformFaultDomainCount": 3,
"platformUpdateDomainCount": 2,
"virtualMachines": []
},
"sku": "Aligned",
"type": "Microsoft.Compute/availabilitySets"
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetFacts(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetFacts, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_args:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_facts']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_facts']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetFacts()
if __name__ == '__main__':
main()
|
[
"skydevapp@gmail.com"
] |
skydevapp@gmail.com
|
d336a1532cc9bd3a3f8a783f7d0fe0d8fcd1ed5a
|
9b7e4ac45d454c26a572e9a059f04792017b9028
|
/apps/employees/migrations/0001_initial.py
|
a3a6f420e17fdc6f4434a4cf7df10220e56d717a
|
[] |
no_license
|
konradsofton/basic-django-setup
|
f3300fa4e5bc7ce8fdc147b3dd0d9544e6bf9442
|
3a10a10ee9d860e36e274e32b4e32d88876245d4
|
refs/heads/master
| 2020-07-28T08:38:18.697378
| 2019-09-18T17:34:49
| 2019-09-18T17:34:49
| 209,367,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# Generated by Django 2.2.5 on 2019-09-18 15:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EmployeeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('lname', models.CharField(max_length=255)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('updated_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
],
options={
'db_table': 'employee',
},
),
]
|
[
"kjimenez@softonitg.com"
] |
kjimenez@softonitg.com
|
55a82f69a588e3653988ca5cbc8a7f89b73eeb30
|
a2ea48005742e4eb70c3df1c2df588c9cfe160e3
|
/abong/manage.py
|
b3d6f98f92979a956fd4270c28b4cb8384816541
|
[
"MIT"
] |
permissive
|
matheusdemicheli/tcc
|
fa79257fe1dc7cf989f9cb9302271919906e2b2a
|
3d85db8c68b01d1a4e55cb311dcad552a60513fd
|
refs/heads/master
| 2020-12-30T12:34:49.937308
| 2017-07-04T15:30:59
| 2017-07-04T15:30:59
| 91,387,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "abong.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"matheusdemicheli@gmail.com"
] |
matheusdemicheli@gmail.com
|
789ae31427a108126e1be34f17aba9e2d1a4469c
|
89199143b2e566190458cabfc428d107528a5e91
|
/tests/test_symap.py
|
46ff01e02a040c08685658bc32737528ce4deffe
|
[] |
no_license
|
msrocka/pslink
|
b7e650e500a4921413000d4e5ed8f89849e4e556
|
99bce2bb09b2dd553af308d56361d32b46ee59f7
|
refs/heads/master
| 2022-03-08T22:25:11.154473
| 2019-11-12T16:51:49
| 2019-11-12T16:51:49
| 180,141,790
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import unittest
import pslink.symap as symap
class SymapTest(unittest.TestCase):
def test_stopwords(self):
self.assertTrue(symap.is_stopword("at"))
self.assertFalse(symap.is_stopword("steel"))
def test_similarity(self):
self.assertAlmostEqual(1.0, symap.similarity("steel", "steel"))
self.assertAlmostEqual(0.0, symap.similarity("steel", "car"))
def test_keywords(self):
p = "Steel product, secondary structural, girts and purlins, at plant"
expected = ["steel", "product", "secondary", "structural",
"girts", "purlins", "plant"]
r = symap.keywords(p)
self.assertEqual(len(r), len(expected))
for e in expected:
self.assertTrue(e in r)
def test_best_match(self):
match = symap.best_match("stainless steel", [
"World Stainless Steel. 2005. World Stainless Steel LCI",
"Steel, stainless 304",
"steel, generic",
"Stainless steel; Manufacture; Production mix, at plant; 316 2B"])
self.assertEqual(match, "Steel, stainless 304")
if __name__ == "__main__":
unittest.main()
|
[
"michael.srocka@gmail.com"
] |
michael.srocka@gmail.com
|
79ada77352538c72e1c32df63bc6464785ce8e76
|
119365cbb6e632b4fc01baf627fb345a334fe6d8
|
/pizza_app/admin.py
|
5b99c57b48972d26129f0dbff21286674470fb61
|
[] |
no_license
|
Pythpiyush/Ezyschooling_Intern_Task
|
fa2a387951e6313e87411ba34e8e3e04f4fbfe68
|
f89682ed4804b442a0f757dc475f92990b7ca1b6
|
refs/heads/master
| 2023-07-23T00:26:19.088521
| 2021-09-02T12:27:20
| 2021-09-02T12:27:20
| 402,406,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
from django.contrib import admin
from .models import Piz_Mod
# Register your models here.
admin.site.register(Piz_Mod)
|
[
"piyushagrawal111@ms.com"
] |
piyushagrawal111@ms.com
|
6ee93adf49a11d96f1fc6e51ebf370ecf779b5c0
|
7ecaab8f6c74e8f01d4db2655e8bdf84a09331c2
|
/venv/bin/easy_install
|
e0b2108645b518774b5693911765c372f2d5b4b6
|
[] |
no_license
|
zhouyapengzi/apollo_analyze_tools
|
11577ef8c2316e2bf89f01309665e10c3e31b0ad
|
f6ecf2421c1d63c3ae492d3a3bb87e47a9386aba
|
refs/heads/master
| 2022-11-25T18:48:05.554694
| 2020-07-28T16:07:39
| 2020-07-28T16:07:39
| 273,553,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
#!/Users/zipeng/Projects/MyProject/tools/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"zi_peng@encs.concordia.ca"
] |
zi_peng@encs.concordia.ca
|
|
1852c0762c5bdea1a2e6541b37699de565be8f1c
|
5b7fd3672ca2ec2b91a21b946325de1ed62be6e8
|
/python/spinn/data/snli/load_snli_data.py
|
0ff2942d001c4e021cd2981684e169540dd0a42e
|
[
"Apache-2.0"
] |
permissive
|
VikingMew/spinn
|
8e7472dbc6bf2f088629896dccca4328b5be76c2
|
e52f9a680cda28d36b32e15ee4b381912fbaccf4
|
refs/heads/master
| 2021-01-15T20:29:36.938472
| 2016-06-14T23:42:18
| 2016-06-14T23:42:18
| 61,280,747
| 2
| 0
| null | 2016-06-16T09:39:09
| 2016-06-16T09:39:07
| null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
#!/usr/bin/env python
import json
SENTENCE_PAIR_DATA = True
LABEL_MAP = {
"entailment": 0,
"neutral": 1,
"contradiction": 2
}
def convert_binary_bracketing(parse):
transitions = []
tokens = []
for word in parse.split(' '):
if word[0] != "(":
if word == ")":
transitions.append(1)
else:
# Downcase all words to match GloVe.
tokens.append(word.lower())
transitions.append(0)
return tokens, transitions
def load_data(path):
print "Loading", path
examples = []
with open(path, 'r') as f:
for line in f:
loaded_example = json.loads(line)
if loaded_example["gold_label"] not in LABEL_MAP:
continue
example = {}
example["label"] = loaded_example["gold_label"]
example["premise"] = loaded_example["sentence1"]
example["hypothesis"] = loaded_example["sentence2"]
(example["premise_tokens"], example["premise_transitions"]) = convert_binary_bracketing(loaded_example["sentence1_binary_parse"])
(example["hypothesis_tokens"], example["hypothesis_transitions"]) = convert_binary_bracketing(loaded_example["sentence2_binary_parse"])
examples.append(example)
return examples, None
if __name__ == "__main__":
# Demo:
    # load_data returns (examples, None); unpack it so the first example is actually printed
    examples, _ = load_data('snli-data/snli_1.0_dev.jsonl')
    print examples[0]
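    # Hedged worked example (added, not in the original): the encoding produced by
    # convert_binary_bracketing for a tiny binary parse -- 0 means shift a token, 1 means reduce.
    tokens, transitions = convert_binary_bracketing("( ( The cat ) sat )")
    print tokens       # ['the', 'cat', 'sat']
    print transitions  # [0, 0, 1, 0, 1]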
|
[
"sbowman@stanford.edu"
] |
sbowman@stanford.edu
|
f3d8f00ed640a83a5927278ffa43a11380162c80
|
52d15a041042284259a9b0793ec3d182a5024b67
|
/_site/scripts/collection_data.py
|
861eee6c0f9a1097d96c51d94fa29e82121fbca0
|
[] |
no_license
|
MLBazaar/mlbazaar.github.io
|
745703b5e262a1d2b804caee870727554dbf8d74
|
3f9971508fe140a841f5ae3687a237a3a1285ca1
|
refs/heads/master
| 2022-11-06T14:47:53.785857
| 2021-10-28T14:40:04
| 2021-10-28T14:40:04
| 158,607,143
| 3
| 1
| null | 2022-10-06T03:21:25
| 2018-11-21T21:17:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,004
|
py
|
import os
import sys
import glob
import errno
import operator
import json
import codecs
from urllib.parse import urlparse
PATH_TO_DATA = "_data"
PATH_TO_LIBRARY = 'data-library/'
def split_last(s, c):
words = s.split(c)
return words[len(words) - 1]
def extract_domain_from_url(url):
parsed_uri = urlparse(url)
domain = '{uri.netloc}'.format(uri=parsed_uri)
domain = domain.replace("www.", "")
#result = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
return domain
# GENERATE TEMPLATE FILE
PATH_TO_DATASET = "dataset"
URL_DATASET = "/dataset"
def write_template_file(file_path, layout, permalink, title, options={}):
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
f = codecs.open(file_path, "w+", "utf-8")
f.write("---\n")
f.write("layout: '{0}'\n".format(layout))
f.write("permalink: '{0}'\n".format(permalink))
f.write("title: '{0}'\n".format(title))
for keyField in options:
f.write(str(keyField) + ": '" + options[keyField] + "'\n")
f.write("---\n")
f.close()
DATASET = {}
for subdir, dirs, files in os.walk(PATH_TO_LIBRARY):
try:
item = {}
for filename in files:
file = subdir + '/' + filename
if os.path.isfile(file):
with open(file) as f: # No need to specify 'r': this is the default.
# datasetDoc
if filename == 'datasetDoc.json':
item["datasetDoc"] = json.load(f)
# pipeline
if filename == 'best_pipeline.json':
item["pipeline"] = json.load(f)
# problemDoc
if filename == 'problemDoc.json':
item["problemDoc"] = json.load(f)
if bool(item):
# set path to detail page
name = split_last(subdir, '/')
item["dataset_path"] = name.replace("_", "-")
DATASET[name] = item
    except IOError as exc:
        if exc.errno != errno.EISDIR:
            # A bare string cannot be raised in Python 3; wrap the original error instead.
            raise RuntimeError("Error when loading data") from exc
# Save to _data directory
file_path = PATH_TO_DATA + "/" + "datasets.json"
with open(file_path, "w+") as f:
json.dump(DATASET, f)
print("LOG: Saved datasets to", file_path)
# Extract Domain from URL
LIST_DOMAIN = []
for dataset_name in DATASET:
data = DATASET[dataset_name]
sourceURI = data["datasetDoc"]["about"]["sourceURI"]
if sourceURI:
domain = extract_domain_from_url(sourceURI)
if domain not in LIST_DOMAIN:
LIST_DOMAIN.append(domain)
# Save to _data directory
file_path = PATH_TO_DATA + "/" + "domains.json"
with open(file_path, "w+") as f:
json.dump(LIST_DOMAIN, f)
print("LOG: Saved domains to", file_path)
# Task Type
LIST_TASKTYPE = []
for dataset_name in DATASET:
data = DATASET[dataset_name]
task_type = data["pipeline"]["loader"]["task_type"]
if task_type and task_type not in LIST_TASKTYPE:
LIST_TASKTYPE.append(task_type)
# Save to _data directory
file_path = PATH_TO_DATA + "/" + "tasktype.json"
with open(file_path, "w+") as f:
json.dump(LIST_TASKTYPE, f)
print("LOG: Saved task_type to", file_path)
# Data Type
LIST_DATATYPE = []
for dataset_name in DATASET:
data = DATASET[dataset_name]
data_type = data["pipeline"]["loader"]["data_modality"]
if data_type and data_type not in LIST_DATATYPE:
LIST_DATATYPE.append(data_type)
# Save to _data directory
file_path = PATH_TO_DATA + "/" + "datatype.json"
with open(file_path, "w+") as f:
json.dump(LIST_DATATYPE, f)
print("LOG: Saved data_type to", file_path)
# Generate template for detail dataset
for datasetID in DATASET:
data = DATASET[datasetID]
dataset_path = data["dataset_path"]
datasetName = data['problemDoc']['_id']
if dataset_path:
detail_path = PATH_TO_DATASET + "/" + dataset_path + ".md"
layout = "detail"
permalink = PATH_TO_DATASET + "/" + dataset_path
title = datasetName.capitalize()
options = {"datasetID": datasetID}
write_template_file(detail_path, layout, permalink, title, options)
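        # Hedged illustration (added comment, not in the source): for a dataset directory named
        # "my_dataset", write_template_file above emits a markdown stub of roughly this shape
        # (title comes from problemDoc's _id, capitalized):
        #   ---
        #   layout: 'detail'
        #   permalink: 'dataset/my-dataset'
        #   title: 'My_dataset'
        #   datasetID: 'my_dataset'
        #   ---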
|
[
"tai.pham@hdwebsoft.co"
] |
tai.pham@hdwebsoft.co
|
50f13f2b8949f3724c12ab6100cbbe40fa915d45
|
52e84a20e898345a596311f0c78a8c02adc7262e
|
/venv/bin/pip
|
2eed03cac0b4adf31317a6b4ae54027c92bc25fa
|
[] |
no_license
|
sumit-saurabh/ltv
|
70b8fc90d64695754a8bf5d4f2ef166219618607
|
d5082d149fb8a3d2e024de0734a0adf8a723074b
|
refs/heads/master
| 2022-12-30T13:22:54.813954
| 2020-09-25T08:41:06
| 2020-09-25T08:41:06
| 298,218,428
| 0
| 1
| null | 2020-09-24T12:18:04
| 2020-09-24T08:37:20
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
#!/data/workspace/sumit.saurabh/hackathon/galaxy/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"sumit.saurabh@bounceshare.com"
] |
sumit.saurabh@bounceshare.com
|
|
aebef1fff76ee0c51fa95840257749b0f61b431f
|
f1c5c43e905ebc04899da51d08ff754acc19a586
|
/preprocessing.py
|
3f91e3662c3badbcc82468b706a196d76f73320d
|
[] |
no_license
|
SarahFLestari/Merchant-and-Bank-Mining
|
5524339a82cf5a06791e00b411a8e22c229052db
|
090920c971a9b8cd2dbce35942dde3a48f7ae11e
|
refs/heads/main
| 2022-12-20T11:47:32.810835
| 2020-10-07T01:57:55
| 2020-10-07T01:57:55
| 300,483,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
import os
from xml.dom import minidom
import xlsxwriter
location = os.getcwd()
counter = 0
trainfiles = []
otherfiles = []
print(location)
workbook = xlsxwriter.Workbook('datatrainpre.xlsx')
worksheet = workbook.add_worksheet()
worksheet.set_column('A:A',20)
for file in os.listdir(location):
try:
if file.endswith(".xml"):
#print "txt file found:\t", file
trainfiles.append(str(file))
counter = counter+1
else:
otherfiles.append(file)
counter = counter+1
    except Exception as e:
        # Print before re-raising; in the original the print came after `raise` and was unreachable.
        print "No files found here!"
        raise e
count = 0
worksheet.write(0,0,'Berita')
worksheet.write(0,1,'File')
trainfiles.sort()
for i in trainfiles:
print(i)
for xmlfile in trainfiles:
xmldoc = minidom.parse(xmlfile)
p = xmldoc.getElementsByTagName("p")[1]
par = xmldoc.getElementsByTagName("p")
paragraph = " "
for i in range(0,len(par)):
p = xmldoc.getElementsByTagName("p")[i]
paragraph += p.childNodes[0].data
worksheet.write(count+1,0,paragraph)
worksheet.write(count+1,1,str(trainfiles[count]).replace('.xml',''))
#print(p.childNodes[0].data)
#print("Isi tag : ",p.childNodes[0].data)
count +=1
print "Total files found:\t", counter
workbook.close()
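# Hedged note (added, not in the source): the script assumes each *.xml article stores its text
# in <p> elements, e.g. <article><p>headline ...</p><p>body ...</p></article>; only the first
# text node of every <p> is concatenated into the "Berita" column. The tag layout shown here is
# illustrative.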
|
[
"sarahfauziah17@gmail.com"
] |
sarahfauziah17@gmail.com
|
9e51efdc26412f7a5bd036226469eeed706a488c
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/ListVulAutoRepairConfigRequest.py
|
8914a8dab27048a9245aee71264c164922cb1833
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,002
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class ListVulAutoRepairConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'ListVulAutoRepairConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_AliasName(self): # String
return self.get_query_params().get('AliasName')
def set_AliasName(self, AliasName): # String
self.add_query_param('AliasName', AliasName)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
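def _example_list_vul_auto_repair_config(client):
    # Hedged usage sketch (added; not part of the SDK file): how this request class is typically
    # driven through the core client. The client is created elsewhere, e.g.
    #   from aliyunsdkcore.client import AcsClient
    #   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    # where the credentials and region are illustrative placeholders.
    request = ListVulAutoRepairConfigRequest()
    request.set_CurrentPage(1)
    request.set_PageSize(20)
    return client.do_action_with_exception(request)  # raw JSON bytes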
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
b3bd51e7239261a5e940aee2d21bdc3eafc90bf0
|
72a146dad10c3330548f175643822e6cc2e2ccba
|
/ui/events/events.gyp
|
c2692a69bcfc92adc1fbb5e3e9b9f532c8d8d4d9
|
[
"BSD-3-Clause"
] |
permissive
|
daotianya/browser-android-tabs
|
bb6772394c2138e2f3859a83ec6e0860d01a6161
|
44e83a97eb1c7775944a04144e161d99cbb7de5b
|
refs/heads/master
| 2020-06-10T18:07:58.392087
| 2016-12-07T15:37:13
| 2016-12-07T15:37:13
| 75,914,703
| 1
| 0
| null | 2016-12-08T07:37:51
| 2016-12-08T07:37:51
| null |
UTF-8
|
Python
| false
| false
| 13,460
|
gyp
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //ui/events:dom_keycode_converter
'target_name': 'dom_keycode_converter',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
],
'sources': [
# Note: sources list duplicated in GN build.
'keycodes/dom/dom_code.h',
'keycodes/dom/dom_key.h',
'keycodes/dom/dom_key_data.inc',
'keycodes/dom/keycode_converter.cc',
'keycodes/dom/keycode_converter.h',
'keycodes/dom/keycode_converter_data.inc',
],
},
{
# GN version: //ui/events:events_base
'target_name': 'events_base',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../gfx/gfx.gyp:gfx_geometry',
'dom_keycode_converter',
],
'defines': [
'EVENTS_BASE_IMPLEMENTATION',
],
'sources': [
# Note: sources list duplicated in GN build.
'android/scroller.cc',
'android/scroller.h',
'base_event_utils.cc',
'base_event_utils.h',
'event_constants.h',
'event_switches.cc',
'event_switches.h',
'events_base_export.h',
'gesture_curve.h',
'gesture_event_details.cc',
'gesture_event_details.h',
'gestures/fling_curve.cc',
'gestures/fling_curve.h',
'keycodes/dom_us_layout_data.h',
'keycodes/keyboard_code_conversion.cc',
'keycodes/keyboard_code_conversion.h',
'keycodes/keyboard_code_conversion_android.cc',
'keycodes/keyboard_code_conversion_android.h',
'keycodes/keyboard_code_conversion_mac.h',
'keycodes/keyboard_code_conversion_mac.mm',
'keycodes/keyboard_code_conversion_win.cc',
'keycodes/keyboard_code_conversion_win.h',
'keycodes/keyboard_codes.h',
'latency_info.cc',
'latency_info.h',
],
'export_dependent_settings': [
'../../ui/gfx/gfx.gyp:gfx_geometry',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'keycodes/events_keycodes.gyp:keycodes_x11',
],
}],
],
},
{
# GN version: //ui/events
'target_name': 'events',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'<(DEPTH)/skia/skia.gyp:skia',
'../display/display.gyp:display',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'dom_keycode_converter',
'events_base',
'gesture_detection',
],
'defines': [
'EVENTS_IMPLEMENTATION',
],
'sources': [
# Note: sources list duplicated in GN build.
'cocoa/cocoa_event_utils.h',
'cocoa/cocoa_event_utils.mm',
'cocoa/events_mac.mm',
'event.cc',
'event.h',
'event_dispatcher.cc',
'event_dispatcher.h',
'event_handler.cc',
'event_handler.h',
'event_processor.cc',
'event_processor.h',
'event_rewriter.h',
'event_source.cc',
'event_source.h',
'event_target.cc',
'event_target.h',
'event_target_iterator.h',
'event_targeter.h',
'event_utils.cc',
'event_utils.h',
'events_export.h',
'events_stub.cc',
'gestures/gesture_provider_aura.cc',
'gestures/gesture_provider_aura.h',
'gestures/gesture_recognizer.h',
'gestures/gesture_recognizer_impl.cc',
'gestures/gesture_recognizer_impl.h',
'gestures/gesture_recognizer_impl_mac.cc',
'gestures/gesture_types.h',
'gestures/motion_event_aura.cc',
'gestures/motion_event_aura.h',
'keycodes/platform_key_map_win.cc',
'keycodes/platform_key_map_win.h',
'null_event_targeter.cc',
'null_event_targeter.h',
'scoped_target_handler.cc',
'scoped_target_handler.h',
'ozone/events_ozone.cc',
'win/events_win.cc',
'win/system_event_state_lookup.cc',
'win/system_event_state_lookup.h',
'x/events_x.cc',
],
'conditions': [
['use_x11==1', {
'dependencies': [
'../../build/linux/system.gyp:x11',
'../gfx/x/gfx_x11.gyp:gfx_x11',
'devices/events_devices.gyp:events_devices',
'devices/x11/events_devices_x11.gyp:events_devices_x11',
'keycodes/events_keycodes.gyp:keycodes_x11',
'x/events_x.gyp:events_x',
],
}],
['use_aura==0', {
'sources!': [
'gestures/gesture_provider_aura.cc',
'gestures/gesture_provider_aura.h',
'gestures/gesture_recognizer.h',
'gestures/gesture_recognizer_impl.cc',
'gestures/gesture_recognizer_impl.h',
'gestures/gesture_types.h',
'gestures/motion_event_aura.cc',
'gestures/motion_event_aura.h',
],
}],
['use_ozone==1 or (OS=="android" and use_aura==1)', {
'sources': [
'events_default.cc',
],
}],
# We explicitly enumerate the platforms we _do_ provide native cracking
# for here.
['OS=="win" or OS=="mac" or use_x11==1 or use_ozone==1 or (OS=="android" and use_aura==1)', {
'sources!': [
'events_stub.cc',
],
}],
['use_ozone==1', {
'dependencies': [
'ozone/events_ozone.gyp:events_ozone_layout',
],
}],
['OS=="android"', {
'sources': [
'android/motion_event_android.cc',
'android/motion_event_android.h',
'android/key_event_utils.cc',
'android/key_event_utils.h',
],
'dependencies': [
'motionevent_jni_headers',
'keyevent_jni_headers',
],
}],
],
},
{
# GN version: //ui/events/gestures/blink
'target_name': 'gestures_blink',
'type': 'static_library',
'dependencies': [
'../../base/base.gyp:base',
'../../third_party/WebKit/public/blink_headers.gyp:blink_headers',
'../gfx/gfx.gyp:gfx_geometry',
'events',
'gesture_detection',
],
'sources': [
# Note: sources list duplicated in GN build.
'gestures/blink/web_gesture_curve_impl.cc',
'gestures/blink/web_gesture_curve_impl.h',
],
},
{
# GN version: //ui/events:gesture_detection
'target_name': 'gesture_detection',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../display/display.gyp:display',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'events_base',
],
'defines': [
'GESTURE_DETECTION_IMPLEMENTATION',
],
'sources': [
# Note: sources list duplicated in GN build.
'gesture_detection/bitset_32.h',
'gesture_detection/filtered_gesture_provider.cc',
'gesture_detection/filtered_gesture_provider.h',
'gesture_detection/gesture_configuration.cc',
'gesture_detection/gesture_configuration.h',
'gesture_detection/gesture_configuration_android.cc',
'gesture_detection/gesture_configuration_aura.cc',
'gesture_detection/gesture_detection_export.h',
'gesture_detection/gesture_detector.cc',
'gesture_detection/gesture_detector.h',
'gesture_detection/gesture_event_data.cc',
'gesture_detection/gesture_event_data.h',
'gesture_detection/gesture_event_data_packet.cc',
'gesture_detection/gesture_event_data_packet.h',
'gesture_detection/gesture_listeners.cc',
'gesture_detection/gesture_listeners.h',
'gesture_detection/gesture_provider.cc',
'gesture_detection/gesture_provider.h',
'gesture_detection/gesture_provider_config_helper.cc',
'gesture_detection/gesture_provider_config_helper.h',
'gesture_detection/gesture_touch_uma_histogram.cc',
'gesture_detection/gesture_touch_uma_histogram.h',
'gesture_detection/motion_event.cc',
'gesture_detection/motion_event.h',
'gesture_detection/motion_event_buffer.cc',
'gesture_detection/motion_event_buffer.h',
'gesture_detection/motion_event_generic.cc',
'gesture_detection/motion_event_generic.h',
'gesture_detection/scale_gesture_detector.cc',
'gesture_detection/scale_gesture_detector.h',
'gesture_detection/scale_gesture_listeners.cc',
'gesture_detection/scale_gesture_listeners.h',
'gesture_detection/snap_scroll_controller.cc',
'gesture_detection/snap_scroll_controller.h',
'gesture_detection/touch_disposition_gesture_filter.cc',
'gesture_detection/touch_disposition_gesture_filter.h',
'gesture_detection/velocity_tracker.cc',
'gesture_detection/velocity_tracker.h',
'gesture_detection/velocity_tracker_state.cc',
'gesture_detection/velocity_tracker_state.h',
],
'conditions': [
['use_aura!=1 and OS!="android"', {
'sources': [
'gesture_detection/gesture_configuration_default.cc',
],
}],
],
},
{
# GN version: //ui/events/ipc:events_ipc
'target_name': 'events_ipc',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/ipc/ipc.gyp:ipc',
'../gfx/gfx.gyp:gfx_geometry',
'../gfx/ipc/geometry/gfx_ipc_geometry.gyp:gfx_ipc_geometry',
'events_base',
],
'defines': [
'EVENTS_IPC_IMPLEMENTATION',
],
'sources': [
'ipc/latency_info_param_traits.cc',
'ipc/latency_info_param_traits.h',
'ipc/latency_info_param_traits_macros.h',
],
},
{
# GN version: //ui/events:test_support
'target_name': 'events_test_support',
'type': 'static_library',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/skia/skia.gyp:skia',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'events',
'events_base',
'gesture_detection',
'platform/events_platform.gyp:events_platform',
],
'sources': [
# Note: sources list duplicated in GN build.
'test/cocoa_test_event_utils.h',
'test/cocoa_test_event_utils.mm',
'test/device_data_manager_test_api.h',
'test/event_generator.cc',
'test/event_generator.h',
'test/events_test_utils.cc',
'test/events_test_utils.h',
'test/events_test_utils_x11.cc',
'test/events_test_utils_x11.h',
'test/motion_event_test_utils.cc',
'test/motion_event_test_utils.h',
'test/platform_event_source_test_api.cc',
'test/platform_event_source_test_api.h',
'test/platform_event_waiter.cc',
'test/platform_event_waiter.h',
'test/test_event_handler.cc',
'test/test_event_handler.h',
'test/test_event_processor.cc',
'test/test_event_processor.h',
'test/test_event_target.cc',
'test/test_event_target.h',
'test/test_event_targeter.cc',
'test/test_event_targeter.h',
],
'conditions': [
['OS=="ios"', {
# The cocoa files don't apply to iOS.
'sources/': [['exclude', 'cocoa']],
}],
['use_x11==1', {
'dependencies': [
'devices/x11/events_devices_x11.gyp:events_devices_x11',
'keycodes/events_keycodes.gyp:keycodes_x11',
],
}],
['use_x11==1', {
'dependencies': [
'x/events_x.gyp:events_x',
],
}],
['use_x11==1 or use_ozone==1', {
'sources' : [
'test/device_data_manager_test_api_impl.cc',
],
'dependencies': [
'devices/events_devices.gyp:events_devices',
],
}, { # else use_x11=1 or use_ozone=1
'sources' : [
'test/device_data_manager_test_api_stub.cc',
]
}],
],
},
],
'conditions': [
['OS == "android"', {
'targets': [
{
'target_name': 'motionevent_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'ui',
'input_java_class': 'android/view/MotionEvent.class',
},
'includes': [ '../../build/jar_file_jni_generator.gypi' ],
},
{
'target_name': 'keyevent_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'ui',
'input_java_class': 'android/view/KeyEvent.class',
},
'includes': [ '../../build/jar_file_jni_generator.gypi' ],
},
],
}],
],
}
|
[
"serg.zhukovsky@gmail.com"
] |
serg.zhukovsky@gmail.com
|
8b2e95fe8a7669b13070f9c8b42e9a1eb2454dbd
|
0e24b45eedb8166e5da1f07ebd935cc77e718523
|
/project_views/project_views/settings.py
|
f773c2c0d7f69ec6c98f574c60f0f900fb0f9aab
|
[] |
no_license
|
woka20/DJANGO_MVC
|
ce17724f248ad86364843a21203ef0f2f0775f34
|
cfbbf8491e97caed593561979db3756aa478022c
|
refs/heads/master
| 2020-09-28T12:26:12.741319
| 2019-12-10T16:51:26
| 2019-12-10T16:51:26
| 226,776,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
"""
Django settings for project_views project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't8+2br$ov*sqltfm4n-$n*=4&#l5yhk#r6^sv!lbjq-3!ndjc7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'Alterra.apps.AlterraConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_views.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_views.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"woka@alterra.id"
] |
woka@alterra.id
|
524c77805ed66ca0c79e0d50cfffef081ef2319b
|
26b5053c5581b15571ffcedf9eae58da831f2e12
|
/ledfx/api/presets.py
|
878dc5a466933f0a938d8aa87f20e36ac3bb4b85
|
[
"MIT"
] |
permissive
|
camcs1/LedFx
|
a9453ab4309865cea09c5b28f6e92f7ce1eed452
|
1dff9e64a40d219cb6a87c2212e2e40ca8513735
|
refs/heads/master
| 2021-07-15T15:15:57.320084
| 2021-03-05T22:41:18
| 2021-03-05T22:41:18
| 238,537,668
| 0
| 0
|
MIT
| 2020-02-05T20:04:14
| 2020-02-05T20:04:09
| null |
UTF-8
|
Python
| false
| false
| 5,591
|
py
|
from ledfx.config import save_config
from ledfx.api import RestEndpoint
from ledfx.utils import generate_id
from aiohttp import web
import logging
import json
_LOGGER = logging.getLogger(__name__)
class PresetsEndpoint(RestEndpoint):
"""REST end-point for querying and managing presets"""
ENDPOINT_PATH = "/api/presets"
async def get(self) -> web.Response:
"""Get all presets"""
response = {
'status' : 'success' ,
'presets' : self._ledfx.config['presets']
}
return web.Response(text=json.dumps(response), status=200)
async def delete(self, request) -> web.Response:
"""Delete a preset"""
data = await request.json()
preset_id = data.get('id')
if preset_id is None:
response = { 'status' : 'failed', 'reason': 'Required attribute "preset_id" was not provided' }
return web.Response(text=json.dumps(response), status=500)
if not preset_id in self._ledfx.config['presets'].keys():
response = { 'status' : 'failed', 'reason': 'Preset {} does not exist'.format(preset_id) }
return web.Response(text=json.dumps(response), status=500)
# Delete the preset from configuration
del self._ledfx.config['presets'][preset_id]
# Save the config
save_config(
config = self._ledfx.config,
config_dir = self._ledfx.config_dir)
response = { 'status' : 'success' }
return web.Response(text=json.dumps(response), status=200)
async def put(self, request) -> web.Response:
"""Activate a preset"""
data = await request.json()
action = data.get('action')
if action is None:
response = { 'status' : 'failed', 'reason': 'Required attribute "action" was not provided' }
return web.Response(text=json.dumps(response), status=500)
if action not in ['activate', 'rename']:
response = { 'status' : 'failed', 'reason': 'Invalid action "{}"'.format(action) }
return web.Response(text=json.dumps(response), status=500)
preset_id = data.get('id')
if preset_id is None:
response = { 'status' : 'failed', 'reason': 'Required attribute "preset_id" was not provided' }
return web.Response(text=json.dumps(response), status=500)
if not preset_id in self._ledfx.config['presets'].keys():
response = { 'status' : 'failed', 'reason': 'Preset "{}" does not exist'.format(preset_id) }
return web.Response(text=json.dumps(response), status=500)
preset = self._ledfx.config['presets'][preset_id]
if action == "activate":
for device in self._ledfx.devices.values():
# Check device is in preset, make no changes if it isn't
if not device.id in preset['devices'].keys():
_LOGGER.info(('Device with id {} has no data in preset {}').format(device.id, preset_id))
continue
# Set effect of device to that saved in the preset,
# clear active effect of device if no effect in preset
if preset['devices'][device.id]:
# Create the effect and add it to the device
effect = self._ledfx.effects.create(
ledfx = self._ledfx,
type = preset['devices'][device.id]['type'],
config = preset['devices'][device.id]['config'])
device.set_effect(effect)
else:
device.clear_effect()
elif action == "rename":
name = data.get('name')
if name is None:
response = { 'status' : 'failed', 'reason': 'Required attribute "name" was not provided' }
return web.Response(text=json.dumps(response), status=500)
# Update and save config
self._ledfx.config['presets'][preset_id]['name'] = name
save_config(
config = self._ledfx.config,
config_dir = self._ledfx.config_dir)
response = { 'status' : 'success' }
return web.Response(text=json.dumps(response), status=200)
async def post(self, request) -> web.Response:
"""Save current effects of devices as a preset"""
data = await request.json()
preset_name = data.get('name')
if preset_name is None:
response = { 'status' : 'failed', 'reason': 'Required attribute "preset_name" was not provided' }
return web.Response(text=json.dumps(response), status=500)
preset_id = generate_id(preset_name)
preset_config = {}
preset_config['name'] = preset_name
preset_config['devices'] = {}
for device in self._ledfx.devices.values():
effect = {}
if device.active_effect:
effect['type'] = device.active_effect.type
effect['config'] = device.active_effect.config
#effect['name'] = device.active_effect.name
preset_config['devices'][device.id] = effect
# Update the preset if it already exists, else create it
self._ledfx.config['presets'][preset_id] = preset_config
save_config(
config = self._ledfx.config,
config_dir = self._ledfx.config_dir)
response = { 'status' : 'success', 'preset': {'id': preset_id, 'config': preset_config }}
return web.Response(text=json.dumps(response), status=200)
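def _example_rest_calls(base='http://127.0.0.1:8888/api/presets'):
    # Hedged usage sketch (added; not part of LedFx): exercising the handlers above over HTTP.
    # The host/port are illustrative; the preset id is taken from the POST response rather than
    # guessed, since it is generated server-side from the name.
    import requests
    created = requests.post(base, json={'name': 'Evening'}).json()           # save current effects
    preset_id = created['preset']['id']
    requests.put(base, json={'action': 'activate', 'id': preset_id})         # re-apply them later
    requests.put(base, json={'action': 'rename', 'id': preset_id, 'name': 'Night'})
    requests.delete(base, json={'id': preset_id})                            # remove the preset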
|
[
"m.bowley98@gmail.com"
] |
m.bowley98@gmail.com
|
8750b5b4618d786178b2e0ac22e0953500558b58
|
f5a7e05dc40045076d7b4448c5d0c584048d7ab0
|
/django/django_react/accounts/urls.py
|
a1d1bc082a2c9a45309d946cc92f49f7665aae67
|
[] |
no_license
|
cse442-at-ub/cse442-semester-project-tapp
|
b21cf6a3c85f62a1c277909258919639e82864da
|
e8daff694a468b5d33c5eb9fdec752cbd50d7911
|
refs/heads/master
| 2022-06-18T13:30:08.386552
| 2020-05-04T20:34:00
| 2020-05-04T20:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
from django.urls import path, include
from .api import RegAPI, LogAPI, UserAPI, InstructAPI
from knox import views as knox_views
urlpatterns = [
path('api/auth', include('knox.urls')),
path('api/auth/register', RegAPI.as_view()),
path('api/auth/login', LogAPI.as_view()),
path('api/auth/user', UserAPI.as_view()),
path('api/instructors', InstructAPI.as_view()),
path('api/auth/logout', knox_views.LogoutView.as_view(), name='knox_logout')
]
|
[
"anrao3@buffalo.edu"
] |
anrao3@buffalo.edu
|
3dd58093282863966917bca26c8d879f7fd478a3
|
0ec0fa7a6dc0659cc26113e3ac734434b2b771f2
|
/4.refactored/log/2016-10-03@14:03/minibatch.py
|
f824a9d890c1a51f5661688dfa4b6fb11a6c7377
|
[] |
no_license
|
goldleaf3i/3dlayout
|
b8c1ab3a21da9129829e70ae8a95eddccbf77e2f
|
1afd3a94a6cb972d5d92fe373960bd84f258ccfe
|
refs/heads/master
| 2021-01-23T07:37:54.396115
| 2017-03-28T10:41:06
| 2017-03-28T10:41:06
| 86,431,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,766
|
py
|
from __future__ import division
import datetime as dt
import numpy as np
import util.layout as lay
import util.GrafoTopologico as gtop
import util.transitional_kernels as tk
import util.MappaSemantica as sema
from object import Segmento as sg
from util import pickle_util as pk
from util import accuracy as ac
from util import layout as lay
from util import disegna as dsg
from object import Superficie as fc
import parameters as par
import pickle
import os
import glob
import shutil
import time
import cv2
import warnings
warnings.warn("Settare i parametri del lateralLine e cvThresh")
def start_main(parametri_obj, path_obj):
#----------------------------1.0_LAYOUT DELLE STANZE----------------------------------
#------inizio layout
#leggo l'immagine originale in scala di grigio e la sistemo con il thresholding
img_rgb = cv2.imread(path_obj.metricMap)
img_ini = img_rgb.copy() #copio l'immagine
# 127 per alcuni dati, 255 per altri
ret,thresh1 = cv2.threshold(img_rgb,parametri_obj.cv2thresh,255,cv2.THRESH_BINARY)#prova
#------------------1.1_CANNY E HOUGH PER TROVARE MURI---------------------------------
walls , canny = lay.start_canny_ed_hough(thresh1,parametri_obj)
#walls , canny = lay.start_canny_ed_hough(img_rgb,parametri_obj)
if par.DISEGNA:
#disegna mappa iniziale, canny ed hough
dsg.disegna_map(img_rgb,filepath = path_obj.filepath )
dsg.disegna_canny(canny,filepath = path_obj.filepath)
dsg.disegna_hough(img_rgb,walls,filepath = path_obj.filepath)
lines = lay.flip_lines(walls, img_rgb.shape[0]-1)
walls = lay.crea_muri(lines)
if par.DISEGNA:
#disegno linee
dsg.disegna_segmenti(walls)#solo un disegno poi lo elimino
#------------1.2_SETTO XMIN YMIN XMAX YMAX DI walls-----------------------------------
#tra tutti i punti dei muri trova l'ascissa e l'ordinata minima e massima.
estremi = sg.trova_estremi(walls)
xmin = estremi[0]
xmax = estremi[1]
ymin = estremi[2]
ymax = estremi[3]
offset = 20
xmin -= offset
xmax += offset
ymin -= offset
ymax += offset
#-------------------------------------------------------------------------------------
#---------------1.3_CONTORNO ESTERNO--------------------------------------------------
(contours, vertici) = lay.contorno_esterno(img_rgb, parametri_obj, path_obj)
if par.DISEGNA:
dsg.disegna_contorno(vertici,xmin,ymin,xmax,ymax,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#---------------1.4_MEAN SHIFT PER TROVARE CLUSTER ANGOLARI---------------------------
(indici, walls, cluster_angolari) = lay.cluster_ang(parametri_obj.h, parametri_obj.minOffset, walls, diagonali= parametri_obj.diagonali)
if par.DISEGNA:
#dsg.disegna_cluster_angolari(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
dsg.disegna_cluster_angolari_corretto(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
#-------------------------------------------------------------------------------------
#---------------1.5_CLUSTER SPAZIALI--------------------------------------------------
#questo metodo e' sbagliato, fai quella cosa con il hierarchical clustering per classificarli meglio.e trovare in sostanza un muro
#cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)
#inserisci qui il nuovo Cluster_spaz
nuovo_clustering = 2
#in walls ci sono tutti i segmenti
if nuovo_clustering == 1:
cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)#metodo di matteo
elif nuovo_clustering ==2:
cluster_mura = lay.get_cluster_mura(walls, cluster_angolari, parametri_obj)#metodo di valerio
cluster_mura_senza_outliers = []
for c in cluster_mura:
if c!=-1:
cluster_mura_senza_outliers.append(c)
#ottengo gli outliers
outliers = []
for s in walls:
if s.cluster_muro == -1:
outliers.append(s)
#ora che ho un insieme di cluster relativi ai muri voglio andare ad unire quelli molto vicini
#ottengo i rappresentanti dei cluster (tutti tranne gli outliers)
#segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura)
segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura_senza_outliers)
segmenti_rappresentanti = segmenti_rappresentanti + outliers #i segmenti rappresentati di un cluster li unisco agli altri e faccio lo stesso con gli outliers
if par.DISEGNA:
dsg.disegna_segmenti(segmenti_rappresentanti, savename = "5c_segmenti_rappresentanti")
#classifico i rappresentanti
#qui va settata la soglia con cui voglio separare i cluster
segmenti_rappresentanti = sg.spatialClustering(parametri_obj.sogliaLateraleClusterMura, segmenti_rappresentanti)
#in questo momento ho un insieme di segmenti rappresentanti che hanno il cluster_spaziale settato correttamente, ora setto anche gli altri che hanno lo stesso cluster muro
cluster_spaziali = lay.new_cluster_spaziale(walls, segmenti_rappresentanti)
'''
#creo lista di cluster spaziali
cluster_spaziali = []
for muro in walls:
if muro.cluster_spaziale !=None:
cluster_spaziali.append(muro.cluster_spaziale)
for spaz in list(set(cluster_spaziali)):
#raccolgo i cluster muri che hanno stesso cluster spaziale
cluster_mura_uguali = []
for segmento in segmenti_rappresentanti:
if segmento.cluster_spaziale == spaz:
cluster_mura_uguali.append(segmento.cluster_muro)
cluster_mura_uguali = list(set(cluster_mura_uguali))
for segmento in walls:
if segmento.cluster_muro in cluster_mura_uguali:
segmento.set_cluster_spaziale(spaz)
dsg.disegna_cluster_mura(cluster_mura, walls,filepath = path_obj.filepath, savename= '5d_cluster_mura')
'''
if par.DISEGNA:
dsg.disegna_cluster_spaziali(cluster_spaziali, walls,filepath = path_obj.filepath)
dsg.disegna_cluster_mura(cluster_mura, walls,filepath = path_obj.filepath, savename= '5d_cluster_mura')
#-------------------------------------------------------------------------------------
#-------------------1.6_CREO EXTENDED_LINES-------------------------------------------
(extended_lines, extended_segments) = lay.extend_line(cluster_spaziali, walls, xmin, xmax, ymin, ymax,filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_extended_segments(extended_segments, walls,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#-------------1.7_CREO GLI EDGES TRAMITE INTERSEZIONI TRA EXTENDED_LINES--------------
edges = sg.crea_edges(extended_segments)
#-------------------------------------------------------------------------------------
#----------------------1.8_SETTO PESI DEGLI EDGES-------------------------------------
edges = sg.setPeso(edges, walls)
#-------------------------------------------------------------------------------------
#----------------1.9_CREO LE CELLE DAGLI EDGES----------------------------------------
celle = fc.crea_celle(edges)
#-------------------------------------------------------------------------------------
#----------------CLASSIFICO CELLE-----------------------------------------------------
global centroid
#verificare funzioni
if par.metodo_classificazione_celle ==1:
print "1.metodo di classificazione ", par.metodo_classificazione_celle
(celle, celle_out, celle_poligoni, indici, celle_parziali, contorno, centroid, punti) = lay.classificazione_superfici(vertici, celle)
elif par.metodo_classificazione_celle==2:
print "2.metodo di classificazione ", par.metodo_classificazione_celle
#sto classificando le celle con il metodo delle percentuali
(celle_out, celle, centroid, punti,celle_poligoni, indici, celle_parziali) = lay.classifica_celle_con_percentuale(vertici, celle, img_ini)
#-------------------------------------------------------------------------------------
#--------------------------POLIGONI CELLE---------------------------------------------
(celle_poligoni, out_poligoni, parz_poligoni, centroid) = lay.crea_poligoni_da_celle(celle, celle_out, celle_parziali)
#ora vorrei togliere le celle che non hanno senso, come ad esempio corridoi strettissimi, il problema e' che lo vorrei integrare con la stanza piu' vicina ma per ora le elimino soltanto
#RICORDA: stai pensando solo a celle_poligoni
#TODO: questo metodo non funziona benissimo(sbagli ad eliminare le celle)
#celle_poligoni, celle = lay.elimina_celle_insensate(celle_poligoni,celle, parametri_obj)#elimino tutte le celle che hanno una forma strana e che non ha senso siano stanze
#-------------------------------------------------------------------------------------
#------------------CREO LE MATRICI L, D, D^-1, ED M = D^-1 * L------------------------
(matrice_l, matrice_d, matrice_d_inv, X) = lay.crea_matrici(celle)
#-------------------------------------------------------------------------------------
#----------------DBSCAN PER TROVARE CELLE NELLA STESSA STANZA-------------------------
clustersCelle = lay.DB_scan(parametri_obj.eps, parametri_obj.minPts, X, celle_poligoni)
#questo va disegnato per forza perche' restituisce la lista dei colori
if par.DISEGNA:
colori, fig, ax = dsg.disegna_dbscan(clustersCelle, celle, celle_poligoni, xmin, ymin, xmax, ymax, edges, contours,filepath = path_obj.filepath)
else:
colori = dsg.get_colors(clustersCelle)
#-------------------------------------------------------------------------------------
#------------------POLIGONI STANZE(spazio)--------------------------------------------
stanze, spazi = lay.crea_spazio(clustersCelle, celle, celle_poligoni, colori, xmin, ymin, xmax, ymax, filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_stanze(stanze, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath)
#-------------------------------------------------------------------------------------
#------fine layout--------------------------------------------------------------------
#adesso questo mi conviene calcolarlo alla fine di tutto, dato che ho spostato il calcolo delle stanze reali dopo aver calcolato il grafo topologico
'''
#funzione per eliminare stanze che sono dei buchi interni
print 'PLEASE CAMBIARE QUESTA COSA :|'
#stanze = ac.elimina_stanze(stanze,estremi)
#funzione per calcolare accuracy fc e bc
print "Inizio a calcolare metriche"
results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
if par.DISEGNA:
dsg.disegna_grafici_per_accuracy(stanze, stanze_gt, filepath = path_obj.filepath)
print "Fine calcolare metriche"
'''
#-------------------------------------------------------------------------------------
#------------------------------GRAFO TOPOLOGICO---------------------------------------
#costruisco il grafo
(stanze_collegate, doorsVertices, distanceMap, points, b3) = gtop.get_grafo(path_obj.metricMap, stanze, estremi, colori, parametri_obj)
(G, pos) = gtop.crea_grafo(stanze, stanze_collegate, estremi, colori)
#ottengo tutte quelle stanze che non sono collegate direttamente ad un'altra, con molta probabilita' quelle non sono stanze reali
stanze_non_collegate = gtop.get_stanze_non_collegate(stanze, stanze_collegate)
#ottengo le stanze reali, senza tutte quelle non collegate
stanze_reali, colori_reali = lay.get_stanze_reali(stanze, stanze_non_collegate, colori)
if par.DISEGNA:
#sto disegnando usando la lista di colori originale, se voglio la lista della stessa lunghezza sostituire colori con colori_reali
dsg.disegna_stanze(stanze_reali, colori_reali, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '8_Stanze_reali')
#------------------------------------------------------------------------------------
if par.DISEGNA:
dsg.disegna_distance_transform(distanceMap, filepath = path_obj.filepath)
dsg.disegna_medial_axis(points, b3, filepath = path_obj.filepath)
dsg.plot_nodi_e_stanze(colori,estremi, G, pos, stanze, stanze_collegate, filepath = path_obj.filepath)
#------------------------CREO PICKLE--------------------------------------------------
#creo i file pickle per il layout delle stanze
print("creo pickle layout")
pk.crea_pickle((stanze, clustersCelle, estremi, colori, spazi, stanze_reali, colori_reali), path_obj.filepath_pickle_layout)
print("ho finito di creare i pickle del layout")
#creo i file pickle per il grafo topologico
print("creo pickle grafoTopologico")
pk.crea_pickle((stanze, clustersCelle, estremi, colori), path_obj.filepath_pickle_grafoTopologico)
print("ho finito di creare i pickle del grafo topologico")
#-----------------------CALCOLO ACCURACY----------------------------------------------
#L'accuracy e' da controllare, secondo me non e' corretta.
#funzione per eliminare stanze che sono dei buchi interni
print 'PLEASE CAMBIARE QUESTA COSA :|'
#stanze = ac.elimina_stanze(stanze,estremi)
#funzione per calcolare accuracy fc e bc
print "Inizio a calcolare metriche"
results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze_reali, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
if par.DISEGNA:
dsg.disegna_grafici_per_accuracy(stanze, stanze_gt, filepath = path_obj.filepath)
print "Fine calcolare metriche"
#in questa fase il grafo non e' ancora stato classificato con le label da dare ai vai nodi.
#-------------------------------------------------------------------------------------
#creo il file xml dei parametri
par.to_XML(parametri_obj, path_obj)
#-------------------------prova transitional kernels----------------------------------
#splitto una stanza e restituisto la nuova lista delle stanze
#stanze, colori = tk.split_stanza_verticale(2, stanze, colori,estremi)
#stanze, colori = tk.split_stanza_orizzontale(3, stanze, colori,estremi)
#stanze, colori = tk.slit_all_cell_in_room(spazi, 1, colori, estremi) #questo metodo e' stato fatto usando il concetto di Spazio, dunque fai attenzione perche' non restituisce la cosa giusta.
#stanze, colori = tk.split_stanza_reverce(2, len(stanze)-1, stanze, colori, estremi) #questo unisce 2 stanze precedentemente splittate, non faccio per ora nessun controllo sul fatto che queste 2 stanze abbiano almeno un muro in comune, se sono lontani succede un casino
#-----------------------------------------------------------------------------------
#-------------------------MAPPA SEMANTICA-------------------------------------------
'''
#in questa fase classifico i nodi del grafo e conseguentemente anche quelli della mappa.
#gli input di questa fase non mi sono ancora molto chiari
#per ora non la faccio poi se mi serve la copio/rifaccio, penso proprio sia sbagliata.
#stanze ground truth
(stanze_gt, nomi_stanze_gt, RC, RCE, FCES, spaces, collegate_gt) = sema.get_stanze_gt(nome_gt, estremi)
#corrispondenze tra gt e segmentate (backward e forward)
(indici_corrispondenti_bwd, indici_gt_corrispondenti_fwd) = sema.get_corrispondenze(stanze,stanze_gt)
#creo xml delle stanze segmentate
id_stanze = sema.crea_xml(nomeXML,stanze,doorsVertices,collegate,indici_gt_corrispondenti_fwd,RCE,nomi_stanze_gt)
#parso xml creato, va dalla cartella input alla cartella output/xmls, con feature aggiunte
xml_output = sema.parsa(dataset_name, nomeXML)
#classifico
predizioniRCY = sema.classif(dataset_name,xml_output,'RC','Y',30)
predizioniRCN = sema.classif(dataset_name,xml_output,'RC','N',30)
predizioniFCESY = sema.classif(dataset_name,xml_output,'RCES','Y',30)
predizioniFCESN = sema.classif(dataset_name,xml_output,'RCES','N',30)
#creo mappa semantica segmentata e ground truth e le plotto assieme
sema.creaMappaSemantica(predizioniRCY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniRCN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
'''
#-----------------------------------------------------------------------------------
print "to be continued..."
return results
#TODO
def load_main(filepath_pickle_layout, filepath_pickle_grafoTopologico, parXML):
#carico layout
pkl_file = open(filepath_pickle_layout, 'rb')
data1 = pickle.load(pkl_file)
stanze = data1[0]
clustersCelle = data1[1]
estremi = data1[2]
colori = data1[3]
spazi = data1[4]
stanze_reali = data1[5]
colori_reali= data1[6]
#print "controllo che non ci sia nulla di vuoto", len(stanze), len(clustersCelle), len(estremi), len(spazi), len(colori)
#carico il grafo topologico
pkl_file2 = open( filepath_pickle_grafoTopologico, 'rb')
data2 = pickle.load(pkl_file2)
G = data2[0]
pos = data2[1]
stanze_collegate = data2[2]
doorsVertices = data2[3]
#creo dei nuovi oggetti parametri caricando i dati dal file xml
new_parameter_obj, new_path_obj = par.load_from_XML(parXML)
#continuare il metodo da qui
def makeFolders(location,datasetList):
for dataset in datasetList:
if not os.path.exists(location+dataset):
os.mkdir(location+dataset)
os.mkdir(location+dataset+"_pickle")
def main():
start = time.time()
    print ''' KNOWN ISSUES \n
    1] OBLIQUE LINES DO NOT WORK;\n
    2] EXTERNAL CELLS LYING INSIDE THE CONVEX HULL ARE NOT CLASSIFIED, SO THEY END UP BEING TREATED AS ROOMS;\n
    OK 3] ACCURACY DOES NOT WORK;\n
    4] COLLINEAR CLUSTERS ARE MERGED TOGETHER IN CASCADE, WHICH ENDS UP ALIGNING WALLS THAT ARE VERY FAR APART;\n
    5] THE SYSTEM IS VERY SENSITIVE TO SCALE. ALL IMAGES SHOULD BE ENLARGED VIA RESCALING TO SOLVE THIS PROBLEM. \n
    [4-5] TOGETHER THESE CAUSE SMALL CORRIDORS TO BE TREATED AS A SINGLE STRAIGHT LINE\n
    6] SURFACES THAT ARE TOO SMALL, CREATED BETWEEN CLUSTERS, SHOULD BE FILTERED OUT;\n
    7] THE STAGE IMAGES ARE TOO SMALL; THEY MUST BE RE-CAPTURED AT A LARGER SIZE \n
    >> RUN IN BATCH ON ALIENWARE\n
    >> MAKE THE CODE PARALLEL\n
    8] 30 DATASETS STILL NEED TO BE PRODUCED WITH STAGE\n
    9] GET CONTOUR OCCASIONALLY FAILS BECAUSE IT LEAKS INTO THE INTERIOR\n
    >> TRY A BORDER SCAN (ON A COPY OF THE IMAGE)\n
    >> TRY TUNING THE PARAMETER OR CHANGING THE BORDER-SCAN METHOD\n
    >> LOOK FOR ALTERNATIVE SOLUTIONS (E.G. IDENTIFYING THE EXTERNAL CELLS)\n
    OK 10] THE CLUSTERING PARAMETERS NEED BETTER TUNING\n
    >> THE CLUSTERING PARAMETERS ARE OK; IT OCCASIONALLY OVER-SEGMENTS.\n
    >>> IF OVER-SEGMENTATION TURNS OUT TO BE A PROBLEM, CHANGE THE CLUSTERING OR MERGE CELLS\n
    11] THE CANNY AND HOUGH LINES ARE SOMETIMES TOO THICK \n
    >> IT ACTUALLY SEEMS OK; TRY LARGER MAPS AND SEE WHETHER IT CHANGES.
    12] THE SEGMENTATION SHOULD BE AUGMENTED WITH A VORONOI DIAGRAM
    OK 13] THE MAP IMAGE IS PRINTED AT A SCALE DIFFERENT FROM THE TRUE ONE.\n
    OK 14] REPRINT SCHOOL_GT AT FULL SIZE; FOR NOW IT IS PRINTED SMALL (800x600)\n
    OK SEE 10] 15] WE DO NOT COMPUTE THE DIFFUSION STEP OF MURA'S METHOD; IN SOME WAYS THAT IS GOOD, IN OTHERS NOT\n
    OK SEE 4] 16] WE DO NOT CLUSTER THE SEGMENTS CORRECTLY; WE SHOULD ONLY RUN MEANSHIFT\n
    17] THE SEGMENT PHASE MUST BE COMPLETELY REDONE; MEANSHIFT DOES NOT WORK LIKE THIS; THE SEGMENTS HAVE A LOT OF "==" THAT MUST BE REMOVED; SPATIAL CLUSTERING MUST BE CHANGED;\n
    18] THE TOPOLOGICAL GRAPH OCCASIONALLY CONNECTS ROOMS THAT ARE ADJACENT BUT NOT CONNECTED. THE MEDIAL-AXIS PART MUST BE REVISED;\n
    19] TRY USING THE IMAGE WITH THE RETRACED CONTOUR ONLY FOR GETCONTOUR AND NOT IN THE OTHER STEPS.\n
    20] DROP THRESHOLD + CANNY -> USE ONLY CANNY.\n
    21] REMOVE INTERNAL CELLS THAT ARE HOLES.\n
    >> USE VORONOI TO CHECK CONNECTIVITY.\n
    >> USE A THRESHOLD ON THE BACKGROUND \n
    >> COMBINE THE TWO METHODS\n
    22] REMOVE WRONG ROOMS:\n
    >> "EXTERNAL" ROOMS LYING INSIDE MUST BE REMOVED BASED ON THE EXTERNAL CELLS\n
    >> REMOVE ROOMS WITH NONSENSICAL SHAPES (E.G. LONG NARROW WALLS); DECIDE WHETHER TO DELETE THEM OR MERGE THEM INTO ANOTHER ROOM\n
    23] FIX ALL THE WARNINGS.\n
    open question: look at the method clustering_dbscan_celle(...) in layout -- the line
    af = DBSCAN(eps, min_samples, metric="precomputed").fit(X) -- shouldn't it rather be:
    af = DBSCAN(eps= eps, min_samples = min_samples, metric="precomputed").fit(X)
    '''
    print '''
    HOW TO RUN:\n
    SELECT WHICH DATASETs TO RUN EXPERIMENTS ON (variable DATASETs -line 165- TO COMMENT / UNCOMMENT)\n
    MOVE THE FOLDERS NAMED AFTER THE DATASETS CREATED BY THE PREVIOUS EXPERIMENT INTO A SUB-FOLDER (IF A FOLDER WITH THE SAME NAME IS FOUND, THE MAP IS NOT LOADED)\n
    SET THE PARAMETERS \n
    RUN\n
    THE METHOD OCCASIONALLY CRASHES DURING THE ACCURACY EVALUATION PHASE. IF SO, RELAUNCH IT\n
    MOVE ALL THE RESULTS INTO A FOLDER UNDER RESULTS WITH A NAME THAT DESCRIBES THE TEST PERFORMED\n
    SAVE THE MAIN INSIDE THAT FOLDER\n
#-------------------PARAMETRI-------------------------------------------------------
#carico parametri di default
parametri_obj = par.Parameter_obj()
#carico path di default
path_obj = par.Path_obj()
#-----------------------------------------------------------------------------------
makeFolders(path_obj.OUTFOLDERS,path_obj.DATASETs)
skip_performed = True
#-----------------------------------------------------------------------------------
#creo la cartella di log con il time stamp
our_time = str(dt.datetime.now())[:-10].replace(' ','@') #get current time
SAVE_FOLDER = os.path.join('./log', our_time)
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
SAVE_LOGFILE = SAVE_FOLDER+'/log.txt'
#------------------------------------------------------------------------------------
with open(SAVE_LOGFILE,'w+') as LOGFILE:
print "AZIONE", par.AZIONE
print >>LOGFILE, "AZIONE", par.AZIONE
shutil.copy('./minibatch.py',SAVE_FOLDER+'/minibatch.py') #copio il file del main
shutil.copy('./parameters.py',SAVE_FOLDER+'/parameters.py') #copio il file dei parametri
if par.AZIONE == "batch":
if par.LOADMAIN==False:
print >>LOGFILE, "SONO IN MODALITA' START MAIN"
else:
print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
print >>LOGFILE, "-----------------------------------------------------------"
for DATASET in path_obj.DATASETs :
print >>LOGFILE, "PARSO IL DATASET", DATASET
global_results = []
print 'INIZIO DATASET ' , DATASET
for metricMap in glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png') :
print >>LOGFILE, "---parso la mappa: ", metricMap
print 'INIZIO A PARSARE ', metricMap
path_obj.metricMap =metricMap
map_name = metricMap.split('/')[-1][:-4]
#print map_name
SAVE_FOLDER = path_obj.OUTFOLDERS+DATASET+'/'+map_name
SAVE_PICKLE = path_obj.OUTFOLDERS+DATASET+'_pickle/'+map_name.split('.')[0]
if par.LOADMAIN==False:
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
os.mkdir(SAVE_PICKLE)
else:
# avoid re-running tests that were already performed
if skip_performed :
print 'GIA FATTO; PASSO AL SUCCESSIVO'
continue
#print SAVE_FOLDER
path_obj.filepath = SAVE_FOLDER+'/'
path_obj.filepath_pickle_layout = SAVE_PICKLE+'/'+'Layout.pkl'
path_obj.filepath_pickle_grafoTopologico = SAVE_PICKLE+'/'+'GrafoTopologico.pkl'
add_name = '' if DATASET == 'SCHOOL' else ''
path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+map_name+add_name+'.xml'
#--------------------new parameters-----------------------------------
# set the dataset-specific parameters (each dataset has different parameters)
parametri_obj.minLateralSeparation = 7 if 'SCHOOL' in DATASET else 15
parametri_obj.cv2thresh = 150 if DATASET == 'SCHOOL' else 200
parametri_obj.flip_dataset = True if DATASET == 'SURVEY' else False
#--------------------------------------------------------------------
#-------------------EXECUTION---------------------------------------
if par.LOADMAIN==False:
print "start main"
results = start_main(parametri_obj, path_obj)
global_results.append(results);
# compute the final accuracy over the whole dataset
if metricMap == glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png')[-1]:
accuracy_bc_medio = []
accuracy_bc_in_pixels = []
accuracy_fc_medio = []
accuracy_fc_in_pixels=[]
for i in global_results :
accuracy_bc_medio.append(i[0])
accuracy_fc_medio.append(i[2])
accuracy_bc_in_pixels.append(i[4])
accuracy_fc_in_pixels.append(i[5])
filepath= path_obj.OUTFOLDERS+DATASET+'/'
print filepath
f = open(filepath+'accuracy.txt','a')
#f.write(filepath)
f.write('accuracy_bc = '+str(np.mean(accuracy_bc_medio))+'\n')
f.write('accuracy_bc_pixels = '+str(np.mean(accuracy_bc_in_pixels))+'\n')
f.write('accuracy_fc = '+str(np.mean(accuracy_fc_medio))+'\n')
f.write('accuracy_fc_pixels = '+str(np.mean(accuracy_fc_in_pixels))+'\n\n')
f.close()
LOGFILE.flush()
elif par.LOADMAIN==True:
print "load main"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
LOGFILE.flush()
else :
continue
break
LOGFILE.flush()
elif par.AZIONE =='mappa_singola':
#-------------------EXECUTION: single map----------------------------------
if par.LOADMAIN==False:
print "start main"
print >>LOGFILE, "SONO IN MODALITA' START MAIN"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
start_main(parametri_obj, path_obj)
LOGFILE.flush()
else:
print "load main"
print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
LOGFILE.flush()
#-------------------ELAPSED TIME-------------------------------------------------
fine = time.time()
elapsed = fine-start
print "la computazione ha impiegato %f secondi" % elapsed
if __name__ == '__main__':
main()
|
[
"matteo.luperto@polimi.it"
] |
matteo.luperto@polimi.it
|
336c5523fc52ed7484adf81479a520b8cc202c4d
|
86864403fed2e21c48aa928568ed44f3479771b3
|
/server/database/__init__.py
|
6cdd23796807782dbd869c502f893e77a62d0525
|
[] |
no_license
|
Fourier-Transformation/Bookstore-sales-management-system
|
4c30f343c5f2cd45cc33ed1d89397bc63bc468ac
|
bf7d99c13b3c4b350da08daac0b88f2cb0312724
|
refs/heads/master
| 2023-01-08T15:04:59.442770
| 2019-10-27T11:05:10
| 2019-10-27T11:05:10
| 206,248,848
| 9
| 3
| null | 2023-01-04T09:52:57
| 2019-09-04T06:34:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 49
|
py
|
"""
package __init__ file
"""
__all__ = ['bsdb']
|
[
"oowsxq@users.noreply.github.com"
] |
oowsxq@users.noreply.github.com
|
ef978f568bf798274942cd80a49d72ff9f6d761f
|
71e29718412db28a59a5d32bcfdd7e00c5ea66f4
|
/099_Empacotamento.py
|
973fed36e1f2f13f835f3769fc006c047504496a
|
[] |
no_license
|
wesley-998/Python-Exercicies
|
71e518ff45839473757bd73eb28ff72d877ed9ab
|
bc42feb1ed6e09ad659ffd35ee9d1198611f7b84
|
refs/heads/main
| 2023-05-11T02:04:29.536874
| 2023-05-02T19:27:36
| 2023-05-02T19:27:36
| 360,686,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
'''
Program with a function called maior that identifies
the largest value among several values/parameters.
'''
def maior(*num):
print('Analysing the values passed in...')
print(f'{num} A total of {len(num)} values were provided.')
print(f'The largest value provided was {max(num)}')
def linha():
print('__' * 50)
linha()
maior(4,6,9,8,7,4,6,5,2,9,8,7,6)
linha()
maior(6,9,8,7,4,5,1,3,6,9)
linha()
maior(5,6,9,8,7)
linha()
maior(4,1,2,)
linha()
maior(6,5)
linha()
maior(0)
linha()
|
[
"noreply@github.com"
] |
wesley-998.noreply@github.com
|
c368d7695148f2b8661d8b1af6beb4ed17f894d2
|
2f8c9e95514e4ff43ee0ecd73eec60633db4427e
|
/src/track.py
|
31aa833b2f010cb51ce5bce33757710bbd015705
|
[
"MIT"
] |
permissive
|
zhangmo123/MCMOT
|
bbda421bf98abd09ab95c5f571fe28b407ff46e6
|
c4979c676264bfa1939cd39b650dd7a4864df61d
|
refs/heads/master
| 2022-11-11T11:03:12.664934
| 2020-06-29T09:57:33
| 2020-06-29T09:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,214
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy.core._multiarray_umath import ndarray
import _init_paths
import os
import os.path as osp
import shutil
import cv2
import logging
import argparse
import motmetrics as mm
import numpy as np
import torch
from collections import defaultdict
from lib.tracker.multitracker import JDETracker, id2cls
from lib.tracking_utils import visualization as vis
from lib.tracking_utils.log import logger
from lib.tracking_utils.timer import Timer
from lib.tracking_utils.evaluation import Evaluator
import lib.datasets.dataset.jde as datasets
from lib.tracking_utils.utils import mkdir_if_missing
from lib.opts import opts
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(
frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
# def write_detect_imgs()
def write_results_dict(file_name, results_dict, data_type, num_classes=2):
"""
:param file_name:
:param results_dict:
:param data_type:
:param num_classes:
:return:
"""
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(file_name, 'w') as f:
for cls_id in range(num_classes):
if cls_id == 0: # the background class is not processed
continue
# process the results of each object detection class
results = results_dict[cls_id]
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(file_name))
def format_dets_dict2dets_list(dets_dict, w, h):
"""
:param dets_dict:
:param w: input image width
:param h: input image height
:return:
"""
dets_list = []
for k, v in dets_dict.items():
for det_obj in v:
x1, y1, x2, y2, score, cls_id = det_obj
center_x = (x1 + x2) * 0.5 / float(w)
center_y = (y1 + y2) * 0.5 / float(h)
bbox_w = (x2 - x1) / float(w)
bbox_h = (y2 - y1) / float(h)
dets_list.append([int(cls_id), score, center_x, center_y, bbox_w, bbox_h])
return dets_list
def eval_seq_and_output_dets(opt,
data_loader,
data_type,
result_f_name,
out_dir,
save_dir=None,
show_image=True):
"""
:param opt:
:param data_loader:
:param data_type:
:param result_f_name:
:param out_dir:
:param save_dir:
:param show_image:
:return:
"""
if save_dir:
mkdir_if_missing(save_dir)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
else:
shutil.rmtree(out_dir)
os.makedirs(out_dir)
tracker = JDETracker(opt, frame_rate=30)
timer = Timer()
results_dict = defaultdict(list)
frame_id = 0 # frame index
for path, img, img_0 in data_loader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# --- run tracking
timer.tic()
blob = torch.from_numpy(img).to(opt.device).unsqueeze(0)
# update detection results of this frame(or image)
dets_dict = tracker.update_detection(blob, img_0)
timer.toc()
# plot detection results
if show_image or save_dir is not None:
online_im = vis.plot_detects(image=img_0,
dets_dict=dets_dict,
num_classes=opt.num_classes,
frame_id=frame_id,
fps=1.0 / max(1e-5, timer.average_time))
if frame_id > 0:
# whether to show intermediate results
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
# ----- format the detection results and write them (txt) to the target directory
# format
dets_list = format_dets_dict2dets_list(dets_dict, w=img_0.shape[1], h=img_0.shape[0])
# write to the target directory
out_f_name = os.path.split(path)[-1].replace('.jpg', '.txt')
out_f_path = out_dir + '/' + out_f_name
with open(out_f_path, 'w', encoding='utf-8') as w_h:
w_h.write('class prob x y w h total=' + str(len(dets_list)) + '\n')
for det in dets_list:
w_h.write('%d %f %f %f %f %f\n' % (det[0], det[1], det[2], det[3], det[4], det[5]))
print('{} written'.format(out_f_path))
# done with this frame, update frame_id
frame_id += 1
# write the final results (save results)
write_results_dict(result_f_name, results_dict, data_type)
# return the results
return frame_id, timer.average_time, timer.calls
def eval_seq(opt,
data_loader,
data_type,
result_f_name,
save_dir=None,
show_image=True,
frame_rate=30,
mode='track'):
"""
:param opt:
:param data_loader:
:param data_type:
:param result_f_name:
:param save_dir:
:param show_image:
:param frame_rate:
:param mode: track or detect
:return:
"""
if save_dir:
mkdir_if_missing(save_dir)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results_dict = defaultdict(list)
frame_id = 0 # frame index
for path, img, img_0 in data_loader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(
frame_id, 1. / max(1e-5, timer.average_time)))
# --- run tracking
timer.tic()
# blob = torch.from_numpy(img).cuda().unsqueeze(0)
blob = torch.from_numpy(img).to(opt.device).unsqueeze(0)
if mode == 'track': # process tracking
# --- track updates of each frame
online_targets_dict = tracker.update_tracking(blob, img_0)
# aggregate the results of each frame
online_tlwhs_dict = defaultdict(list)
online_ids_dict = defaultdict(list)
for cls_id in range(opt.num_classes):
# process each object detection class
online_targets = online_targets_dict[cls_id]
for track in online_targets:
tlwh = track.tlwh
t_id = track.track_id
# vertical = tlwh[2] / tlwh[3] > 1.6 # box aspect-ratio check: w/h must not exceed 1.6?
if tlwh[2] * tlwh[3] > opt.min_box_area: # and not vertical:
online_tlwhs_dict[cls_id].append(tlwh)
online_ids_dict[cls_id].append(t_id)
timer.toc()
# save the results of each frame
for cls_id in range(opt.num_classes):
results_dict[cls_id].append((frame_id + 1, online_tlwhs_dict[cls_id], online_ids_dict[cls_id]))
# plot the results of each frame
if show_image or save_dir is not None:
if frame_id > 0:
online_im: ndarray = vis.plot_tracks(image=img_0,
tlwhs_dict=online_tlwhs_dict,
obj_ids_dict=online_ids_dict,
num_classes=opt.num_classes,
frame_id=frame_id,
fps=1.0 / timer.average_time)
elif mode == 'detect': # process detections
# update detection results of this frame(or image)
dets_dict = tracker.update_detection(blob, img_0)
timer.toc()
# plot detection results
if show_image or save_dir is not None:
online_im = vis.plot_detects(image=img_0,
dets_dict=dets_dict,
num_classes=opt.num_classes,
frame_id=frame_id,
fps=1.0 / max(1e-5, timer.average_time))
else:
print('[Err]: un-recognized mode.')
# # visualize intermediate results
# if frame_id > 0:
# cv2.imshow('Frame {}'.format(str(frame_id)), online_im)
# cv2.waitKey()
if frame_id > 0:
# whether to show intermediate results
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
# done with this frame, update frame_id
frame_id += 1
# write the final results (save results)
write_results_dict(result_f_name, results_dict, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt,
data_root='/data/MOT16/train',
det_root=None, seqs=('MOT16-05',),
exp_name='demo',
save_images=False,
save_videos=False,
show_image=True):
"""
"""
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(
data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(
osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find(
'frameRate') + 10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
all_time, 1.0 / avg_time))
# get summary
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(
result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
opt = opts().init()
if not opt.val_mot16:
seqs_str = '''KITTI-13
KITTI-17
ADL-Rundle-6
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
else:
seqs_str = '''MOT16-02
MOT16-04
MOT16-05
MOT16-09
MOT16-10
MOT16-11
MOT16-13'''
data_root = os.path.join(opt.data_dir, 'MOT16/train')
if opt.test_mot16:
seqs_str = '''MOT16-01
MOT16-03
MOT16-06
MOT16-07
MOT16-08
MOT16-12
MOT16-14'''
data_root = os.path.join(opt.data_dir, 'MOT16/test')
if opt.test_mot15:
seqs_str = '''ADL-Rundle-1
ADL-Rundle-3
AVG-TownCentre
ETH-Crossing
ETH-Jelmoli
ETH-Linthescher
KITTI-16
KITTI-19
PETS09-S2L2
TUD-Crossing
Venice-1'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/test')
if opt.test_mot17:
seqs_str = '''MOT17-01-SDP
MOT17-03-SDP
MOT17-06-SDP
MOT17-07-SDP
MOT17-08-SDP
MOT17-12-SDP
MOT17-14-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/test')
if opt.val_mot17:
seqs_str = '''MOT17-02-SDP
MOT17-04-SDP
MOT17-05-SDP
MOT17-09-SDP
MOT17-10-SDP
MOT17-11-SDP
MOT17-13-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/train')
if opt.val_mot15:
seqs_str = '''KITTI-13
KITTI-17
ETH-Bahnhof
ETH-Sunnyday
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte
ADL-Rundle-6
ADL-Rundle-8
ETH-Pedcross2
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
if opt.val_mot20:
seqs_str = '''MOT20-01
MOT20-02
MOT20-03
MOT20-05
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/train')
if opt.test_mot20:
seqs_str = '''MOT20-04
MOT20-06
MOT20-07
MOT20-08
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/test')
seqs = [seq.strip() for seq in seqs_str.split()]
main(opt,
data_root=data_root,
seqs=seqs,
exp_name='MOT15_val_all_dla34',
show_image=False,
save_images=False,
save_videos=False)
|
[
"765305261@qq.com"
] |
765305261@qq.com
|
799a2860a7f3e0e3427380a6926982af973081dc
|
d0bf5ea5b1dc415e2e1032245c8be947f3cb670c
|
/courses/admin.py
|
4cb23f6a67d889f8f3fc14f5b289b0439f06004c
|
[] |
no_license
|
alfonsorodrigo/django_graphql_courses
|
1d83b18accf5894bfa1a3cc22b82b183952ed20e
|
b669d01b1b1c80bf9ff0fab22401f9669fb75535
|
refs/heads/master
| 2020-04-27T23:56:36.281636
| 2019-03-10T08:59:23
| 2019-03-10T08:59:23
| 174,797,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Student)
admin.site.register(Course)
|
[
"alfonso.feria@truehome.com.mx"
] |
alfonso.feria@truehome.com.mx
|
f795733d74d41d26fb9b2a2f6300e78a11afc3bc
|
20ab220c093b77a57ac14c1fbd06a589e38190de
|
/jogos.py
|
c459a291f64e670f7e03238c1a9ee5791c885ffa
|
[] |
no_license
|
andreyFernandoSoares/python-basic
|
7a59148816d8a193aec055072b784347900cc6d1
|
42bc95162b0ba77ab4358cb23a67fcf5bbeeb9ef
|
refs/heads/main
| 2023-01-03T10:18:39.093613
| 2020-10-26T19:10:30
| 2020-10-26T19:10:30
| 306,403,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
import forca
import adivinhacao
def escolhe_jogo():
print("********************************")
print("Escolha seu jogo!")
print("********************************")
print("{1} Forca")
print("{2} Advinhacao")
jogo = int(input("Qual jogo? : "))
if (jogo == 1):
forca.jogar()
elif (jogo == 2):
adivinhacao.jogar()
if (__name__ == "__main__"):
escolhe_jogo()
|
[
"andreysykez19@gmail.com"
] |
andreysykez19@gmail.com
|
c551d812975147d6f6366cdefc948c4098173643
|
3c6aeb458a8bec0671c1d8be18331072ac97e05f
|
/ohsn/networkminer/unit-tests/get_retweets-unit-test.py
|
b817b7fc7c8eba0bc0c2163bd31bcc972bb9905f
|
[] |
no_license
|
wtgme/ohsn
|
d7b17ad179a789be2325e0923026a681e343a40c
|
9c165d45eefa4058e7ed2c6bad348703e296362d
|
refs/heads/master
| 2021-08-29T06:01:20.165839
| 2021-08-12T08:51:46
| 2021-08-12T08:51:46
| 44,922,360
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,821
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 10:00:01 2015
@author: home
"""
import urllib
import imghdr
import os
import ConfigParser
import datetime
from pymongo import Connection
import time
from twython import Twython, TwythonRateLimitError, TwythonAuthError
config = ConfigParser.ConfigParser()
config.read('scraper.cfg')
# spin up twitter api
APP_KEY = config.get('credentials','app_key')
APP_SECRET = config.get('credentials','app_secret')
OAUTH_TOKEN = config.get('credentials','oath_token')
OAUTH_TOKEN_SECRET = config.get('credentials','oath_token_secret')
#twitter = Twython(app_key='Mfm5oNdGSPMvwhZcB8N4MlsL8',
# app_secret='C0rbmJP0uKbuF6xcT6aR5vFOV9fS4L1965TKOH97pSqj3NJ1mP',
# oauth_token='3034707280-wFGQAF4FGBviaiSguCUdeG36NIQG1uh8qqXTC1G',
# oauth_token_secret='HUWMfHKyPShE6nH5WXlI26izoQjNtV3US3mNpND1F9qrO')
timeline_twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
timeline_twitter.verify_credentials()
# spin up database
#DBNAME = config.get('database', 'name')
#COLLECTION = config.get('database', 'collection')
#COLLECTION = 'manostimeline'
#print(DBNAME)
#print(COLLECTION)
#
#conn = Connection()
#db = conn[DBNAME]
#tweets = db[COLLECTION]
print("twitter connection and database connection configured")
orig_tweet_id = 608648346048303104
manos_tweet_id = 608658245352353792
"""
Returns fully-hydrated tweet objects
for up to 100 tweets per request, as
specified by comma-separated values
passed to the id parameter.
Requests / 15-min window (user auth) 180
Requests / 15-min window (app auth) 60
"""
params = {'id':orig_tweet_id}
response = timeline_twitter.lookup_status(**params)
#print response
for status in response:
print status['user']['screen_name']
print status['retweet_count']
"""
Returns a collection of up to 100 user IDs belonging
to users who have retweeted the tweet specified by the
id parameter.
you can cursor this...
Requests / 15-min window (user auth) 15
Requests / 15-min window (app auth) 60
"""
params = {'count':100, 'id':orig_tweet_id, 'cursor':-1}
response = timeline_twitter.get_retweeters_ids(**params)
#response['previous_cursor']
#response['previous_cursor_str']
print response['next_cursor']
#response['next_cursor_str']
for retweeter_id in response['ids']:
print retweeter_id
"""
Returns a collection of the 100 most
recent retweets of the tweet specified by the id parameter.
Requests / 15-min window (user auth) 15
Requests / 15-min window (app auth) 60
you CANNOT cursor this...
"""
params = {'count':100, 'id':orig_tweet_id}
response= timeline_twitter.get_retweets(**params)
# print response
for item in response:
print item['user']['screen_name']
|
[
"wtgmme@gmail.com"
] |
wtgmme@gmail.com
|
999a09a25e95cf82a2364e3c6fe8341518055d4e
|
8a70b361111cbd6ec9525e16bcec1969909d90cd
|
/dashboard/migrations/0006_auto_20160113_1558.py
|
8f56c640604c984b374b977a2c1743a57fccd953
|
[] |
no_license
|
dovimotors/mysite
|
7a5e71600794f2241cbc5f1230f6e5903d524892
|
aff32fa5d0a018e64201e5d5069956cdc4a055a4
|
refs/heads/master
| 2020-04-06T07:56:13.709331
| 2016-10-07T19:48:06
| 2016-10-07T19:48:06
| 48,855,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-13 20:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0005_auto_20160113_1557'),
]
operations = [
migrations.AlterField(
model_name='dailymetrics',
name='pa_count30to45',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='dailymetrics',
name='pa_count60to65',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='dailymetrics',
name='sv_old_ro_count',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='dailymetrics',
name='sv_ro_count',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"jesse@dovimotors.com"
] |
jesse@dovimotors.com
|
1125f3116aa019f2f36f7578e922ae06adf6ee09
|
5cf5ec4cec8c312c8a16efacceb3612e5d5c5fb4
|
/code2022/day12/p2.py
|
cfc724ff50a46bffda63b8ba9dde4376cd47462f
|
[] |
no_license
|
zac112/adventOfCode
|
a9523f4ff7dc4f3af0137807d6e09eb9692531bf
|
6f5955a4c827c3513e0d83877708175976ceb204
|
refs/heads/master
| 2022-12-26T02:17:41.554781
| 2022-12-25T23:45:53
| 2022-12-25T23:45:53
| 160,374,492
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
import string
from collections import deque
with open("data.txt") as f:
maze = [list(line) for line in f.read().splitlines()]
correction = {b:a for a,b in enumerate(string.ascii_lowercase)}
path = [[None for _ in range(len(maze[0]))]for _ in range(len(maze))]
possibleStarts = []
for y,row in enumerate(maze):
for x,cell in enumerate(row):
if cell == "S":
start=(y,x)
cell = 'a'
if cell == "E":
end=(y,x)
cell = 'z'
maze[y][x]=string.ascii_lowercase.index(cell)
if cell == 'a':
possibleStarts.append((y,x))
neighbors = [(1,0),(0,1),(-1,0),(0,-1)]
def BFS(maze,path,openCells):
while openCells:
y,x,level,from_y, from_x = openCells.popleft()
try: maze[y][x]
except:continue
if path[y][x] is not None: continue
if y < 0 or x < 0: continue
if maze[y][x] >= maze[from_y][from_x]+2: continue
path[y][x] = (from_y,from_x)
if (y,x) == end:
print(level)
return level
newCells = [(y+a,x+b) for a,b in neighbors]
for cell in newCells:
openCells.append((*cell,level+1,y,x))
lengths = []
for start in possibleStarts:
path = [[None for _ in range(len(maze[0]))]for _ in range(len(maze))]
cells = deque()
cells.append((*start,0,*start))
length = BFS(maze,path, cells)
if length:
lengths.append(length)
print(min(lengths))
|
[
"eolokk@utu.fi"
] |
eolokk@utu.fi
|
9fbc893e6efab9becd109783348b0a57664316ff
|
17e3234ab01fd93233cc453f1495d50424c3bd8f
|
/latte/latte_core/doctype/job_run/test_job_run.py
|
d0b9d9d95a655a44947e8b1fba1d3d08089abc30
|
[
"MIT"
] |
permissive
|
sunnyakaxd/latte
|
8943dbf70ce934e04e51b147a54e6dd02dfe43db
|
de74065122a1f858bd75f8e1a36fca3b23981f4c
|
refs/heads/master
| 2023-06-11T10:25:31.217047
| 2021-07-06T06:40:19
| 2021-07-06T06:40:19
| 383,363,137
| 0
| 0
|
NOASSERTION
| 2021-07-06T06:26:49
| 2021-07-06T06:26:49
| null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Sachin Mane and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestJobRun(unittest.TestCase):
pass
|
[
"himanshu.mishra@elastic.run"
] |
himanshu.mishra@elastic.run
|
7d0f3bd51417d76ebf705faf39481b1395711c0e
|
4d376856f1513a8674d7ad276cbbcc98cb414726
|
/Problem44/PentagonNumbers.py
|
e5e79ed13a465a6ed805d4d355c6dbef551afeb5
|
[] |
no_license
|
nixondcoutho/Python
|
b31b79c6c60a514d5cf46efcfdf70b16a843a665
|
53ea76b0fc285929bd1da7b6ea44110c3b26b373
|
refs/heads/master
| 2020-03-17T03:42:07.386052
| 2018-05-13T14:03:20
| 2018-05-13T14:03:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def pentagonalNumber(val):
return (val*(3*val-1))//2
pentagonalList = [pentagonalNumber(i) for i in range(1,1000000)]
for val in pentagonalList:
pass # the search over pentagonal pairs is missing in the original source
[
"adarshjaya12@gmail.com"
] |
adarshjaya12@gmail.com
|
0c3814e5faa036ed899f2c5cc7a7b040722273fa
|
05279ba5f444c38d71cb830bbe83451ab186e37c
|
/apps/network/urls.py
|
aac8018294f174b3d3e74159cc4b161277a90412
|
[
"MIT"
] |
permissive
|
zzmjohn/kontrolvm
|
98e22459603db95012bcf003849962f57cbe662a
|
5c567ae67c3f3e67e3132e025016685dd6b810f5
|
refs/heads/master
| 2021-01-15T20:03:46.132089
| 2013-05-14T22:12:34
| 2013-05-14T22:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from django.conf.urls.defaults import *
urlpatterns = patterns('apps.network.views',
url(r'^add/', 'add'),
url(r'^edit/', 'edit'),
url(r'$', 'index'),
)
|
[
"jawr@jarrah"
] |
jawr@jarrah
|
0252e94435305b0b2f6efc349e988ccb1a01fa6b
|
863ba19ae0fcf7e7f4e17f878c5c0dbb82ceed14
|
/template.py
|
b712a81473879bb865aff28ddb198385e38ece7d
|
[] |
no_license
|
ptrk8/usyd-2019-ai-competition
|
75513ae77fbbdc4788337af39a476eb45f6c5fe3
|
386b66d441be22640f952eb485ba2474dc3ae2c0
|
refs/heads/master
| 2022-10-12T09:57:39.453088
| 2022-10-09T02:52:34
| 2022-10-09T02:52:34
| 199,142,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,025
|
py
|
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
from keras import optimizers
from keras import layers, Sequential
from keras.applications.densenet import preprocess_input, DenseNet121
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import h5py
from keras.callbacks import Callback, LearningRateScheduler
from sklearn.metrics import cohen_kappa_score
from utils import get_custom_callback, to_multi_label, f1_m, best_lr_decay, f1_loss, multi_label_acc
import os
import sys
IMG_SIZE = 384 # this must correspond with what is in .h5 file
NUM_CLASSES = 5 # 5 output classes
NUM_EPOCHS = 50 # number of epochs
BATCH_SIZE = 3
def main():
# Name of this script
script_name = os.path.basename(__file__)[0:-3]
# Construct folder name using name of this script
output_path_name = '_{}_outputs'.format(script_name)
# Try to create a new folder
try:
# Make the output folder
os.mkdir(output_path_name)
except FileExistsError:
pass
# Model below this line ================================================
learn_rate = LearningRateScheduler(best_lr_decay, verbose=1)
custom_callback = get_custom_callback('multi_label', './{}'.format(output_path_name))
callbacks_list = [custom_callback, learn_rate]
file = h5py.File('./data/data_rgb_384_processed.h5', 'r')
x_train, y_train, x_test, y_test = file['x_train'], file['y_train'], file['x_test'], file['y_test']
y_train = to_categorical(y_train, NUM_CLASSES)
y_test = to_categorical(y_test, NUM_CLASSES)
y_train = to_multi_label(y_train)
y_test = to_multi_label(y_test)
datagen = ImageDataGenerator(
horizontal_flip=True,
vertical_flip=True,
rotation_range=360
)
model = Sequential()
densenet = DenseNet121(
weights='imagenet',
include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3)
)
model.add(densenet)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
# model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
model.add(layers.Dense(NUM_CLASSES, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',
# optimizer=optimizers.Adam(lr=0.0001,decay=1e-6),
optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
metrics=[multi_label_acc, f1_m])
# fits the model on batches with real-time data augmentation:
history = model.fit_generator(
datagen.flow(x_train, y_train, batch_size=BATCH_SIZE, seed=1),
steps_per_epoch=len(x_train) // BATCH_SIZE,
epochs=NUM_EPOCHS,
validation_data=(x_test, y_test),
callbacks=callbacks_list,
max_queue_size=2
)
if __name__ == '__main__':
main()
|
[
"patrick.hao95@gmail.com"
] |
patrick.hao95@gmail.com
|
22566527308ed7f74a4a7f9ca4e2df3a3c667e79
|
861e0cd03dc7f6a5bd0c470702b31c56340c84e4
|
/bin/poliapp
|
79ecd00e3f8f9e760a3f236c7002a35f4501fbfe
|
[] |
no_license
|
ben105/DecisionTime
|
d3ad540a787180f7893082be676dc7fc7dc5f9bd
|
a5ca52993c3e7606772eed5143574fd8927b5b4e
|
refs/heads/master
| 2021-04-29T11:00:49.030376
| 2017-01-05T06:11:43
| 2017-01-05T06:11:43
| 77,857,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
#!/usr/bin/env python3
from flask import Flask, request
import logging
import json
import sys
import psycopg2
# Modules for poliapp
sys.path.insert(0, '../lib')
import polistore
# import poliauth
# Set up the error and debug logging.
path = '/var/log/poliapp/poliapp.log'
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
filemode='a',
filename=path,
level=logging.DEBUG)
# Attempt to open up the settings configurations.
try:
f = open('settings.conf')
settings = json.load(f)
except Exception as exc:
logging.error("Cannot load the settings.\n{}".format(exc))
sys.exit(1)
db_server = settings['db_server']
port = settings['port']
app = Flask(__name__)
conn = None
try:
conn = psycopg2.connect("dbname=poli host=%s user=poli password=poli" % db_server)
conn.autocommit = True
except Exception as exc:
logging.error('exception raised trying to connect to database\n%s', str(exc))
quit()
cur = conn.cursor()
####### Comments ########
@app.route('/api/v1/questions', methods=['GET'])
def questions():
questions = polistore.questions(cur)
return json.dumps(questions)
@app.route('/api/v1/answer', methods=['POST'])
def answer():
body = json.loads(request.data)
question_id = body['question_id']
answer_id = body['answer_id']
importance = body['importance']
return json.dumps({
'political_favorability': -7
})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port)
|
[
"calgrove@ip-172-30-0-152.us-west-2.compute.internal"
] |
calgrove@ip-172-30-0-152.us-west-2.compute.internal
|
|
4c4308cd1a975cb17c8f6a652f4249908cf3a911
|
05cc3a62b95ff5e6d4b265912ba3609d506c6672
|
/BingYan/1.py
|
e07975d8d23ef470d4089ff4ed17a3daa81245aa
|
[] |
no_license
|
xiong35/my_code2242787668
|
260580645093a868439f12a082798d09c8910bbe
|
d11d90f08bfe51dbe3dda5f2fe2e23b448bb70f3
|
refs/heads/master
| 2020-08-26T15:20:10.980467
| 2020-03-16T02:01:12
| 2020-03-16T02:01:12
| 217,053,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
def add(num1, num2):
sum = 0
carry = 0
while (num2 != 0):
sum = num1 ^ num2 # sum of the bits without carrying
carry = (num1 & num2) << 1 # carry bits, shifted into position
num1 = sum
num2 = carry
return num1
input = (233,455)
print(add(*input))
|
[
"2242787668@qq.com"
] |
2242787668@qq.com
|
26cdbeaddf498b499551e36dcd9502c0a8fb2ed3
|
81e69cddb319516b4ebd92ed5df905f028412cd7
|
/test.py
|
381fdb36bc9de9499ae2d428e159696a9e592554
|
[
"MIT"
] |
permissive
|
jjpulidos/Programming-Languages-2020
|
3c76782ae642cb66f89062ae59c600c62ecd5224
|
42b9455b5697b07756f8c29e22409161f34bd283
|
refs/heads/master
| 2021-05-22T19:45:09.982604
| 2020-05-27T18:06:01
| 2020-05-27T18:06:01
| 253,063,712
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
def prueba():
s = 6
def prueba2():
print(s) # looks up s in the enclosing scope at call time
s = 4
prueba2() # prints 4: s was rebound to 4 before the call
print(s) # prints 4
prueba()
|
[
"jjpulidos@unal.edu.co"
] |
jjpulidos@unal.edu.co
|
74fa69cc54bdacba5a85a799decf55e31a4a8f38
|
73de523bde0c9e8398c63a924b44aadc46d11202
|
/test/test_nfs_alias_extended.py
|
26d6921cf5970e7291ef4a348ae8b8ec80c317b9
|
[
"MIT"
] |
permissive
|
Feyd-Aran/isilon_sdk_python
|
1c2fae306c1a95a99024dd13dc0fc3b120f9c1de
|
24e85a5577d15ac3db06862d07d5a261658c67b7
|
refs/heads/v8.0.0
| 2020-09-23T00:16:36.684270
| 2019-12-02T13:45:12
| 2019-12-02T13:45:12
| 225,351,700
| 0
| 0
|
MIT
| 2019-12-02T10:51:54
| 2019-12-02T10:51:53
| null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.models.nfs_alias_extended import NfsAliasExtended # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestNfsAliasExtended(unittest.TestCase):
"""NfsAliasExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNfsAliasExtended(self):
"""Test NfsAliasExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0.models.nfs_alias_extended.NfsAliasExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"brandonfkrueger@gmail.com"
] |
brandonfkrueger@gmail.com
|
30777ecad10702e2914965fe062767b4cfc793be
|
d93a37864885a095f19256b011e5984797ca8d6e
|
/app/__init__.py
|
07c2838079ff6cc5bd0e522404fe4d108371d0c9
|
[] |
no_license
|
thesmith4734/gswebsite
|
b5c6373bf228b31d326d4ae2176c200d22b41470
|
52723728c0a40d835fd5719da357bf1ad0447ec1
|
refs/heads/master
| 2023-03-17T01:37:12.947799
| 2021-02-26T03:09:11
| 2021-02-26T03:09:11
| 342,079,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
from flask import Flask
from flask_bootstrap import Bootstrap
app = Flask(__name__)
from app import routes
bootstrap = Bootstrap(app)
|
[
"thehellhound4734@aim.com"
] |
thehellhound4734@aim.com
|
3c5cf103baa0e5a07248d93bb2d7996712be4dac
|
bc6af1797e2200fe649ef1a6517f032fd3bd2484
|
/main_app/views.py
|
cf42ba979bc461fbd82701ecdae833c124f1a512
|
[] |
no_license
|
zfinnan/catcollector
|
dd38f3a6c3adf044ef7495499711b2b18b0d03a4
|
e617b61191a63b72e35f7605a4e06157ad25e6eb
|
refs/heads/main
| 2023-02-18T16:47:37.755472
| 2021-01-15T17:22:22
| 2021-01-15T17:22:22
| 329,660,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,194
|
py
|
from django.shortcuts import render
from .models import Cat, CatToy
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import authenticate, login, logout
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
return render(request, 'index.html')
def about(request):
return render(request, 'about.html')
######## USER ########
@login_required
def profile(request, username):
user = User.objects.get(username=username)
cats = Cat.objects.filter(user=user)
return render(request, 'profile.html', { 'username': username, 'cats': cats })
def login_view(request):
# if post, then authenticate (the user will be submitting a username and password)
if request.method == 'POST':
form = AuthenticationForm(request, request.POST)
if form.is_valid():
u = form.cleaned_data['username']
p = form.cleaned_data.get('password')
user = authenticate(username=u, password=p)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/user/' + u)
else:
print(f"The account for {u} has been disabled.")
else:
print('The username and/or password is incorrect.')
else:
form = AuthenticationForm()
return render(request, 'login.html', {'form': form})
else: # get request that sent up empty form
form = AuthenticationForm()
return render(request, 'login.html', {'form': form})
def logout_view(request):
logout(request)
return HttpResponseRedirect('/cats')
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return HttpResponseRedirect('/cats')
else:
form = UserCreationForm()
return render(request, 'signup.html', {'form': form})
else:
form = UserCreationForm()
return render(request, 'signup.html', {'form': form})
######## CATS ########
def cats_index(request):
cats = Cat.objects.all()
return render(request, 'cats/index.html', {'cats': cats})
def cats_show(request, cat_id):
cat = Cat.objects.get(id=cat_id)
return render(request, 'cats/show.html', { 'cat': cat })
@method_decorator(login_required, name="dispatch")
class CatCreate(CreateView):
model = Cat
fields = ['name', 'breed', 'description', 'age', 'cattoys']
success_url = '/cats'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = self.request.user
self.object.save()
return HttpResponseRedirect('/cats/' + str(self.object.pk))
@method_decorator(login_required, name="dispatch")
class CatUpdate(UpdateView):
model = Cat
fields = ['name', 'breed', 'description', 'age', 'cattoys']
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.save()
return HttpResponseRedirect('/cats')
@method_decorator(login_required, name="dispatch")
class CatDelete(DeleteView):
model = Cat
success_url = '/cats'
######## CatToy ########
def cattoys_index(request):
cattoys = CatToy.objects.all()
return render(request, 'cattoys/index.html', { 'cattoys': cattoys })
def cattoys_show(request, cattoy_id):
cattoy = CatToy.objects.get(id=cattoy_id)
return render(request, 'cattoys/show.html', { 'cattoy': cattoy })
@method_decorator(login_required, name="dispatch")
class CatToyCreate(CreateView):
model = CatToy
fields = '__all__'
success_url = '/cattoys'
@method_decorator(login_required, name="dispatch")
class CatToyUpdate(UpdateView):
model = CatToy
fields = ['name', 'color']
success_url = '/cattoys'
@method_decorator(login_required, name="dispatch")
class CatToyDelete(DeleteView):
model = CatToy
success_url = '/cattoys'
|
[
"63361320+zfinnan@users.noreply.github.com"
] |
63361320+zfinnan@users.noreply.github.com
|
f3cc74839fd1e068538d6ef45bd477d14669beb1
|
8cdc63b549f5a7f1aca7b476a5a918e5c05e38c5
|
/app/notifier/views.py
|
f6290b484aa499c56d76805c924c8c55f4cbb991
|
[
"MIT"
] |
permissive
|
rogeriopaulos/gep
|
984e3bcd8bd4569031577e1d28a8c47c6aace91f
|
e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2
|
refs/heads/main
| 2023-08-14T08:41:19.558899
| 2021-09-15T02:51:46
| 2021-09-15T02:51:46
| 402,270,601
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,337
|
py
|
import adm.models as adm
import core
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.views.generic import FormView
from guardian.shortcuts import get_users_with_perms
from .forms import NotifiedUsersForm
from .tasks import notificar
from .utils import atos
class NotifierView(LoginRequiredMixin, FormView):
template_name = 'componentes/shares/NotifyForm.html'
form_class = NotifiedUsersForm
verb = 'notificou'
def dispatch(self, request, *args, **kwargs):
if hasattr(self, 'settings'):
self.settings(self, request, *args, **kwargs)
return super(NotifierView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
self.check_permission()
self.notify_users(form)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super(NotifierView, self).get_context_data(**kwargs)
context['obj'] = self.obj
context['success_url'] = self.success_url
return context
def get_form(self, form_class=None):
self.check_permission()
if form_class is None:
form_class = self.get_form_class()
return form_class(queryset=self.notificados(), **self.get_form_kwargs())
def check_permission(self):
user_has_perm = self.request.user in get_users_with_perms(self.processo)
user_has_authority = self.request.user.groups.filter(name=self.GRUPO_SUPERIOR)
user_at_orgao = self.request.user.profile.orgao_link == self.processo.orgao_processo
if user_has_authority and not (user_at_orgao or user_has_perm):
raise PermissionDenied
if not user_has_authority and not user_has_perm:
raise PermissionDenied
def notificados(self):
users_with_perms = get_users_with_perms(self.processo)
authorities = User.objects \
.filter((Q(groups__name=self.GRUPO_SUPERIOR)) & Q(profile__orgao_link=self.processo.orgao_processo)) \
.distinct()
users = users_with_perms | authorities
return users.filter(is_superuser__exact=False, is_active__exact=True).order_by('first_name')
def notify_users(self, form):
actor = self.request.user
users = form.cleaned_data['usuarios']
notification_context = {
'actor': serializers.serialize('json', [actor]),
'users': serializers.serialize('json', users),
'verb': self.verb,
'target': serializers.serialize('json', [self.processo]),
'action_object': serializers.serialize('json', [self.obj]),
'description': self.processo.get_absolute_url()
}
notificar.delay(notification_context)
def settings(self, request, *args, **kwargs):
self.set_obj(self, request, *args, **kwargs)
self.set_processo(self, request, *args, **kwargs)
self.set_success_url(self, request, *args, **kwargs)
def set_obj(self, request, *args, **kwargs):
self.obj = get_object_or_404(self.model, pk=self.kwargs['model_pk'])
def set_processo(self, request, *args, **kwargs):
self.processo = self.obj.processo
def set_success_url(self, request, *args, **kwargs):
self.success_url = self.obj.processo.get_absolute_url()
class NotifyAtoAdmView(NotifierView):
GRUPO_SUPERIOR = core.permissions.GRUPO_SUPERIOR_ADMINISTRATIVO
model = adm.AtoAdm
submodels = atos['adm']
def set_obj(self, request, *args, **kwargs):
super().set_obj(request)
self.obj = self.submodels[self.obj.tipo_ato].objects.get(pk=self.obj.pk)
notifica_ato_adm = NotifyAtoAdmView.as_view()
class NotifyOficioEmpresaView(NotifierView):
GRUPO_SUPERIOR = core.permissions.GRUPO_SUPERIOR_ADMINISTRATIVO
model = adm.OfEmpresas
def set_processo(self, request, *args, **kwargs):
self.processo = self.obj.controlempresas.processo
def set_success_url(self, request, *args, **kwargs):
self.success_url = self.obj.controlempresas.processo.get_absolute_url()
notifica_oficio_empresa = NotifyOficioEmpresaView.as_view()
|
[
"rogeriopaulos@gmail.com"
] |
rogeriopaulos@gmail.com
|
cc4b81158428b5039951c734cb8ff5b0800851a5
|
c31ec40b6dcb90861e8aee2dd4b9db641937ccf5
|
/src/PredictCreditCardDelinquency/inference.py
|
4ae19a4fa84e844343f1d98ba68d8f87e91783f2
|
[
"Apache-2.0"
] |
permissive
|
leeeejunnnn/PredictCreditCardDelinquency
|
158af84995c9491c1acb060ca12f83f7d6ee6fe5
|
ed4e599bed277d138bded3660d2b75c8e76d0d52
|
refs/heads/main
| 2023-08-27T16:34:16.359746
| 2021-10-09T00:03:30
| 2021-10-09T00:03:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
import argparse
import pandas as pd
if __name__ == "__main__":
path = "../../input/predict-credit-card-delinquency/"
parse = argparse.ArgumentParser("Ensemble")
parse.add_argument("-w1", type=float, default=0.98)
parse.add_argument("-w2", type=float, default=0.1)
parse.add_argument("-w3", type=float, default=0.05)
parse.add_argument("-w4", type=float, default=0.05)
parse.add_argument("-w5", type=float, default=0.1)
parse.add_argument("--file", type=str, default="ensemble_model.csv")
args = parse.parse_args()
lgb_preds = pd.read_csv("../../submission/lgbm_submit.csv")
xgb_preds = pd.read_csv("../../submission/xgb_submit.csv")
cat_preds = pd.read_csv("../../submission/cat_submit_test.csv")
rf_preds = pd.read_csv("../../submission/rf_submit.csv")
tab_preds = pd.read_csv("../../submission/tabnet_submit.csv")
submission = pd.read_csv(path + "sample_submission.csv")
submission.iloc[:, 1:] = (
args.w1 * cat_preds.iloc[:, 1:]
+ args.w2 * lgb_preds.iloc[:, 1:]
+ args.w3 * xgb_preds.iloc[:, 1:]
+ args.w4 * rf_preds.iloc[:, 1:]
+ args.w5 * tab_preds.iloc[:, 1:]
)
submission.to_csv("../../submission/" + args.file, index=False)
|
[
"leewook94@naver.com"
] |
leewook94@naver.com
|
eb9f92818f4a34f8b18330ac495349d7015c8939
|
ba09463fcf9144df29958026c82a4b5cb7090291
|
/siftNearestN2.py
|
2cc8e0d8885591db459665e6f6123e056899a5ca
|
[] |
no_license
|
EricCheng2222/BagOfSiftFeatures
|
62accf21acc39c5b1ba7c6366521dbc0ff84b6c3
|
c9d2e456e63ead7429d35722f7b836446fb6454d
|
refs/heads/master
| 2020-03-17T15:15:54.685896
| 2018-05-16T17:51:39
| 2018-05-16T17:51:39
| 133,703,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
import math
import cv2
import numpy as np
import glob, os
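# --- Editor's note (hedged sketch): the helpers used below (getAllDirIn, getAllFileIn,
# extractKeypoint, extractFeatureFrom, kmean, score) and the variable testImage are not
# defined in this file, so the script is a pipeline outline rather than runnable code.
# Purely for illustration, extractKeypoint could be realised with OpenCV as
#     img = cv2.imread(picture, cv2.IMREAD_GRAYSCALE)
#     keypoints, descriptors = cv2.SIFT_create().detectAndCompute(img, None)
# and kmean with sklearn.cluster.KMeans fitted on the stacked descriptors; these names
# and signatures are assumptions, not part of the original repository.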
fileList = []
keypointList = []
dirList = getAllDirIn("")
#readDir
for dir in dirList:
tmp = getAllFileIn(dir)
fileList.append(tmp)
#readImg, extractFeature and use Kmean to extract important feature
importantFeatureList = []
for pictureList in fileList:
oneCateFeatureList = []
for picture in pictureList:
keypoint = extractKeypoint(picture)
tmpFeatureList = extractFeatureFrom(keypoint)
oneCateFeatureList.append(tmpFeatureList)
oneCateImportantFeature = kmean(oneCateFeatureList)
importantFeatureList.append(oneCateImportantFeature)
#readTestImage
for image in testImage:
cateLabel = 1
currentLabel = 0
currentScore = 255*10*10
for importantFeature in importantFeatureList:
cmpScore = score(image, importantFeature)
if currentScore > cmpScore:
currentLabel = cateLabel
currentScore = cmpScore
cateLabel = cateLabel + 1
print (currentLabel, currentScore)
|
[
"noreply@github.com"
] |
EricCheng2222.noreply@github.com
|
74d926edb734c14ed48d62777ea99cd69278b441
|
aa43c361cc3c99445166c05e17b0150ada991d55
|
/re.credit.py
|
d152c5dba1d145125a893b40267914338daa707c
|
[] |
no_license
|
surendra-3085/code_challenges
|
5a4eb8679a3edd39f5c3d877c32fbb1d02d7b4f6
|
f216275b19330e8594f51b1eb4a58c96fb7b2c8e
|
refs/heads/master
| 2020-04-11T22:08:38.300330
| 2018-12-17T19:44:54
| 2018-12-17T19:44:54
| 162,126,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import re
credit_number = input("enter card number")
cardnumber = re.match('^[0-9]{4}[a-z]{4}',credit_number)
if cardnumber:
print("valid number")
else:
print("invalid number")
|
[
"surendrakumaryadav3085@gmail.com"
] |
surendrakumaryadav3085@gmail.com
|
2c3b30d76b84c1ec9910af37cc9a7eb984c7586d
|
10665bd276fee3e8734a8d8d0354d62af69c699e
|
/ISCE2StaMPS_run_steps.py
|
a137a49700d2e1a716a31f9becfb9ea124b59618
|
[] |
no_license
|
LuyenKhacBui/ISCE2StaMPS
|
0474c23ab13759f137e0b51c08bb0279d5fafede
|
6ae4c3717f760c834a20ebad23cb9175006dd76e
|
refs/heads/master
| 2021-09-08T09:43:50.452651
| 2021-09-02T18:48:57
| 2021-09-02T18:48:57
| 243,200,747
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,141
|
py
|
#!/usr/bin/env python3
'''#!/usr/bin/env python'''
'''This script runs the steps listed in the "run_files" directory, in order, to adapt ISCE to StaMPS.
It redirects the output of every command line to log files in "run_files_reports" and finally writes
the "input_file" used by the next step ('make_single_master_stack_isce').
'''
import os, sys
import datetime
import subprocess
import glob
import logging
appdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(appdir)
##sys.path.append(appdir)
sys.path.insert(0, appdir)
#from modules.basictools import LoggingGen
def LoggingGen(logfile):
''' A fn. used to generate logging appended to a file (i.e., "logfile")
parameters: logfile : a file used to log infor.
'''
#######################
##### CREATE A LOG FILE
logger = logging.getLogger('mylog')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(logfile, mode = 'w')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
#formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s: %(message)s')
formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
# logging examples
#logger.debug('debug message')
#logger.info('info message')
#logger.warn('warn message')
#logger.error('error message')
#logger.critical('critical message')
return logger
sttm = datetime.datetime.now() # Luyen cmt: start time running for all prog.
print()
print ('##########################################################################################################################')
print ('# #')
print ('# This is the app. used to run steps listed in directory "run_files" in order to adapt ISCE to StaMPS #')
print ('# (Script used: ISCE2StaMPS_run_steps.py) #')
print ('# #')
print ('##########################################################################################################################')
#######################
##### CREATE A LOG FILE
logfile = 'ISCE2StaMPS_run_steps_log.txt'
'''if os.path.isfile(logfile):
print ('\r\nThe logging file: ' + logfile + ' is already existent. Please delete or change its name.\r\n')
sys.exit()'''
logger = LoggingGen(logfile)
logger.info ('##########################################################################################################################')
logger.info ('# #')
logger.info ('# This is the app. used to run steps listed in directory "run_files" in order to adapt ISCE to StaMPS #')
logger.info ('# (Script used: ISCE2StaMPS_run_steps.py) #')
logger.info ('# #')
logger.info ('##########################################################################################################################')
crdir = os.getcwd() # Current dir
rfdir = 'run_files' # The dir of which "run_files" dir is included
rflog = 'run_files_reports' # The dir of which log/reports of "run_files" line-by-line are included
if os.path.isdir(rflog):
cmd = 'rm -r ' + rflog
subprocess.call(cmd, shell=True)
cmd = "mkdir " + rflog
subprocess.call(cmd, shell=True)
print ('\r\nSteps being run is listed in the directory ' + repr(rfdir))
logger.info ('Steps being run is listed in the directory ' + repr(rfdir))
rffle = [file for file in glob.glob(rfdir + '/' + "run_*")] # List all files included in 'run_files' dir
rffle = [os.path.split(f)[1] for f in rffle] # Split to keep just file names only (i.e., remove its directory ('run_files'))
rffle.sort(key=lambda f: int("".join(filter(str.isdigit, f[4:6])))) # Sort list of run files so that it will be: [run_1_...; run_2_..., ..., run_10_...]
print ('\r\nNumber of run files: ' + str(len(rffle)))
logger.info ('Number of run files: ' + str(len(rffle)))
for ii, file in enumerate(rffle):
print ('\r\nRun commands listed in run file number: ' + str(ii + 1).zfill(len(str(len(rffle)))) + ' / ' + str(len(rffle)) + '\t: ' + file)
logger.info ('Run commands listed in run file number: ' + str(ii + 1).zfill(len(str(len(rffle)))) + ' / ' + str(len(rffle)) + '\t: ' + file)
rfile = open(os.path.join(rfdir, file), "r")
cnt = rfile.readlines()
rfile.close()
for jj, line in enumerate(cnt):
print ('\tCall command from line number: ' + str(jj + 1).zfill(len(str(len(cnt)))) + ' / ' + str(len(cnt)) + '\t: ' + str(line).rstrip("\n\r"))
logger.info ('\tCall command from line number: ' + str(jj + 1).zfill(len(str(len(cnt)))) + ' / ' + str(len(cnt)) + '\t: ' + str(line).rstrip("\n\r"))
if ii+1 < 10:
cmd = line.rstrip("\n\r") + ' >> ' + rflog + '/' + file + '_line_' + str(jj + 1).zfill(len(str(len(cnt)))) + '_' + str(len(cnt)) + '.txt'
else:
cmd = line.rstrip("\n\r") + ' >> ' + rflog + '/' + file + '_line_' + str(jj + 1).zfill(len(str(len(cnt)))) + '_' + str(len(cnt)) + '.txt'
subprocess.call(cmd, shell=True)
print ('\r\nLogging/Report files of the above steps are saved in: ' + repr(rflog) + ' that should be carefully read to check if any error issued.')
logger.info ('Logging/Report files of the above steps are saved in: ' + repr(rflog) + ' that should be carefully read to check if any error issued.')
print ("\r\nCreate 'input_file' used for running in the next step: 'make_single_master_stack_isce'")
logger.info ("Create 'input_file' used for running in the next step: 'make_single_master_stack_isce'")
stkpth = os.path.abspath('merged/SLC')
stkmst = 'UNKNOWN'
geopth = os.path.abspath('merged/geom_master')
bslpth = os.path.abspath('merged/baselines')
rglook = 40
azlook = 10
asrtio = 4
lmbda = 0.056
slcsuf = '.full'
geosuf = '.full'
ofile = 'input_file'
print ('\tslc_stack_path : ' + stkpth)
print ('\tslc_stack_master : ' + stkmst)
print ('\tslc_stack_geom_path : ' + geopth)
print ('\tslc_stack_baseline_path: ' + bslpth)
print ('\trange_looks : ' + str(rglook))
print ('\tazimuth_looks : ' + str(azlook))
print ('\tlambda : ' + str(lmbda))
print ('\tslc_suffix : ' + slcsuf)
print ('\tgeo_suffix : ' + geosuf)
logger.info ('\tslc_stack_path : ' + stkpth)
logger.info ('\tslc_stack_master : ' + stkmst)
logger.info ('\tslc_stack_geom_path : ' + geopth)
logger.info ('\tslc_stack_baseline_path: ' + bslpth)
logger.info ('\trange_looks : ' + str(rglook))
logger.info ('\tazimuth_looks : ' + str(azlook))
logger.info ('\tlambda : ' + str(lmbda))
logger.info ('\tslc_suffix : ' + slcsuf)
logger.info ('\tgeo_suffix : ' + geosuf)
outputFile = open(ofile,'w')
outputFile.write('source_data\t\tslc_stack\n')
outputFile.write('slc_stack_path\t\t%s\n' % stkpth)
outputFile.write('slc_stack_master\t%s\n' % stkmst)
outputFile.write('slc_stack_geom_path\t%s\n' % geopth)
outputFile.write('slc_stack_baseline_path\t%s\n\n' % bslpth)
outputFile.write('range_looks\t\t%i\n' % rglook)
outputFile.write('azimuth_looks\t\t%i\n' % azlook)
outputFile.write('aspect_ratio\t\t%i\n\n' % asrtio)
outputFile.write('lambda\t\t\t%.3f\n' % lmbda)
outputFile.write('slc_suffix\t\t%s\n' % slcsuf)
outputFile.write('geom_suffix\t\t%s\n' % geosuf)
outputFile.close()
fntm = datetime.datetime.now() # Luyen cmt: finish time running for all prog.
logger.info('==============================================')
logger.info('----------------------------------------------')
logger.info('Prog. started at : ' + str(sttm))
logger.info('Prog. finished at : ' + str(fntm))
logger.info('total running time: ' + str(fntm - sttm))
logger.info('Program finished !')
logger.info('----------------------------------------------')
logger.info('==============================================')
print ('\r\n==========================================================================================================================')
print ('--------------------------------------------------------------------------------------------------------------------------')
print ('Prog. started at : ' + str(sttm))
print ('Prog. finished at : ' + str(fntm))
print ('total running time: ' + str(fntm - sttm))
print ('Program finished !')
print ('--------------------------------------------------------------------------------------------------------------------------')
print ('==========================================================================================================================')
|
[
"buikhacluyen@humg.edu.vn"
] |
buikhacluyen@humg.edu.vn
|
2e7f3da0cce5a64c561d84c71c1d0db03b83b50d
|
1076553541e344ff6f76f55ef8cf05c597fbc493
|
/Bishop.py
|
c4b6a466a6b0cb819b2e0d13dcd571fd5f2f686b
|
[] |
no_license
|
Jinsung1022/Chess
|
a6a84390143d4cf5108877fdc4d204ff4fb05dc1
|
3cfd4c0b32a84c817f8805e29bac8b084c33c180
|
refs/heads/master
| 2020-08-29T17:41:38.345480
| 2020-02-06T00:36:32
| 2020-02-06T00:36:32
| 218,114,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
from Piece import Piece
class Bishop(Piece):
def __init__(self, color, x, y):
super().__init__(color, x, y)
def possible(self, board):
move_array = []
directions = [[1, 1], [-1, 1], [-1, -1], [1, -1]]
for direction in directions:
blocked = False
pos_x = self.x
pos_y = self.y
for i in range(0, 8):
pos_x = pos_x + direction[0]
pos_y = pos_y + direction[1]
if self.inside_board(pos_x, pos_y):
if not blocked:
if board[pos_x][pos_y] != 0:
if board[pos_x][pos_y].color == self.opp_color(self.color):
move_array.append((pos_x, pos_y))
blocked = True
elif board[pos_x][pos_y].color == self.color:
blocked = True
else:
move_array.append((pos_x, pos_y))
return move_array
|
[
"57107998+Jinsung1022@users.noreply.github.com"
] |
57107998+Jinsung1022@users.noreply.github.com
|
12029d039aa01c3481d6b33bd0fb592149d486d9
|
fd173195d07b5a5ce229a0c1a20ee61884d8c8a1
|
/python_practice/Basic_programs/11_is_fibonacci_number.py
|
b01f3ff8f77b38095c2c85b79ada9315f3692ad7
|
[] |
no_license
|
anitrajpurohit28/PythonPractice
|
f7e71946144e04b7f9cb9682087e5d4f79839789
|
8b75b67c4c298a135a5f8ab0b3d15bf5738859f1
|
refs/heads/master
| 2023-04-12T07:04:12.150646
| 2021-04-24T19:52:24
| 2021-04-24T19:52:24
| 293,912,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# 11 Python program for: how to check if a given number is a Fibonacci number?
# The following is an interesting property of Fibonacci numbers that
# can be used to check whether a given number is Fibonacci or not:
# a number n is Fibonacci if and only if one or both of
# (5*n^2 + 4) or (5*n^2 - 4) is a perfect square (Source: Wiki).
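# A quick worked check of this property (illustrative addition, not part of the original exercise):
#   n = 8:  5*8*8 - 4 = 316 (not a square), 5*8*8 + 4 = 324 = 18*18  -> 8 is a Fibonacci number
#   n = 10: 5*10*10 - 4 = 496 and 5*10*10 + 4 = 504, neither is a square -> 10 is not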
import math
def is_perfect_square(num):
s = int(math.sqrt(num))
return s*s == num
def is_fibonacci_number(n):
return is_perfect_square(5*n*n - 4) or is_perfect_square(5*n*n + 4)
for i in range(1, 11):
if is_fibonacci_number(i):
print(f"{i} is a fibonacci number")
else:
print(f"{i} is NOT a fibonacci number")
|
[
"anitrajpurohit28@gmail.com"
] |
anitrajpurohit28@gmail.com
|
11e95e1867e6a6d94774ee2e948e6019a0046f49
|
86d10b2e24863ab841280cb1a81b54f82ca8f480
|
/machamp/util.py
|
14551bba3d261b9bdf68cc18aedc8b6ccd9e9ff2
|
[
"MIT"
] |
permissive
|
ml-ai-nlp-ir/machamp
|
897a1d5769052ffafa874c5e9803853e6918d468
|
406cc0010dfd6eb370ada4658a6b899539321076
|
refs/heads/master
| 2022-09-28T07:01:18.935350
| 2020-06-04T11:44:56
| 2020-06-04T11:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,823
|
py
|
"""
A collection of handy utilities
"""
from typing import List, Tuple, Dict, Any
import os
import glob
import json
import logging
import tarfile
import traceback
import torch
import pprint
import copy
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common import Params
from allennlp.common.params import with_fallback
from allennlp.commands.predict import _PredictManager
from allennlp.common.checks import check_for_gpu
from allennlp.models.archival import load_archive
from allennlp.predictors.predictor import Predictor
logger = logging.getLogger(__name__)
# count number of sentences in file, if it is a connlu-like
# file it counts the empty lines, otherwise it counts all
# lines
def countLines(path):
total = 0
empty = 0
for line in open(path):
total += 1
if line.strip() == '':
empty += 1
if empty < 10:
return total
else:
return empty
def merge_configs(parameters_config: str, dataset_config: str, overrides: Dict) -> Params:
"""
Merges a dataset config file with a parameters config file
"""
mergedSettings = Params.from_file(parameters_config).as_dict()
mergedSettings = with_fallback(overrides, mergedSettings)#.update(overrides)
#mergedSettings = Params(mergedSettings)
dataset_config = Params.from_file(dataset_config)
defaultDecoder = mergedSettings['model'].pop('default_decoder')
orderedStuff = {}
mergedSettings['dataset_reader']['datasets'] = {}
mergedSettings['model']['decoders'] = {}
for dataset in dataset_config:
dataReader = {}
dataReader['train'] = dataset_config[dataset]['train_data_path']
dataReader['dev'] = dataset_config[dataset]['validation_data_path']
if 'test_data_path' in dataset_config[dataset]:
dataReader['test'] = dataset_config[dataset]['test_data_path']
if 'word_idx' in dataset_config[dataset]:
dataReader['word_idx'] = dataset_config[dataset]['word_idx']
else:
dataReader['sent_idxs'] = dataset_config[dataset]['sent_idxs']
dataReader['tasks'] = {}
if 'copy_other_columns' in dataset_config[dataset]:
dataReader['copy_other_columns'] = dataset_config[dataset]['copy_other_columns']
else:
dataReader['copy_other_columns'] = mergedSettings['model']['default_dataset']['copy_other_columns']
for task in dataset_config[dataset]['tasks']:
taskOverride = dataset_config[dataset]['tasks'][task]
decoder = copy.deepcopy(defaultDecoder)
decoder.update(taskOverride)
decoder['dataset'] = dataset
decoder['task'] = task
dataReader['tasks'][task] = copy.deepcopy(decoder)
orderIdx = decoder['order']
if 'task_type' not in decoder:
logger.warning('Error, task ' + task + ' has no defined task_type')
exit(1)
curTrans = decoder['task_type']
curLayer = decoder['layer']
if decoder['task_type'] == 'dependency':
decoder['type'] = 'machamp_dependency_decoder'
if 'metric' not in dataReader['tasks'][task]:
decoder['metric'] = 'LAS'
if 'tag_representation_dim' not in dataReader['tasks'][task]:
decoder['tag_representation_dim'] = 256
if 'arc_representation_dim' not in dataReader['tasks'][task]:
decoder['arc_representation_dim'] = 768
elif decoder['task_type'] == 'classification':
decoder['type'] = 'machamp_sentence_classifier'
#ROB TODO why do we need empty kwargs?
decoder['kwargs'] = {}
elif decoder['task_type'] == 'multiseq':
decoder['type'] = 'multiseq_decoder'
elif decoder['task_type'] in ['seq', 'string2string']:
if 'decoder_type' in decoder and decoder['decoder_type'] == 'crf':
decoder['type'] = 'masked_crf_decoder'
del decoder['decoder_type']
else:
decoder['type'] = 'machamp_tag_decoder'
else:
logger.warning('task_type ' + str(dataReader['tasks'][task]['task_type']) + " not known")
exit(1)
if 'metric' not in decoder:
decoder['metric'] = 'acc'
if decoder['metric'] == 'span_f1':
decoder['metric'] = 'machamp_span_f1'
orderedStuff[task] = [orderIdx, curTrans, curLayer]
# save stuff in mergedSettings
mergedSettings['model']['decoders'][task] = decoder
dataReader['tasks'][task] = copy.deepcopy(decoder)
mergedSettings['dataset_reader']['datasets'][dataset] = dataReader
# Rob: we definitely do not want to cheat and add dev and test labels here
mergedSettings["datasets_for_vocab_creation"] = ["train"]
del mergedSettings['model']['default_dataset']
# to support reading from multiple files we add them to the datasetreader constructor instead
# the following ones are there just here to make allennlp happy
mergedSettings['train_data_path'] = 'train'
mergedSettings['validation_data_path'] = 'dev'
if 'test_data_path' in dataset_config[dataset]:
mergedSettings['test_data_path'] = 'test'
# generate ordered lists, which make it easier to use in the machamp model
orderedTasks = []
orderedTaskTypes = []
orderedLayers = []
for label, idx in sorted(orderedStuff.items(), key=lambda item: item[1]):
orderedTasks.append(label)
orderedTaskTypes.append(orderedStuff[label][1])
orderedLayers.append(orderedStuff[label][2])
mergedSettings['model']['tasks'] = orderedTasks
mergedSettings['model']['task_types'] = orderedTaskTypes
mergedSettings['model']['layers_for_tasks'] = orderedLayers
mergedSettings['model']['decoders'][orderedTasks[0]]['prev_task'] = None
for taskIdx, task in enumerate(orderedTasks[1:]):
mergedSettings['model']['decoders'][task]['prev_task'] = orderedTasks[taskIdx]
#TODO shouldnt this be -1?
for task in orderedTasks:
mergedSettings['model']['decoders'][task]['task_types'] = orderedTaskTypes
mergedSettings['model']['decoders'][task]['tasks'] = orderedTasks
#taskIdx is not +1, because first item is skipped
    # remove items from tagdecoder, as they are not necessary there
for item in ['task_type', 'dataset', 'column_idx', 'layer', 'order']:
for task in mergedSettings['model']['decoders']:
if item in mergedSettings['model']['decoders'][task]:
del mergedSettings['model']['decoders'][task][item]
if 'trainer' in overrides and 'cuda_device' in overrides['trainer']:
mergedSettings['trainer']['cuda_device'] = overrides['trainer']['cuda_device']
#import pprint
#pprint.pprint(mergedSettings.as_dict())
#exit(1)
numSents = 0
for dataset in mergedSettings['dataset_reader']['datasets']:
trainPath = mergedSettings['dataset_reader']['datasets'][dataset]['train']
numSents += countLines(trainPath)
warmup = int(numSents/mergedSettings['iterator']['batch_size'])
mergedSettings['trainer']['learning_rate_scheduler']['warmup_steps'] = warmup
mergedSettings['trainer']['learning_rate_scheduler']['start_step'] = warmup
mergedSettings['model']['bert_path'] = mergedSettings['dataset_reader']['token_indexers']['bert']['pretrained_model']
#TODO, this will result in the same as appending _tags , however, the
# warning will still be there... this can be circumvented by copying
# allennlp.data.fields.sequence_label_field and add a smarter check...
#mergedSettings['vocabulary'] = {'non_padded_namespaces': ['ne1']}
return Params(mergedSettings)
def predict_model_with_archive(predictor: str, params: Params, archive: str,
input_file: str, output_file: str, batch_size: int = 1):
cuda_device = params["trainer"]["cuda_device"]
check_for_gpu(cuda_device)
archive = load_archive(archive,
cuda_device=cuda_device)
for item in archive.config.duplicate():
archive.config.__delitem__(item)
for item in params:
archive.config[item] = params.as_dict()[item]
predictor = Predictor.from_archive(archive, predictor)
manager = _PredictManager(predictor,
input_file,
output_file,
batch_size,
print_to_console=False,
has_dataset_reader=True)
manager.run()
def predict_model(predictor: str, params: Params, archive_dir: str,
input_file: str, output_file: str, batch_size: int = 1):
"""
Predict output annotations from the given model and input file and produce an output file.
:param predictor: the type of predictor to use, e.g., "machamp_predictor"
:param params: the Params of the model
:param archive_dir: the saved model archive
:param input_file: the input file to predict
:param output_file: the output file to save
:param batch_size: the batch size, set this higher to speed up GPU inference
"""
archive = os.path.join(archive_dir, "model.tar.gz")
predict_model_with_archive(predictor, params, archive, input_file, output_file, batch_size)
def cleanup_training(serialization_dir: str, keep_archive: bool = False, keep_weights: bool = False):
"""
Removes files generated from training.
:param serialization_dir: the directory to clean
:param keep_archive: whether to keep a copy of the model archive
:param keep_weights: whether to keep copies of the intermediate model checkpoints
"""
if not keep_weights:
for file in glob.glob(os.path.join(serialization_dir, "*.th")):
os.remove(file)
if not keep_archive:
os.remove(os.path.join(serialization_dir, "model.tar.gz"))
def archive_bert_model(serialization_dir: str, config_file: str, output_file: str = None):
"""
Extracts BERT parameters from the given model and saves them to an archive.
:param serialization_dir: the directory containing the saved model archive
:param config_file: the configuration file of the model archive
:param output_file: the output BERT archive name to save
"""
archive = load_archive(os.path.join(serialization_dir, "model.tar.gz"))
model = archive.model
model.eval()
try:
bert_model = model.text_field_embedder.token_embedder_bert.model
except AttributeError:
logger.warning(f"Could not find the BERT model inside the archive {serialization_dir}")
traceback.print_exc()
return
weights_file = os.path.join(serialization_dir, "pytorch_model.bin")
torch.save(bert_model.state_dict(), weights_file)
if not output_file:
output_file = os.path.join(serialization_dir, "bert-finetune.tar.gz")
with tarfile.open(output_file, 'w:gz') as archive:
archive.add(config_file, arcname="bert_config.json")
archive.add(weights_file, arcname="pytorch_model.bin")
os.remove(weights_file)
def to_multilabel_sequence(predictions, vocab, task):
#TODO @AR: Hard-coded parameters for now
THRESH = 0.5
k = 2
outside_index = vocab.get_token_index("O", namespace=task)
# @AR: Get the thresholded matrix and prepare the prediction sequence
pred_over_thresh = (predictions >= THRESH) * predictions
sequence_token_labels = []
# @AR: For each label set, check if to apply argmax or sigmoid thresh
for pred in pred_over_thresh:
num_pred_over_thresh = numpy.count_nonzero(pred)
if num_pred_over_thresh < k:
pred_idx_list = [numpy.argmax(predictions, axis=-1)]
# print("argmax ->", pred_idx_list)
else:
pred_idx_list = [numpy.argmax(predictions, axis=-1)]
# pred_idx_list = list(numpy.argpartition(pred, -k)[-k:])
# # print("sigmoid ->", pred_idx_list)
# # If the first (i.e., second best) is "O", ignore/remove it
# if pred_idx_list[0] == outside_index:
# pred_idx_list = pred_idx_list[1:]
# # If the second (i.e., the best) is "O", ignore/remove the first
# elif pred_idx_list[1] == outside_index:
# pred_idx_list = pred_idx_list[1:]
# else:
# pass
sequence_token_labels.append(pred_idx_list)
return sequence_token_labels
|
[
"robvanderg@live.nl"
] |
robvanderg@live.nl
|
58b605d380f94963234ff94707b96eb6474edbc1
|
b2aa1fe639381fc7d6499b3e8b3cafb0930ecbb0
|
/Arrays/3.py
|
653974d08f8068474d173bf051d507bcefcb7e4e
|
[
"MIT"
] |
permissive
|
Nadeemk07/Python-Examples
|
7c02290778643776b434ea7eb824c229bf90ade6
|
07e44dd406048887920a8be92b12cbc685592117
|
refs/heads/main
| 2023-05-11T11:22:14.608875
| 2021-06-06T18:19:32
| 2021-06-06T18:19:32
| 350,406,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
# Python3
m, n = [int(i) for i in input().split()]
if n <= 1:
print(n)
quit()
lesser_n = (n+2) % 60
lesser_m = (m+1) % 60
def fibo(n):
a, b = 0, 1
for i in range(2, n+1):
c = a+b
c = c % 10
b, a = c, b
return (c-1)
if lesser_n <= 1:
a = lesser_n-1
else:
a = fibo(lesser_n)
if lesser_m <= 1:
b = lesser_m-1
else:
b = fibo(lesser_m)
# print(a)
# print(b)
if a >= b:
print(a-b)
else:
print(10+a-b)
|
[
"noreply@github.com"
] |
Nadeemk07.noreply@github.com
|
699fe08801a753c20beaa4d2bddb839dc645687e
|
d546e27891f3637336a7b9f2222d1c0b0edb7c87
|
/pycsvschema/checker.py
|
30f46879965c5ff811ad9337669683a9579be0cb
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
crowdskout/PyCSVSchema
|
66d40389e321550f88d091974b60dd728de4c4df
|
d5d44ace9bfe84e1001577caf4d933827143d124
|
refs/heads/master
| 2021-03-22T04:52:06.320896
| 2020-05-18T01:23:40
| 2020-05-18T01:23:40
| 119,094,452
| 13
| 3
|
MIT
| 2020-05-18T01:23:42
| 2018-01-26T19:32:37
|
Python
|
UTF-8
|
Python
| false
| false
| 7,699
|
py
|
#!/usr/bin/python
# -*-coding: utf-8 -*-
import csv
from itertools import chain
import json
import jsonschema
from pycsvschema.validators import header_validators
from pycsvschema import defaults, _utilities
from typing import Dict, Optional
class Validator:
_CSV_DEFAULT_PARS = {
'delimiter': ',',
'doublequote': True,
'escapechar': None,
'lineterminator': '\r\n',
'quotechar': '"',
'quoting': csv.QUOTE_MINIMAL,
'skipinitialspace': False,
'strict': False
}
def __init__(self, csvfile: str, schema: Dict, output: Optional[str] = None, errors: str = 'raise', **kwargs):
"""
:param csvfile: Path to CSV file
:param schema: CSV Schema in dict
:param output: Path to output file of errors. If output is None, print the error message. Default: None.
        :param errors: {'raise', 'coerce'} If errors is 'raise', stop the validation when it meets the first error. If
            errors is 'coerce', output all errors.
Validator also accepts parameters of csv.reader, that includes delimiter, doublequote, escapechar,
lineterminator, quotechar, quoting, skipinitialspace and strict
See details on https://docs.python.org/3/library/csv.html#dialects-and-formatting-parameters
"""
self.csvfile = csvfile
self.schema = schema
self.output = output
if errors not in {'raise', 'coerce'}:
raise ValueError("Unknown value for parameter errors")
self.errors = errors
self.header = []
self.csv_pars = {
**self._CSV_DEFAULT_PARS,
**{k: kwargs[k]
for k in set(kwargs).intersection(self._CSV_DEFAULT_PARS)}
}
self.column_validators = {'columns': {}, 'unfoundfields': {}}
self.validate_schema()
self.update_schema()
def validate_schema(self):
meta_schema = json.load(open('pycsvschema/schema.json', 'r'))
jsonschema.validate(self.schema, meta_schema)
def update_schema(self):
# Convert list in schema into set
# missingValues
if 'missingValues' not in self.schema.keys():
self.schema['missingValues'] = defaults.MISSINGVALUES
self.schema['missingValues'] = set(self.schema['missingValues'])
# enum in fields, definitions and patternFields
fields_schema_with_array = (
self.schema['fields'], self.schema['definitions'].values(), self.schema['patternFields'].values()
)
array_keywords = ('trueValues', 'falseValues', 'enum')
for fields in fields_schema_with_array:
for field in fields:
for k in array_keywords:
if k in field.keys():
field[k] = set(field[k])
def validate(self):
with open(self.csvfile, 'r') as csvfile:
csv_reader = csv.reader(csvfile, **self.csv_pars)
# Read first line as header
self.header = next(csv_reader)
self.prepare_field_schema()
with _utilities.file_writer(self.output) as output:
# Concat errors from header checking and row checking
for error in chain(self.check_header(), self.check_rows(csv_reader)):
if self.errors == 'raise':
raise error
else:
output.write(str(error))
output.write('\n')
def prepare_field_schema(self):
"""
Prepare validators from `fields` option for every column
Sample self.column_validators
{
'columns':{
0: {
'column': '<COLUMN_NAME>',
'field_schema': {'name':'id', 'type': 'number'},
'validators': [
< function csvchecker._validators.validate_type >,
< function csvchecker._validators.validate_type >
],
'patternfields': {
'<PATTERN>': {
'field_schema': {'name':'id', 'type': 'number'},
'column': '<COLUMN_NAME>'
}
}
}
},
'unfoundfields': {
'<FIELD_NAME>': {
'field_schema': {'name':'id', 'type': 'number'},
'column': '<COLUMN_NAME>'
}
},
'definitions': {
'ref1': {
'validators': [
< function csvchecker._validators.validate_type >,
< function csvchecker._validators.validate_type >
],
'field_schema': {'name':'id', 'type': 'number'}
}
},
'patternfields': {
'ref1': {
'validators': [
< function csvchecker._validators.validate_type >,
< function csvchecker._validators.validate_type >
],
'field_schema': {'name':'id', 'type': 'number'}
}
}
}
"""
# Sample header_index {'col_1': [0, 1],}
# column names might not be unique
header_index = {}
for k, v in enumerate(self.header):
if v in header_index:
header_index[v].append(k)
else:
header_index[v] = [k]
for field_schema in self.schema.get('fields', defaults.FIELDS):
column_info = {'field_schema': field_schema, 'column': field_schema['name']}
_utilities.find_row_validators(column_info=column_info, field_schema=field_schema)
# Pass the validators to one or more than one columns
if field_schema['name'] in header_index.keys():
for column_index in header_index[field_schema['name']]:
self.column_validators['columns'][column_index] = column_info
# Store the unfound field names in column_validators.unfoundfields
else:
self.column_validators['unfoundfields'][field_schema['name']] = column_info
def check_header(self):
for validator_name, validator in header_validators.HEADER_OPTIONS.items():
if validator_name in self.schema:
yield from validator(self.header, self.schema, self.column_validators)
yield from header_validators.field_required(self.header, self.schema, self.column_validators)
def check_rows(self, csvreader, callback=lambda *args: None):
for line_num, row in enumerate(csvreader):
for index, column_info in self.column_validators['columns'].items():
c = {'value': row[index], 'row': line_num + 1, 'column': self.header[index]}
# Update c.value to None if value is in missingValues
yield from header_validators.missingvalues(c, self.schema, self.column_validators)
for validator in column_info['validators']:
# Type validator convert cell value into target type, other validators don't accept None value
# if validator is row_validators.field_type or c['value'] is not None:
yield from validator(c, self.schema, column_info['field_schema'])
callback(line_num, row)
class CSV2JSON(Validator):
def __init__(self, csvfile: str, schema: Dict, output: Optional[str], **kwargs):
super(CSV2JSON, self).__init__(csvfile, schema, output, **kwargs)
|
[
"bot@guangyangli.com"
] |
bot@guangyangli.com
|
3ee4399592389c469e7d560ccbd2ad7609a0a702
|
29db67145af7749df38a80b016253e8a8bf78941
|
/BooleanWeatherRecs.py
|
fdad5a1dcd0c115244bdf437643cc33eea6ddf97
|
[] |
no_license
|
holtsho1/CS1301xI
|
b42e78d80904641afb98b75c314ecc38cc075cab
|
e87a11ceb4e77cb10745ab072e723c50ad8bfd0f
|
refs/heads/main
| 2023-07-30T20:39:44.985990
| 2021-09-22T17:57:40
| 2021-09-22T17:57:40
| 406,101,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
hot = True
cold = False
rainy = True
windy = False
snowy = False
#You may modify the lines of code above, but don't move them!
#When you Submit your code, we'll change these lines to
#assign different values to the variables.
#Imagine you're writing a clothing-recommendation app that
#makes suggestions based on the weather. It has booleans
#representing five different kinds of weather: hot, cold,
#rainy, windy, snowy.
#
#The app recommends four kinds of clothing:
#
# - a jacket, if it's either cold or windy.
# - boots, if it's cold and snowy.
# - flip flops, if it's hot, unless it's rainy.
# - a t-shirt, if it's hot, unless it's rainy or windy.
#
#Write some code below that will print four lines, one for
#each of the four types of clothing. Under the original
#values for the variables above, the lines should look
#like this:
#
#Jacket: False
#Boots: False
#Flip-Flops: False
#T-shirt: False
#
#The values (True and False) will differ based on the
#values assigned to hot, cold, windy, snowy, and rainy
#at the start of the program.
#
#Hint: To print these lines, you'll need to add the
#result of the expression to a string of the clothing item.
#To do that, we'll need to convert the boolean from the
#expression into a string.
#Add your code here!
print("Jacket: " + (str(cold or (windy))))
print("Boots: " + (str(cold and (snowy))))
print("Flip-Flops: " + (str(hot and ((not rainy)))))
print("T-shirt: " + (str(hot and ((not rainy) or (not windy)))))
|
[
"noreply@github.com"
] |
holtsho1.noreply@github.com
|
99fcf0218276618b74ea510ce25cbc262f3247aa
|
545b6fbd3d6e3ad62e0eaaa294bffba3a0335f23
|
/securityheaders/checkers/xframeoptions/checker.py
|
8be95b97597714bfa42bc5fa7d9964706a7ce82d
|
[
"Apache-2.0"
] |
permissive
|
xl-sec/securityheaders
|
f345071f2cabe0b57ab9bc5cfb40e8e2ac7f5196
|
e2f40f888b8f476dd98f939e1b624e4b7621f4a0
|
refs/heads/master
| 2023-08-09T08:09:01.650477
| 2022-06-02T22:53:10
| 2022-06-02T22:53:10
| 151,466,324
| 0
| 0
|
Apache-2.0
| 2018-10-03T19:11:10
| 2018-10-03T19:11:10
| null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from securityheaders.models.xframeoptions import XFrameOptions
from securityheaders.checkers import Checker
class XFrameOptionsChecker(Checker):
def __init__(self):
pass
def getxframeoptions(self, headers):
return self.extractheader(headers, XFrameOptions)
|
[
"koen@buyens.org"
] |
koen@buyens.org
|
b5edce9801df1c032936c25cd34f33b8c4ed1dc2
|
343dc9d92b4f7be849209965c12c2920240afd9e
|
/migrations/versions/38aa8ac3902e_.py
|
f40cc88ae3bbb6ca9adfe2d31b00158ab8c63dcd
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
CodeForFoco/volunteercore
|
f64d92072c64c348553a662226fd8e9fbe7decd5
|
d6bea86e0478982a557b4cd8cc0e7c87aefe9e9a
|
refs/heads/master
| 2021-06-26T23:10:03.620131
| 2020-01-11T16:16:26
| 2020-01-11T16:16:26
| 150,109,394
| 2
| 4
|
MIT
| 2020-09-18T02:22:09
| 2018-09-24T13:46:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 894
|
py
|
"""empty message
Revision ID: 38aa8ac3902e
Revises: 911f36815f83
Create Date: 2019-05-05 10:34:40.692805
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '38aa8ac3902e'
down_revision = '911f36815f83'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("opportunity") as batch_op:
batch_op.drop_column('job_number')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("opportunity") as batch_op:
batch_op.add_column(sa.Column('job_number', sa.VARCHAR(length=50), autoincrement=False, nullable=True))
batch_op.create_unique_constraint('opportunity_job_number_key', ['job_number'])
# ### end Alembic commands ###
|
[
"steven@anothernewthing.com"
] |
steven@anothernewthing.com
|
d517bf7a86a6bb8f4e1fea27fe749fd4ad2c761f
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/darc/darc_agent.py
|
2b939e99ed8352d2240d70709327f8fdd8866a42
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 8,358
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the DARC agent."""
import collections
import gin
import tensorflow as tf
from tf_agents.agents.sac import sac_agent
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
DarcLossInfo = collections.namedtuple(
"DarcLossInfo",
(
"critic_loss",
"actor_loss",
"alpha_loss",
"sa_classifier_loss",
"sas_classifier_loss",
),
)
@gin.configurable
class DarcAgent(sac_agent.SacAgent):
"""An agent that implements the DARC algorithm."""
def __init__(self, *args,
classifier=None,
classifier_optimizer=None,
classifier_loss_weight=1.0,
use_importance_weights=False,
unnormalized_delta_r=False,
**kwargs):
self._classifier = classifier
self._classifier_optimizer = classifier_optimizer
self._classifier_loss_weight = classifier_loss_weight
self._use_importance_weights = use_importance_weights
self._unnormalized_delta_r = unnormalized_delta_r
super(DarcAgent, self).__init__(*args, **kwargs)
def _train(self, experience, weights, real_experience=None):
assert real_experience is not None
if self._use_importance_weights:
assert weights is None
sas_input = tf.concat(
[
experience.observation[:, 0],
experience.action[:, 0],
experience.observation[:, 1],
],
axis=-1,
)
# Set training=False so no input noise is added.
sa_probs, sas_probs = self._classifier(sas_input, training=False)
weights = (
sas_probs[:, 1] * sa_probs[:, 0] / (sas_probs[:, 0] * sa_probs[:, 1]))
loss_info = super(DarcAgent, self)._train(experience, weights)
trainable_classifier_variables = self._classifier.trainable_variables
with tf.GradientTape(watch_accessed_variables=False) as tape:
assert (trainable_classifier_variables
), "No trainable classifier variables to optimize."
tape.watch(trainable_classifier_variables)
(
classifier_loss,
sa_classifier_loss,
sas_classifier_loss,
) = self.classifier_loss(experience, real_experience)
classifier_loss = self._classifier_loss_weight * classifier_loss
tf.debugging.check_numerics(classifier_loss,
"classifier loss is inf or nan.")
tf.debugging.check_numerics(sa_classifier_loss,
"sa classifier loss is inf or nan.")
tf.debugging.check_numerics(sas_classifier_loss,
"sas classifier loss is inf or nan.")
critic_grads = tape.gradient(classifier_loss,
trainable_classifier_variables)
self._apply_gradients(critic_grads, trainable_classifier_variables,
self._classifier_optimizer)
darc_loss_info = DarcLossInfo(
critic_loss=loss_info.extra.critic_loss,
actor_loss=loss_info.extra.actor_loss,
alpha_loss=loss_info.extra.alpha_loss,
sa_classifier_loss=sa_classifier_loss,
sas_classifier_loss=sas_classifier_loss,
)
loss_info = loss_info._replace(extra=darc_loss_info)
return loss_info
def _experience_to_sas(self, experience):
squeeze_time_dim = not self._critic_network_1.state_spec
(
time_steps,
policy_steps,
next_time_steps,
) = trajectory.experience_to_transitions(experience, squeeze_time_dim)
actions = policy_steps.action
return tf.concat(
[time_steps.observation, actions, next_time_steps.observation], axis=-1)
def classifier_loss(self, experience, real_experience):
with tf.name_scope("classifier_loss"):
sim_sas_input = self._experience_to_sas(experience)
real_sas_input = self._experience_to_sas(real_experience)
sas_input = tf.concat([sim_sas_input, real_sas_input], axis=0)
batch_size = tf.shape(real_sas_input)[0]
y_true = tf.concat(
[
tf.zeros(batch_size, dtype=tf.int32),
tf.ones(batch_size, dtype=tf.int32),
],
axis=0,
)
tf.debugging.assert_equal(
tf.reduce_mean(tf.cast(y_true, tf.float32)),
0.5,
"Classifier labels should be 50% ones.",
)
# Must enable training=True to use input noise.
sa_probs, sas_probs = self._classifier(sas_input, training=True)
sa_classifier_loss = tf.keras.losses.sparse_categorical_crossentropy(
y_true, sa_probs)
sas_classifier_loss = tf.keras.losses.sparse_categorical_crossentropy(
y_true, sas_probs)
classifier_loss = sa_classifier_loss + sas_classifier_loss
sa_correct = tf.argmax(sa_probs, axis=1, output_type=tf.int32) == y_true
sa_accuracy = tf.reduce_mean(tf.cast(sa_correct, tf.float32))
sas_correct = tf.argmax(sas_probs, axis=1, output_type=tf.int32) == y_true
sas_accuracy = tf.reduce_mean(tf.cast(sas_correct, tf.float32))
tf.compat.v2.summary.scalar(
name="classifier_loss",
data=tf.reduce_mean(classifier_loss),
step=self.train_step_counter,
)
tf.compat.v2.summary.scalar(
name="sa_classifier_loss",
data=tf.reduce_mean(sa_classifier_loss),
step=self.train_step_counter,
)
tf.compat.v2.summary.scalar(
name="sas_classifier_loss",
data=tf.reduce_mean(sas_classifier_loss),
step=self.train_step_counter,
)
tf.compat.v2.summary.scalar(
name="sa_classifier_accuracy",
data=sa_accuracy,
step=self.train_step_counter,
)
tf.compat.v2.summary.scalar(
name="sas_classifier_accuracy",
data=sas_accuracy,
step=self.train_step_counter,
)
return classifier_loss, sa_classifier_loss, sas_classifier_loss
@gin.configurable
def critic_loss(
self,
time_steps,
actions,
next_time_steps,
td_errors_loss_fn,
gamma=1.0,
reward_scale_factor=1.0,
weights=None,
training=False,
delta_r_scale=1.0,
delta_r_warmup=0,
):
sas_input = tf.concat(
[time_steps.observation, actions, next_time_steps.observation], axis=-1)
# Set training=False so no input noise is added.
sa_probs, sas_probs = self._classifier(sas_input, training=False)
sas_log_probs = tf.math.log(sas_probs)
sa_log_probs = tf.math.log(sa_probs)
if self._unnormalized_delta_r: # Option for ablation experiment.
delta_r = sas_log_probs[:, 1] - sas_log_probs[:, 0]
else: # Default option (i.e., the correct version).
delta_r = (
sas_log_probs[:, 1] - sas_log_probs[:, 0] - sa_log_probs[:, 1] +
sa_log_probs[:, 0])
common.generate_tensor_summaries("delta_r", delta_r,
self.train_step_counter)
is_warmup = tf.cast(self.train_step_counter < delta_r_warmup, tf.float32)
tf.compat.v2.summary.scalar(
name="is_warmup", data=is_warmup, step=self.train_step_counter)
next_time_steps = next_time_steps._replace(reward=next_time_steps.reward +
delta_r_scale *
(1 - is_warmup) * delta_r)
return super(DarcAgent, self).critic_loss(
time_steps,
actions,
next_time_steps,
td_errors_loss_fn,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
weights=weights,
training=training,
)
def _check_train_argspec(self, kwargs):
"""Overwrite to avoid checking that real_experience has right dtype."""
del kwargs
return
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
a2c93f0c69b633cb865672b1f91cd082bed3ff02
|
d22b5c06488ba2930238d2676e1d84ddcf32f60f
|
/project.py
|
9aa923cb5f6bb097fa72f5db28b8e7ef7eacab77
|
[] |
no_license
|
ninad-41/p-127
|
10974cf2adceb5c46a4582a52a9e7f4d42c88581
|
be51f199cd32e472c3395d2769a94a0feaf0d5a4
|
refs/heads/main
| 2023-07-01T02:01:08.288094
| 2021-08-10T09:28:41
| 2021-08-10T09:28:41
| 394,596,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import csv
import requests
import pandas as pd
start_url="https://en.wikipedia.org/wiki/List_of_brightest_stars_and_other_record_stars"
page=requests.get(start_url)
print(page)
soup=BeautifulSoup(page.text,"html.parser")
star_table=soup.find("table")
temp_list=[]
table_rows=star_table.find_all("tr")
for tr in table_rows:
td=tr.find_all("td")
row=[i.text.rstrip()for i in td]
temp_list.append(row)
names=[]
distance=[]
mass=[]
radius=[]
lum=[]
for i in range(1,len(temp_list)):
names.append(temp_list[i][1])
distance.append(temp_list[i][3])
mass.append(temp_list[i][5])
radius.append(temp_list[i][6])
lum.append(temp_list[i][7])
df2=pd.DataFrame(list(zip(names,distance,mass,radius,lum)),columns=["star_name","Distance","Mass","Radius","Luminosity"])
print(df2)
df2.to_csv("brightstars.csv")
|
[
"noreply@github.com"
] |
ninad-41.noreply@github.com
|
1a4079aced3f8fe3308367ca5f2560c677775692
|
290dbf0107d93ebc8d50790b267f9552c13d810f
|
/generator/char_manager.py
|
6e451e0b1574291507ce9073b03287ba2c4477ca
|
[
"MIT"
] |
permissive
|
codebox/homoglyph
|
a01cb0de3570b6ad8a0bdb8ff7a50e10374a4a51
|
0209d35fe8ad79348b520401da8affe8df188909
|
refs/heads/master
| 2022-09-30T13:17:52.559898
| 2022-09-07T09:45:54
| 2022-09-07T09:45:54
| 45,297,276
| 472
| 63
|
MIT
| 2022-09-07T10:20:25
| 2015-10-31T10:14:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
class CharacterManager:
def __init__(self):
self.chars_to_sets = {}
def add_pair(self, a, b):
a_set = self.get_set_for_char(a)
b_set = self.get_set_for_char(b)
if a_set and b_set:
if a_set is b_set:
pass # do nothing, this pair of chars are already associated
else:
a_set.update(b_set)
for b_member in b_set:
self.chars_to_sets[b_member] = a_set
elif a_set:
a_set.add(b)
self.chars_to_sets[b] = a_set
elif b_set:
b_set.add(a)
self.chars_to_sets[a] = b_set
else:
self.chars_to_sets[a] = self.chars_to_sets[b] = set([a,b])
def get_set_for_char(self, c):
return self.chars_to_sets[c] if c in self.chars_to_sets else None
def get_list_of_sets(self):
l = []
for s in map(sorted, self.chars_to_sets.values()):
if s not in l:
l.append(s)
return sorted(l)
|
[
"rob@codebox.org.uk"
] |
rob@codebox.org.uk
|
3faa37a4fbec6153f7d042d062f13089108f84d8
|
0cfe2999149e1e9baf527b0b47e2a4cd6b862ade
|
/lcf/migrations/0093_auto_20170506_1651.py
|
2584477384fad2d4c0a43db376b9a583d41a744a
|
[] |
no_license
|
gamzatti/lcf
|
c7234ab96aa349013c3d90bd482c83087071e219
|
ebff02b1e6c95b653f027f0e05125b754c6af5af
|
refs/heads/master
| 2021-01-22T18:07:52.286504
| 2017-07-20T09:41:17
| 2017-07-20T09:41:17
| 85,061,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-06 15:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lcf', '0092_auto_20170506_1407'),
]
operations = [
migrations.AddField(
model_name='scenario',
name='csv_inc_notes',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='pot',
name='name',
field=models.CharField(choices=[('FIT', 'Feed-in-tariff'), ('E', 'Emerging'), ('M', 'Mature'), ('SN', 'Separate negotiations')], default='E', max_length=3),
),
migrations.AlterField(
model_name='technology',
name='name',
field=models.CharField(choices=[('NW', 'Negawatts'), ('PVLS', 'Solar PV'), ('TS', 'Tidal stream'), ('ONW', 'Onshore wind'), ('NU', 'Nuclear'), ('TL', 'Tidal lagoon'), ('WA', 'Wave'), ('OFW', 'Offshore wind')], default='OFW', max_length=4),
),
]
|
[
"emilycoats@riseup.net"
] |
emilycoats@riseup.net
|
6ccf52a98e441ab08c92a13eed7214827dd041dd
|
1a487105a8a4d243da17dad0b08b695ed34df01a
|
/config.py
|
2408ce4fecdb706acfc0a2f4b9d82d537aafd5be
|
[] |
no_license
|
AlgoMathITMO/cashback-calculator
|
2a397d061bbd0b6ed8707da3a747e6871e88d4bf
|
caefc85e4c6bc5c220205423b87039e52341aef6
|
refs/heads/master
| 2023-06-09T15:10:35.304776
| 2021-06-28T10:11:05
| 2021-06-28T10:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
from pathlib import Path
ROOT_DIR = Path(__file__).absolute().parent
DATA_DIR = ROOT_DIR / 'data'
assert DATA_DIR.exists()
TESTS_DIR = DATA_DIR / 'tests'
TESTS_DIR.mkdir(exist_ok=True)
|
[
"jurasicus@gmail.com"
] |
jurasicus@gmail.com
|
ef32aacc9511c647540dc788179a9be704e20e44
|
b8bafaece6c194a34b30189a3d141aa02314907a
|
/DynamoQueryLambda.py
|
01b1ceb41906f352b3697575d253430921c83405
|
[] |
no_license
|
jacobnpeterson/s3ImageSearch
|
75b2482aa5d92c063af18df53282441960763d3e
|
7ed95bbcde8790db2d1e92cbd55a3ff1944aae36
|
refs/heads/master
| 2020-04-12T04:59:22.165017
| 2018-12-18T16:12:16
| 2018-12-18T16:12:16
| 162,312,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
import json
import boto3
def lambda_handler(event, context):
# # TODO implement
dynamo = boto3.resource('dynamodb')
table = dynamo.Table('group-project')
response = table.query(
KeyConditionExpression = "tag = :t",
ExpressionAttributeValues = {
":t": event['tag']
}
)
return {
'statusCode': 200,
'body': response['Items']
}
|
[
"jacobnpeterson@gmail.com"
] |
jacobnpeterson@gmail.com
|
4acec193263138ac1651bf0f9f7c57922ac476d4
|
d9b3289354d8f75ae8dd9988a89b08596bd4cae9
|
/forms.py
|
592959eee5e797b4187808e7b99722b3236eb761
|
[] |
no_license
|
DataCraft-AI/pgdevops
|
8827ab8fb2f60d97a22c03317903b71a12a49611
|
f489bfb22b5b17255f85517cb1443846133dc378
|
refs/heads/master
| 2023-02-10T05:44:00.117387
| 2020-01-22T13:40:58
| 2020-01-22T13:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
from wtforms import StringField, ValidationError, HiddenField
from flask_security.forms import RegisterForm, EqualTo, password_required
from wtforms.validators import InputRequired, Email, Length
from Components import Components as pgc
import os, sys
import subprocess
import json
PGC_HOME = os.getenv("PGC_HOME", "")
PGC_LOGS = os.getenv("PGC_LOGS", "")
pgc_scripts_path = os.path.join(PGC_HOME, 'hub', 'scripts')
if pgc_scripts_path not in sys.path:
sys.path.append(pgc_scripts_path)
def check_ami(ami_id="pgdevops"):
cmd = os.path.join(PGC_HOME, "pgc")
pgcCmd = "{0} {1} {2} {3}".format(cmd, "verify-ami", ami_id, "--json")
rc = 0
msg = ""
try:
process = subprocess.check_output(pgcCmd,
shell=True)
    except subprocess.CalledProcessError as e:
        # 'e' is only bound inside this block in Python 3, so parse the error output here.
        rc = e.returncode
        if rc > 0:
            final_out = json.loads(e.output.strip().decode('ascii'))[0]
            msg = str(final_out['msg'])
return {"rc": rc, "msg": msg}
class RegisterForm(RegisterForm):
checkAMI = check_ami()
if checkAMI.get('rc') != 2:
ami_form = True
ami_id = StringField('AMI Instance ID', validators=[Length(max=50)])
def validate_ami_id(form, field):
validationData = check_ami(str(field.data))
if validationData['rc'] != 0:
raise ValidationError(validationData['msg'])
else:
pass
|
[
"denis@lussier.io"
] |
denis@lussier.io
|
5081e077eb0dae2d1b8968d7690c6ecb973aaa5d
|
08cd91baf0179885c43e1c1e24ec6f93aad25914
|
/Part 2 - Regression/Section 5 - Multiple Linear Regression/MultipleLinearRegression.py
|
a42b5a3511da3407bfe8344578e69e43838007aa
|
[] |
no_license
|
iamsid2/Machine-Learning-using-Python
|
f34fe253af04280d28b93daa378d55c37e65673d
|
f60d0b972f0ba4e991e81c3fb20aff679910befb
|
refs/heads/master
| 2020-03-19T02:54:07.703055
| 2018-07-28T12:12:59
| 2018-07-28T12:12:59
| 135,672,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
#Removing Dummy Variable Trap
X=X[:,1:]
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the linear regression model to the Training Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,y_train)
#Predicting the Test Set Results
y_pred = regressor.predict(X_test)
# Building the optimal model using Backward Elimination
import statsmodels.formula.api as sm
X=np.append(arr=np.ones((50,1)).astype(int), values = X, axis = 1)
X_opt = X[:,[0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,1,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3,4,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3,5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:,[0,3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
|
[
"shaktimund97@gmail.com"
] |
shaktimund97@gmail.com
|
b47801f335526193b4673fd6f1d466b8847a845f
|
3ecfd36938e3202ea64087f01c8afeaa1baf7e29
|
/am_conf.py
|
48cf0f3b05ce160d1d458560f84e972062b9e038
|
[] |
no_license
|
zycsmile/IRP
|
32b6bd1d8019d8f47c4f487ea51f65c6d74f62ad
|
592cf8f006edf80e5ad9d0adc7dd18f823ca9523
|
refs/heads/master
| 2020-07-01T08:14:39.114904
| 2016-11-18T02:57:15
| 2016-11-18T02:57:15
| 74,089,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,660
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""method that read global conf
"""
__author__ = 'guoshaodan01@baidu.com (Guo Shaodan)'
import ConfigParser
import logging
g_conf = None
class AmConf(object):
def __init__(self, parser):
self.zk_server = parser.get("zookeeper", "zk_server")
logging.info("got conf zookeeper.zk_server: [%s]", self.zk_server)
self.zk_root = parser.get("zookeeper", "zk_root")
logging.info("got conf zookeeper.zk_root: [%s]", self.zk_root)
self.zk_user = parser.get("zookeeper", "zk_username")
logging.info("got conf zookeeper.zk_user: [%s]", self.zk_user)
self.zk_pass = parser.get("zookeeper", "zk_password")
logging.info("got conf zookeeper.zk_pass: [%s]", self.zk_pass)
self.bh_loc = parser.get("bhcli", "bh_location")
logging.info("got conf bhcli.bh_location: [%s]", self.bh_loc)
self.bh_cluster = parser.get("bhcli", "bh_cluster")
logging.info("got conf bhcli.bh_cluster: [%s]", self.bh_cluster)
self.matrix_cluster = parser.get("bhcli", "matrix_cluster")
logging.info("got conf matrix_cluster: [%s]", self.matrix_cluster)
self.bh_debug = parser.getint("bhcli", "debug")
logging.info("got conf bhcli.debug: [%d]", self.bh_debug)
self.task_timeout = {}
for k, v in parser.items("task_timeout_sec"):
self.task_timeout[k] = int(v)
logging.info("got conf task_timeout_sec.%s: [%d]", k, self.task_timeout[k])
#default is necessary
self.task_timeout["default"] = parser.getint("task_timeout_sec", "default")
logging.info("got conf task_timeout_sec.default: [%d]", self.task_timeout['default'])
self.check_task_interval = parser.getint("task_manager", "check_task_interval")
logging.info("got conf task_manager.check_task_interval: [%d]", self.check_task_interval)
def LoadConf(conf_file):
global g_conf
ret = 0
conf_parser = ConfigParser.ConfigParser()
try:
ret = conf_parser.read(conf_file)
except Exception as e:
logging.error("read config file failed: config_file=[%s]", conf_file)
return 1
if len(ret) == 0:
logging.error("read config file failed: config_file=[%s]", conf_file)
return 1
logging.info("read conf file successfully, config_file=[%s]", conf_file)
try:
g_conf = AmConf(conf_parser)
    except Exception as e:
logging.error("read config file failed: config_file=[%s], error=[%s]", conf_file, e)
return 1
logging.info("read conf key-value successfully, config_file=[%s]", conf_file)
return 0
|
[
"zycsmile@163.com"
] |
zycsmile@163.com
|
f5e13893b6f731b53d834d8641a0aba4d7dad8b9
|
81fddd04069c358f90b4e1788f5aec9151112147
|
/pybullet_tools/pr2_problems.py
|
978a2c2c2d48644d96ba2ad93c785a940dbb5fd8
|
[] |
no_license
|
yijiangh/ss-pybullet
|
f61a83b139155d195d105fd6d00c3e10c2b963be
|
8eb8fd468b4364f85b037ab5b24738b255a2418c
|
refs/heads/master
| 2020-04-02T22:07:03.264780
| 2019-02-08T16:38:34
| 2019-02-08T16:38:34
| 154,822,036
| 1
| 0
| null | 2018-10-26T11:09:06
| 2018-10-26T11:09:06
| null |
UTF-8
|
Python
| false
| false
| 7,565
|
py
|
import numpy as np
from .pr2_utils import set_arm_conf, REST_LEFT_ARM, open_arm, \
close_arm, get_carry_conf, arm_conf, get_other_arm, set_group_conf
from .utils import create_box, set_base_values, set_point, set_pose, get_pose, \
get_bodies, z_rotation, load_model, load_pybullet, HideOutput
class Problem(object):
def __init__(self, robot, arms=tuple(), movable=tuple(), grasp_types=tuple(),
surfaces=tuple(), sinks=tuple(), stoves=tuple(), buttons=tuple(),
goal_conf=None, goal_holding=tuple(), goal_on=tuple(),
goal_cleaned=tuple(), goal_cooked=tuple(), body_names={}):
self.robot = robot
self.arms = arms
self.movable = movable
self.grasp_types = grasp_types
self.surfaces = surfaces
self.sinks = sinks
self.stoves = stoves
self.buttons = buttons
self.goal_conf = goal_conf
self.goal_holding = goal_holding
self.goal_on = goal_on
self.goal_cleaned = goal_cleaned
self.goal_cooked = goal_cooked
self.body_names = body_names
def __repr__(self):
return repr(self.__dict__)
def get_fixed_bodies(problem): # TODO: move to problem?
#return []
movable = [problem.robot] + list(problem.movable)
return list(filter(lambda b: b not in movable, get_bodies()))
def create_pr2(use_drake=True, fixed_base=True):
if use_drake:
pr2_path = "models/drake/pr2_description/urdf/pr2_simplified.urdf"
else:
pr2_path = "models/pr2_description/pr2.urdf"
with HideOutput():
pr2 = load_model(pr2_path, fixed_base=fixed_base)
set_group_conf(pr2, 'torso', [0.2])
return pr2
def create_floor():
return load_pybullet("plane.urdf")
def create_table():
# TODO: table URDF
raise NotImplementedError()
def create_door():
return load_pybullet("data/door.urdf")
# https://github.com/bulletphysics/bullet3/search?l=XML&q=.urdf&type=&utf8=%E2%9C%93
TABLE_MAX_Z = 0.6265 # TODO: the table legs don't seem to be included for collisions?
def holding_problem(arm='left', grasp_type='side'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_base_values(pr2, (0, -2, 0))
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
plane = create_floor()
table = load_pybullet("table/table.urdf")
#table = load_pybullet("table_square/table_square.urdf")
box = create_box(.07, .05, .15)
set_point(box, (0, 0, TABLE_MAX_Z + .15/2))
return Problem(robot=pr2, movable=[box], arms=[arm], grasp_types=[grasp_type], surfaces=[table],
goal_conf=get_pose(pr2), goal_holding=[(arm, box)])
def stacking_problem(arm='left', grasp_type='top'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_base_values(pr2, (0, -2, 0))
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
plane = create_floor()
table1 = load_pybullet("table/table.urdf")
#table = load_pybullet("table_square/table_square.urdf")
block = create_box(.07, .05, .15)
set_point(block, (0, 0, TABLE_MAX_Z + .15/2))
table2 = load_pybullet("table/table.urdf")
set_base_values(table2, (2, 0, 0))
return Problem(robot=pr2, movable=[block], arms=[arm],
grasp_types=[grasp_type], surfaces=[table1, table2],
#goal_on=[(block, table1)])
goal_on=[(block, table2)])
def create_kitchen(w=.5, h=.7):
floor = create_floor()
table = create_box(w, w, h, color=(.75, .75, .75, 1))
set_point(table, (2, 0, h/2))
mass = 1
#mass = 0.01
#mass = 1e-6
cabbage = create_box(.07, .07, .1, mass=mass, color=(0, 1, 0, 1))
#cabbage = load_model(BLOCK_URDF, fixed_base=False)
set_point(cabbage, (2, 0, h + .1/2))
sink = create_box(w, w, h, color=(.25, .25, .75, 1))
set_point(sink, (0, 2, h/2))
stove = create_box(w, w, h, color=(.75, .25, .25, 1))
set_point(stove, (0, -2, h/2))
return table, cabbage, sink, stove
def cleaning_problem(arm='left', grasp_type='top'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
table, cabbage, sink, stove = create_kitchen()
#door = create_door()
#set_point(door, (2, 0, 0))
return Problem(robot=pr2, movable=[cabbage], arms=[arm], grasp_types=[grasp_type],
surfaces=[table, sink, stove], sinks=[sink], stoves=[stove],
goal_cleaned=[cabbage])
def cooking_problem(arm='left', grasp_type='top'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
table, cabbage, sink, stove = create_kitchen()
return Problem(robot=pr2, movable=[cabbage], arms=[arm], grasp_types=[grasp_type],
surfaces=[table, sink, stove], sinks=[sink], stoves=[stove],
goal_cooked=[cabbage])
def cleaning_button_problem(arm='left', grasp_type='top'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
table, cabbage, sink, stove = create_kitchen()
d = 0.1
sink_button = create_box(d, d, d, color=(0, 0, 0, 1))
set_pose(sink_button, ((0, 2-(.5+d)/2, .7-d/2), z_rotation(np.pi/2)))
stove_button = create_box(d, d, d, color=(0, 0, 0, 1))
set_pose(stove_button, ((0, -2+(.5+d)/2, .7-d/2), z_rotation(-np.pi/2)))
return Problem(robot=pr2, movable=[cabbage], arms=[arm], grasp_types=[grasp_type],
surfaces=[table, sink, stove], sinks=[sink], stoves=[stove],
buttons=[(sink_button, sink), (stove_button, stove)],
goal_conf=get_pose(pr2), goal_holding=[(arm, cabbage)], goal_cleaned=[cabbage])
def cooking_button_problem(arm='left', grasp_type='top'):
other_arm = get_other_arm(arm)
initial_conf = get_carry_conf(arm, grasp_type)
pr2 = create_pr2()
set_arm_conf(pr2, arm, initial_conf)
open_arm(pr2, arm)
set_arm_conf(pr2, other_arm, arm_conf(other_arm, REST_LEFT_ARM))
close_arm(pr2, other_arm)
table, cabbage, sink, stove = create_kitchen()
d = 0.1
sink_button = create_box(d, d, d, color=(0, 0, 0, 1))
set_pose(sink_button, ((0, 2-(.5+d)/2, .7-d/2), z_rotation(np.pi/2)))
stove_button = create_box(d, d, d, color=(0, 0, 0, 1))
set_pose(stove_button, ((0, -2+(.5+d)/2, .7-d/2), z_rotation(-np.pi/2)))
return Problem(robot=pr2, movable=[cabbage], arms=[arm], grasp_types=[grasp_type],
surfaces=[table, sink, stove], sinks=[sink], stoves=[stove],
buttons=[(sink_button, sink), (stove_button, stove)],
goal_conf=get_pose(pr2), goal_holding=[(arm, cabbage)], goal_cooked=[cabbage])
|
[
"caelan@mit.edu"
] |
caelan@mit.edu
|
faba4ffd24bf024a2248fe641d65689e162ec028
|
ab189562f36bc1cbd723964795020864f0cee979
|
/twisted-benchmarks/pb.py
|
efc8c65e00f2c97dbc81d69824ca8b93b9c829b9
|
[
"Apache-2.0"
] |
permissive
|
mksh/greenreactor
|
20a007b51ad182e20ceefdd0b607992997cf1eb9
|
99e9ed7a5d7ebb8a8cbd1307009749bb08907c10
|
refs/heads/master
| 2020-09-19T21:51:51.944632
| 2019-08-07T04:21:37
| 2019-08-07T04:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
"""
Benchmark for Twisted Spread.
"""
from twisted.python.compat import _PY3
if _PY3:
raise ImportError("Doesn't work on Py3 yet")
from twisted.spread.pb import PBServerFactory, PBClientFactory, Root
from benchlib import Client, driver
class BenchRoot(Root):
def remote_discard(self, argument):
pass
class Client(Client):
_structure = [
'hello' * 100,
{'foo': 'bar',
'baz': 100,
u'these are bytes': (1, 2, 3)}]
def __init__(self, reactor, port):
super(Client, self).__init__(reactor)
self._port = port
def run(self, *args, **kwargs):
def connected(reference):
self._reference = reference
return super(Client, self).run(*args, **kwargs)
client = PBClientFactory()
d = client.getRootObject()
d.addCallback(connected)
self._reactor.connectTCP('127.0.0.1', self._port, client)
return d
def cleanup(self):
self._reference.broker.transport.loseConnection()
def _request(self):
d = self._reference.callRemote('discard', self._structure)
d.addCallback(self._continue)
d.addErrback(self._stop)
def main(reactor, duration):
concurrency = 15
server = PBServerFactory(BenchRoot())
port = reactor.listenTCP(0, server)
client = Client(reactor, port.getHost().port)
d = client.run(concurrency, duration)
def cleanup(passthrough):
d = port.stopListening()
d.addCallback(lambda ignored: passthrough)
return d
d.addCallback(cleanup)
return d
if __name__ == '__main__':
import sys
import pb
driver(pb.main, sys.argv)
|
[
"859905874@qq.com"
] |
859905874@qq.com
|
9896d9e937f830fdecc5cc3f58a94b7412122654
|
fa93a9c8db537b819349234b17d0c3ffe4824a28
|
/docs/code-completion/uio.py
|
0a3071f423a62295538b51d799b98aaa23dc0b9e
|
[
"MIT"
] |
permissive
|
yanshanqingyuan/micropython
|
b37fa36b09196f25ee9fef76a8368e598aa285da
|
bb57a344967391d22f3430f720bcd0a5882e4a1a
|
refs/heads/master
| 2020-07-11T15:57:07.922531
| 2019-08-27T02:51:15
| 2019-08-27T02:51:15
| 204,589,958
| 0
| 0
|
MIT
| 2019-08-27T09:04:44
| 2019-08-27T01:04:43
| null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
"""
The uio module contains stream (file-like) objects and helper functions.
"""
def open(name, mode='r', **kwargs) -> None:
    """Open a file; this is aliased to the built-in open(). All ports (that provide access to a file system) are required to support the mode parameter, but support for the other arguments varies by port."""
    ...
class FileIO(...):
    """This type opens a file in binary mode, equivalent to open(name, "rb"). Instances of this type should not be created directly."""
    ...
class TextIOWrapper(...):
    """This type opens a file in text mode, equivalent to open(name, "rt"). Instances of this type should not be created directly."""
    ...
class StringIO(string):
    """This type opens a file in text mode, equivalent to open(name, "rt"). Instances of this type should not be created directly."""
    ...
class BytesIO(string):
    """
    In-memory file objects. StringIO is used for text-mode I/O (like a file opened with "t"), and BytesIO for binary mode (like a file opened with "b").
    The initial contents of a file object can be specified with a string argument (an ordinary string for StringIO, a bytes object for BytesIO).
    All the usual file methods, such as read(), write(), seek(), flush() and close(), are available on these objects.
    """
    def __init__(self) -> None:
        ...
    def getvalue(self) -> None:
        """Get the current contents of the buffer."""
        ...
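# A minimal usage sketch (illustrative addition, not part of the original stubs):
#   import uio
#   buf = uio.BytesIO()
#   buf.write(b"hello")
#   buf.seek(0)
#   buf.read()      # -> b'hello'
#   buf.getvalue()  # -> b'hello'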
|
[
"SummerGift@qq.com"
] |
SummerGift@qq.com
|
fe19fe390ebd3184b79651ce1040786507a3415d
|
4bd4bacecee33cada173e427b5ecb1d758bafaad
|
/tests/acceptance/reset_password_redis/reset_password_redis.py
|
c5a945c74fc5c1a666f3b673ebc6546cc26afcd7
|
[] |
no_license
|
kenorb-contrib/scalarizr
|
3f2492b20910c42f6ab38749545fdbb79969473f
|
3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83
|
refs/heads/master
| 2022-11-26T10:00:58.706301
| 2017-11-02T16:41:34
| 2017-11-02T16:41:34
| 108,550,233
| 0
| 2
| null | 2020-07-24T11:05:36
| 2017-10-27T13:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 712
|
py
|
# -*- coding: utf-8 -*-
from lettuce import step
from lettuce import world
import redis
from scalarizr.api.binding.jsonrpc_http import HttpServiceProxy
@step(u'Given I am connected to Redis server')
def given_i_have_mysql_server(step):
world.conn = HttpServiceProxy('http://localhost:8010',
'/etc/scalr/private.d/keys/default')
@step(u'When I call reset password')
def when_i_call_reset_password(step):
world.conn.redis.reset_password(new_password='test_pwd')
@step(u'Then password should be changed')
def then_password_should_be_changed(step):
conn = redis.StrictRedis(host='localhost', port=6379, password='test_pwd')
assert conn is not None
|
[
"kenorb@users.noreply.github.com"
] |
kenorb@users.noreply.github.com
|
3832bf0b5e08f61420b382a1239102a482dc651f
|
9c2f8ee697d8b764a58aa8c9a85df5ad019a4b77
|
/homelyapp/migrations/0004_renter.py
|
fafadd6bb307ef910c03ae4c5f806ec73a2813ee
|
[] |
no_license
|
asifvs447/code_test
|
9ef133edf2eed3ce409661390552bc66c5a817e2
|
c2a10b6298962f00379d27b51a4ab348e76fbb7d
|
refs/heads/master
| 2021-08-29T01:27:53.779903
| 2017-12-13T09:05:09
| 2017-12-13T09:05:09
| 114,096,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 02:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('homelyapp', '0003_rentoutproperties_house_rented'),
]
operations = [
migrations.CreateModel(
name='Renter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('house_rented', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='homelyapp.RentoutProperties')),
('tenant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"asif.vs447@gmail.com"
] |
asif.vs447@gmail.com
|
16af6ef192e459b123626a361db1399c2838121e
|
c8da432f164ef398dece0dcbf5f22b070f8f4c40
|
/migrations/versions/db2b1f6271e5_.py
|
5c54c5b7f0e98fce21398952275807952b976758
|
[] |
no_license
|
OmarYehia/Bank
|
33ae12bd048877472e4174bea7377773da5f78d8
|
5c694489ed87052a0cb409dddf11d457666d96ea
|
refs/heads/master
| 2023-03-10T09:44:47.991345
| 2021-02-20T16:55:40
| 2021-02-20T16:55:40
| 283,049,935
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
"""empty message
Revision ID: db2b1f6271e5
Revises: aab43e821e31
Create Date: 2020-07-28 00:35:07.538771
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'db2b1f6271e5'
down_revision = 'aab43e821e31'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('accounts', 'key',
existing_type=sa.VARCHAR(),
nullable=True)
op.alter_column('accounts', 'salt',
existing_type=sa.VARCHAR(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('accounts', 'salt',
existing_type=sa.VARCHAR(),
nullable=False)
op.alter_column('accounts', 'key',
existing_type=sa.VARCHAR(),
nullable=False)
# ### end Alembic commands ###
|
[
"oyehia94@gmail.com"
] |
oyehia94@gmail.com
|
83af5547275e89119ec1508ac22cb24b2d333c5b
|
82bffecc5c829a9ae7f3cf825bd1334954c3d59f
|
/0x00-python-hello_world/2-print.py
|
6381f3b6abf9b1cf24f169876636f77e59f5d975
|
[] |
no_license
|
Bereket-ferde/alx-higher_level_programming
|
38f1af4cbcf64ade603c03a8c0b9844beb6cdb4b
|
ad3b1baccb14aec9d83b928ac5d8fb5e8a81b24c
|
refs/heads/main
| 2023-07-22T08:37:20.306632
| 2021-09-08T18:28:02
| 2021-09-08T18:28:02
| 404,119,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
#!/usr/bin/python3
print('"Programming is like building a multilingual puzzle\n')
|
[
"noreply@github.com"
] |
Bereket-ferde.noreply@github.com
|