blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7765791badddd5eb5ca6d8c9a4bc2d036baee7a
|
261dfac24ea95253ac630ad0695d97fc63414178
|
/setup.py
|
b3428a79457b67b40b6d896e1bfdd17f4676df62
|
[
"MIT"
] |
permissive
|
aliabd/GEM-metrics
|
2cbfdab27d9d10da988f2cdb8e7b04efad40498f
|
4f5b389c447bcbecb5a407df93ec8c8ace1e9ce8
|
refs/heads/main
| 2023-04-10T04:26:41.920549
| 2021-03-30T16:09:06
| 2021-03-30T16:09:06
| 353,037,176
| 0
| 1
|
MIT
| 2021-03-30T14:48:33
| 2021-03-30T14:48:33
| null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
#!/usr/bin/env python3
"""Install script for the GEM Challenge metrics package."""
from setuptools import setup
from setuptools import find_packages

install_requires = []
dependency_links = []

# Read requirements.txt inside a context manager so the handle is closed
# promptly (the original left the file open for the interpreter to collect).
with open('requirements.txt') as requirements_file:
    requirements = [line.strip() for line in requirements_file.readlines()]

for package in requirements:
    if package.startswith('git+'):
        # Turn "git+https://host/owner/repo.git" into the PEP 508 direct
        # reference form "repo @ git+https://host/owner/repo.git".
        pk_name = package.split('/')[-1][:-4]
        install_requires.append(f'{pk_name} @ {package}')
    else:
        install_requires.append(package)

setup(
    name='gem_metrics',
    version='0.1dev',
    description='GEM Challenge metrics',
    author='Ondrej Dusek, Aman Madaan, Emiel van Miltenburg, Sebastian Gehrmann, Nishant Subramani, Dhruv Kumar, Miruna Clinciu',
    author_email='odusek@ufal.mff.cuni.cz',
    url='https://github.com/GEM-benchmark/GEM-metrics',
    download_url='https://github.com/GEM-benchmark/GEM-metrics.git',
    license='MIT License',
    install_requires=install_requires,
    dependency_links=dependency_links,
    packages=find_packages(),
    entry_points={
        'console_scripts': ['gem_metrics=gem_metrics:main']
    }
)
|
[
"odusek@ufal.mff.cuni.cz"
] |
odusek@ufal.mff.cuni.cz
|
e0b0c82e0cb6d63eb83dc518df6312878a49e050
|
8ac362dd20ada13d2882c31c491a586b6cc44ed2
|
/inventoryanalytics/simulation/stochastic/des.py
|
54df9eafd4a3a69df0c3165721dd3afefe880b64
|
[
"MIT"
] |
permissive
|
emilio-garcia-ie/inventoryanalytics
|
54be13ee63399f231b7a9ccd372ddb9f0583bd0f
|
dd2a49386441bd51287d08ee73059cac7748c898
|
refs/heads/master
| 2023-03-17T02:56:46.309779
| 2020-12-27T00:27:37
| 2020-12-27T00:27:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,849
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from queue import PriorityQueue
from collections import defaultdict
def plot_inventory(values, label):
    """Line-plot a series of [time, value] samples under the given legend label."""
    series = np.array(values)
    df = pd.DataFrame({'x': series[:, 0], 'fx': series[:, 1]})
    # Relabel the x ticks 1..n instead of the raw sample indices.
    plt.xticks(range(len(values)), range(1, len(values) + 1))
    plt.xlabel("t")
    plt.ylabel("items")
    plt.plot('x', 'fx', data=df, linestyle='-', marker='', label=label)
#########################
## DES ##
#########################
class EventWrapper():
    """Makes queue entries orderable by the wrapped event's priority.

    Lower priority values sort first, so they fire earlier when timestamps tie.
    """

    def __init__(self, event):
        self.event = event

    def __lt__(self, other):
        mine, theirs = self.event.priority, other.event.priority
        return mine < theirs
class DES():
    """Minimal discrete-event simulation engine driven by a priority queue."""

    def __init__(self, end):
        self.events = PriorityQueue()
        self.end = end      # simulation horizon
        self.time = 0       # current simulated time

    def start(self):
        """Consume events in chronological order until the horizon is passed."""
        while True:
            current_time, wrapper = self.events.get()
            self.time = current_time
            if self.time > self.end:
                break
            wrapper.event.end()

    def schedule(self, event: EventWrapper, time_lag: int):
        """Enqueue `event` to fire `time_lag` periods from the current time."""
        self.events.put((self.time + time_lag, event))
##########################
## WAREHOUSE ##
##########################
class Warehouse:
    """Single-echelon warehouse: tracks inventory state and per-period costs."""

    def __init__(self, inventory_level, fixed_ordering_cost, holding_cost, penalty_cost):
        self.i = inventory_level      # net inventory (negative => backorders)
        self.K = fixed_ordering_cost
        self.h = holding_cost
        self.p = penalty_cost
        self.o = 0                    # outstanding (pipeline) orders
        self.period_costs = defaultdict(int)  # period -> cost incurred that period

    def receive_order(self, Q, time):
        """Book a delivery of Q units: on-hand up, pipeline down."""
        self.review_inventory(time)
        self.i += Q
        self.o -= Q
        self.review_inventory(time)

    def order(self, Q, time):
        """Place an order of Q units and incur the fixed ordering cost."""
        self.review_inventory(time)
        self.period_costs[time] += self.K
        self.o += Q
        self.review_inventory(time)

    def on_hand_inventory(self):
        return self.i if self.i > 0 else 0

    def backorders(self):
        return -self.i if self.i < 0 else 0

    def issue(self, demand, time):
        """Satisfy demand, possibly driving net inventory negative."""
        self.review_inventory(time)
        self.i -= demand

    def inventory_position(self):
        return self.i + self.o

    def review_inventory(self, time):
        """Record a [time, value] sample; lazily create the series on first call."""
        if not hasattr(self, 'levels'):
            self.levels = [[0, self.i]]
            self.on_hand = [[0, self.on_hand_inventory()]]
            self.positions = [[0, self.inventory_position()]]
        else:
            self.levels.append([time, self.i])
            self.on_hand.append([time, self.on_hand_inventory()])
            self.positions.append([time, self.inventory_position()])

    def incur_end_of_period_costs(self, time):
        """Charge holding and penalty costs for the given period."""
        self._incur_holding_cost(time)
        self._incur_penalty_cost(time)

    def _incur_holding_cost(self, time):
        # Holding cost applies to positive on-hand stock only.
        self.period_costs[time] += self.h * self.on_hand_inventory()

    def _incur_penalty_cost(self, time):
        # Penalty cost applies to backordered units only.
        self.period_costs[time] += self.p * self.backorders()
##########################
## EVENTS ##
##########################
class CustomerDemand:
    """Poisson customer demand: issues one unit, then reschedules itself."""

    def __init__(self, des: DES, demand_rate: float, warehouse: Warehouse):
        self.d = demand_rate   # mean number of demands per period
        self.w = warehouse
        self.des = des
        self.priority = 2      # low priority: fires after same-time orders/period-end

    def end(self):
        # One unit demanded now, then an exponential inter-arrival gap.
        self.w.issue(1, self.des.time)
        gap = np.random.exponential(1 / self.d)
        self.des.schedule(EventWrapper(self), gap)
class EndOfPeriod:
    """Period-end event: charges costs, then reschedules for the next period."""

    def __init__(self, des: DES, warehouse: Warehouse):
        self.w = warehouse
        self.des = des
        # Priority 0 sorts first in the min-heap, so period-end runs BEFORE
        # any same-time order or demand events (original comment said "low").
        self.priority = 0

    def end(self):
        # Costs belong to the period that just finished.
        self.w.incur_end_of_period_costs(self.des.time - 1)
        self.des.schedule(EventWrapper(EndOfPeriod(self.des, self.w)), 1)
class Order:
    """Replenishment event: places an order, then schedules its delivery."""

    def __init__(self, des: DES, Q: float, warehouse: Warehouse, lead_time: float):
        self.Q = Q
        self.w = warehouse
        self.des = des
        self.lead_time = lead_time
        self.priority = 1  # medium priority

    def end(self):
        self.w.order(self.Q, self.des.time)
        # Delivery arrives after the lead time elapses.
        arrival = ReceiveOrder(self.des, self.Q, self.w)
        self.des.schedule(EventWrapper(arrival), self.lead_time)
class ReceiveOrder:
    """Delivery event: books the arrival of Q units at the warehouse."""

    def __init__(self, des: DES, Q: float, warehouse: Warehouse):
        self.Q = Q
        self.w = warehouse
        self.des = des
        self.priority = 1  # medium priority

    def end(self):
        self.w.receive_order(self.Q, self.des.time)
# --- Simulation driver ----------------------------------------------------
np.random.seed(1234)  # reproducible demand stream
instance = {"inventory_level": 10, "fixed_ordering_cost": 100, "holding_cost": 1, "penalty_cost": 5}
w = Warehouse(**instance)
N = 20 # planning horizon length
des = DES(N)
d = CustomerDemand(des, 10, w)
des.schedule(EventWrapper(d), 0) # schedule a demand immediately
lead_time = 1
o = Order(des, 50, w, lead_time)
# Place a 50-unit replenishment order every 5 periods.
for t in range(0,20,5):
    des.schedule(EventWrapper(o), t) # schedule orders
des.schedule(EventWrapper(EndOfPeriod(des, w)), 1) # schedule EndOfPeriod at the end of the first period
des.start()
# Report per-period and average costs, then plot the inventory trajectories.
print("Period costs: "+str([w.period_costs[e] for e in w.period_costs]))
print("Average cost per period: "+ '%.2f' % (sum([w.period_costs[e] for e in w.period_costs])/len(w.period_costs)))
plot_inventory(w.positions, "inventory position")
plot_inventory(w.levels, "inventory level")
plt.legend(loc="lower right")
plt.show()
|
[
"robros@gmail.com"
] |
robros@gmail.com
|
cc5fc62c9759d253cf7a93ef0a60d9ff82f46004
|
31722378f12d231d820bc5b68af1575266385f74
|
/data_analysis/draw-policy-graph.py
|
0c75017cfb43170b5be3c55b4e2122493b73067a
|
[
"MIT"
] |
permissive
|
msgpo/trade-dst
|
c8213e820ba89d625bcdcf1d81e04a3769ea8dc8
|
b32562545b9ebf7e1ac358466146a15146bcd42f
|
refs/heads/master
| 2022-05-31T01:38:03.614691
| 2020-04-27T17:17:18
| 2020-04-27T17:17:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,987
|
py
|
#!/usr/bin/env python3
import json
import sys
from collections import Counter
from utils.fix_label import fix_general_label_error
# Domains tracked in the experiments; "none" marks turns with no active domain.
EXPERIMENT_DOMAINS = ["none", "hotel", "train", "restaurant", "attraction", "taxi"]
# Map each domain to its position in EXPERIMENT_DOMAINS.
DOMAIN_INDICES = {domain: index for index, domain in enumerate(EXPERIMENT_DOMAINS)}
def get_slot_information():
    """Load the MultiWOZ 2.1 ontology and return its slot names.

    Slot keys outside EXPERIMENT_DOMAINS are dropped. Non-"book" slot names
    are lowercased with spaces removed; "book" slots keep their spaces.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open("data/multi-woz/MULTIWOZ2.1/ontology.json", 'r') as ontology_file:
        ontology = json.load(ontology_file)
    ontology_domains = {k: v for k, v in ontology.items()
                        if k.split("-")[0] in EXPERIMENT_DOMAINS}
    return [k.replace(" ", "").lower() if ("book" not in k) else k.lower()
            for k in ontology_domains.keys()]
# Computed once at import time: the canonical slot list used for label fixing.
ALL_SLOTS = get_slot_information()
def fix_none_typo(value):
    """Normalize common annotation typos/mislabels to the literal 'none'."""
    none_variants = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    return 'none' if value in none_variants else value
def get_node_key_slot_names(label_dict):
    """Build (domain-key, slot-key) strings from a belief-state dict.

    Slots whose value normalizes to 'none' are skipped; returns
    ('none', 'none') when no informative slot remains.
    """
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    kept_slots = []
    seen_domains = set()
    for slot_key, slot_value in label_dict.items():
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        seen_domains.add(slot_key.split('-')[0])
        kept_slots.append(slot_key.replace(' ', '-'))
    if not kept_slots:
        return 'none', 'none'
    return ','.join(seen_domains), ','.join(kept_slots)
def get_node_key_slot_names_delta(turn_label):
    """Build (domain-key, slot-key) strings from a turn-label pair list.

    Same contract as get_node_key_slot_names, but over (slot, value) pairs.
    """
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    kept_slots = []
    seen_domains = set()
    for slot_key, slot_value in turn_label:
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        seen_domains.add(slot_key.split('-')[0])
        kept_slots.append(slot_key.replace(' ', '-'))
    if not kept_slots:
        return 'none', 'none'
    return ','.join(seen_domains), ','.join(kept_slots)
def get_node_key_slot_counts(label_dict):
    """Summarize a belief-state dict as 'domain=count,...' (or 'none')."""
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    counts = Counter()
    for slot_key, slot_value in label_dict.items():
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        counts[slot_key.split('-')[0]] += 1
    if not counts:
        return 'none'
    return ','.join(f'{domain}={count}' for domain, count in counts.items())
def get_node_key_slot_domains(label_dict):
    """Summarize a belief-state dict as a comma-joined domain set (or 'none')."""
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    seen_domains = set()
    for slot_key, slot_value in label_dict.items():
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        seen_domains.add(slot_key.split('-')[0])
    if not seen_domains:
        return 'none'
    return ','.join(seen_domains)
def get_node_key_slot_counts_delta(turn_label):
    """Summarize a turn-label pair list as 'domain=count,...' (or 'none')."""
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    counts = Counter()
    for slot_key, slot_value in turn_label:
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        counts[slot_key.split('-')[0]] += 1
    if not counts:
        return 'none'
    return ','.join(f'{domain}={count}' for domain, count in counts.items())
def get_node_key_domains_delta(turn_label):
    """Summarize a turn-label pair list as a comma-joined domain set (or 'none')."""
    # Inlined 'none'-typo normalization (same variant set as fix_none_typo).
    none_typos = ("not men", "not", "not mentioned", "", "not mendtioned", "fun", "art")
    seen_domains = set()
    for slot_key, slot_value in turn_label:
        if slot_value in none_typos:
            slot_value = 'none'
        if slot_value == 'none':
            continue
        seen_domains.add(slot_key.split('-')[0])
    if not seen_domains:
        return 'none'
    return ','.join(seen_domains)
def load_data():
    """Count state nodes and transition edges over dialogue turn labels.

    Reads the dialogue JSON named by sys.argv[1] (default data/train_dials.json)
    and returns (nodes, edges) Counters keyed by node key and key pair.
    """
    filename = sys.argv[1] if len(sys.argv) >= 2 else 'data/train_dials.json'
    with open(filename) as fp:
        data = json.load(fp)
    nodes = Counter()
    edges = Counter()
    for dialogue in data:
        prev_node_key = 'none'
        for turn in dialogue['dialogue']:
            label_dict = fix_general_label_error(turn['belief_state'], False, ALL_SLOTS)
            domains, node_key = get_node_key_slot_names_delta(turn['turn_label'])
            # Keep only turns introducing no slots or restaurant-domain slots.
            if domains not in ('none', 'restaurant'):
                continue
            nodes[node_key] += 1
            edges[(prev_node_key, node_key)] += 1
            # Dump the odd train -> attraction transitions for manual inspection.
            if prev_node_key == 'train' and node_key == 'attraction':
                print(json.dumps(dialogue, indent=2), file=sys.stderr)
            prev_node_key = node_key
    return nodes, edges
def main():
    """Emit a Graphviz digraph of state nodes and transition edges to stdout."""
    nodes, edges = load_data()
    print('strict digraph states {')
    node_num = {}
    # Number nodes by descending frequency; the count goes in a trailing comment.
    for index, (node, node_count) in enumerate(nodes.most_common()):
        node_num[node] = index
        node_label = node.replace('restaurant-', '')
        print(f's{index} [label="{node_label}"]; # {node_count}')
    for (src, dst), count in edges.items():
        print(f's{node_num[src]} -> s{node_num[dst]} [label="{count}"];')
    print('}')


if __name__ == '__main__':
    main()
|
[
"gcampagn@cs.stanford.edu"
] |
gcampagn@cs.stanford.edu
|
4217ffcfb6d14595b20ca22faca926672b8147a9
|
8ae806da6b57a1bbd16e32a96a19cec94501be07
|
/recursionDigitSum.py
|
6516daba7ed0f9c4cf9b8bf2ffb93b2905e0de76
|
[] |
no_license
|
vivek28111992/python-for-algo-and-ds
|
9bc6914a527c13380abe0d5193c15cc9d93a8220
|
d58dbe55277316ddb07c6947bccc8496396158fe
|
refs/heads/master
| 2021-09-05T11:25:37.981814
| 2018-01-26T22:15:00
| 2018-01-26T22:15:00
| 111,090,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# recursion digit sum
def sum_func(n):
    """Recursively sum the decimal digits of a non-negative integer.

    >>> sum_func(1235)
    11
    """
    # Base case: a single digit is its own digit sum. Testing n < 10 instead
    # of len(str(n)) == 1 avoids a string conversion per call and does not
    # recurse forever on negative input (str(-1) has length 2, so the
    # original never reached its base case for n < 0).
    if n < 10:
        return n
    return n % 10 + sum_func(n // 10)
# Demo: 1 + 2 + 3 + 5 == 11
print(sum_func(1235))
|
[
"pawarvivek29@gmail.com"
] |
pawarvivek29@gmail.com
|
5ffb1a37ae4eb1db69fe7d02e736def239a0d403
|
729a5764d957ae0e208773222fbdf6bc2da9dcc5
|
/origin.py
|
12dafc882ebf1d60efc1e4511b7fcb281707d3bb
|
[] |
no_license
|
KongDeyu1532/Helmet_Face_Detection
|
0f198cca6bceae18c65bf63fcfdcefa0722e6eef
|
b98506fb8c90df9b352ce4133867b87ab86c53e6
|
refs/heads/master
| 2023-04-30T01:09:44.470828
| 2023-04-17T06:52:14
| 2023-04-17T06:52:14
| 259,599,305
| 3
| 0
| null | 2022-06-22T01:51:10
| 2020-04-28T10:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 39,153
|
py
|
# encoding: utf-8
# author: KongDeyu
import argparse
import time
import mxnet as mx
from gluoncv import model_zoo, utils, data
import telegram
import cv2
import dlib
from PyQt5.QtCore import QTimer, QThread, pyqtSignal, QRegExp, Qt
from PyQt5.QtGui import QImage, QPixmap, QIcon, QTextCursor, QRegExpValidator
from PyQt5.QtWidgets import QDialog, QApplication, QMainWindow, QMessageBox
from PyQt5.uic import loadUi
import os
import webbrowser
import logging
import logging.config
import sqlite3
import sys
import threading
import queue
import multiprocessing
import winsound
from configparser import ConfigParser
from datetime import datetime
# Raised when the trained face-data file cannot be found.
class TrainingDataNotFoundError(FileNotFoundError):
    pass
# Raised when the database file cannot be found.
class DatabaseNotFoundError(FileNotFoundError):
    pass
# Cross-process queues linking the UI to the helmet-detection worker:
# frames go out via ImgQueue, annotated results come back via ResultQueue.
ImgQueue= multiprocessing.Queue()
ResultQueue = multiprocessing.Queue()
class CoreUI(QMainWindow):
    """Main application window.

    Wires webcam capture to two worker threads (face processing and helmet
    detection) and hosts the database check, alarm, and logging subsystems.
    """
    database = './FaceBase.db'
    trainingData = './recognizer/trainingData.yml'
    cap = cv2.VideoCapture()
    captureQueue = queue.Queue()  # frame queue
    alarmQueue = queue.LifoQueue()  # alarm queue, last-in first-out
    logQueue = multiprocessing.Queue()  # log queue
    receiveLogSignal = pyqtSignal(str)  # log signal

    def __init__(self):
        super(CoreUI, self).__init__()
        loadUi('./ui/Core.ui', self)
        self.setWindowIcon(QIcon('./icons/icon.png'))
        self.setFixedSize(1161, 623)

        # Image capture
        self.isExternalCameraUsed = False
        self.useExternalCameraCheckBox.stateChanged.connect(
            lambda: self.useExternalCamera(self.useExternalCameraCheckBox))

        # Initialize the communication queues
        # ImgQueue= multiprocessing.Queue
        # ResultQueue = multiprocessing.Queue
        self.faceProcessingThread = FaceProcessingThread()
        self.faceProcessingThread1 = FaceProcessingThread2()
        self.startWebcamButton.clicked.connect(self.startWebcam)

        # Database
        self.initDbButton.setIcon(QIcon('./icons/warning.png'))
        self.initDbButton.clicked.connect(self.initDb)
        self.timer = QTimer(self)  # initialize the frame-refresh timer
        self.timer.timeout.connect(self.updateFrame)

        # Feature toggles
        self.faceTrackerCheckBox.stateChanged.connect(
            lambda: self.faceProcessingThread.enableFaceTracker(self))
        # NOTE(review): FaceProcessingThread2 does not define enableFaceTracker
        # in this file — this lambda would raise if triggered; confirm.
        self.faceTrackerCheckBox.stateChanged.connect(
            lambda: self.faceProcessingThread1.enableFaceTracker(self))
        self.faceRecognizerCheckBox.stateChanged.connect(
            lambda: self.faceProcessingThread.enableFaceRecognizer(self))
        self.panalarmCheckBox.stateChanged.connect(lambda: self.faceProcessingThread.enablePanalarm(self))
        # Histogram equalization
        self.equalizeHistCheckBox.stateChanged.connect(
            lambda: self.faceProcessingThread.enableEqualizeHist(self))
        # Debug mode
        self.debugCheckBox.stateChanged.connect(lambda: self.faceProcessingThread.enableDebug(self))
        self.confidenceThresholdSlider.valueChanged.connect(
            lambda: self.faceProcessingThread.setConfidenceThreshold(self))
        self.autoAlarmThresholdSlider.valueChanged.connect(
            lambda: self.faceProcessingThread.setAutoAlarmThreshold(self))

        # Alarm system
        self.alarmSignalThreshold = 10
        self.panalarmThread = threading.Thread(target=self.recieveAlarm, daemon=True)
        self.isBellEnabled = True
        self.bellCheckBox.stateChanged.connect(lambda: self.enableBell(self.bellCheckBox))
        self.isTelegramBotPushEnabled = False
        self.telegramBotPushCheckBox.stateChanged.connect(
            lambda: self.enableTelegramBotPush(self.telegramBotPushCheckBox))
        self.telegramBotSettingsButton.clicked.connect(self.telegramBotSettings)

        # Help & support
        self.viewGithubRepoButton.clicked.connect(
            lambda: webbrowser.open('https://github.com/winterssy/face_recognition_py'))
        self.contactDeveloperButton.clicked.connect(lambda: webbrowser.open('https://t.me/winterssy'))

        # Logging system
        self.receiveLogSignal.connect(lambda log: self.logOutput(log))
        self.logOutputThread = threading.Thread(target=self.receiveLog, daemon=True)
        self.logOutputThread.start()

    # Check database status
    def initDb(self):
        """Verify the database and training data exist, then report the user count."""
        try:
            if not os.path.isfile(self.database):
                raise DatabaseNotFoundError
            if not os.path.isfile(self.trainingData):
                raise TrainingDataNotFoundError
            conn = sqlite3.connect(self.database)
            cursor = conn.cursor()
            cursor.execute('SELECT Count(*) FROM users')
            result = cursor.fetchone()
            dbUserCount = result[0]
        except DatabaseNotFoundError:
            logging.error('系统找不到数据库文件{}'.format(self.database))
            self.initDbButton.setIcon(QIcon('./icons/error.png'))
            self.logQueue.put('Error:未发现数据库文件,你可能未进行人脸采集')
        except TrainingDataNotFoundError:
            logging.error('系统找不到已训练的人脸数据{}'.format(self.trainingData))
            self.initDbButton.setIcon(QIcon('./icons/error.png'))
            self.logQueue.put('Error:未发现已训练的人脸数据文件,请完成训练后继续')
        except Exception as e:
            logging.error('读取数据库异常,无法完成数据库初始化')
            self.initDbButton.setIcon(QIcon('./icons/error.png'))
            self.logQueue.put('Error:读取数据库异常,初始化数据库失败')
        else:
            cursor.close()
            conn.close()
            if not dbUserCount > 0:
                logging.warning('数据库为空')
                self.logQueue.put('warning:数据库为空,人脸识别功能不可用')
                self.initDbButton.setIcon(QIcon('./icons/warning.png'))
            else:
                self.logQueue.put('Success:数据库状态正常,发现用户数:{}'.format(dbUserCount))
                self.initDbButton.setIcon(QIcon('./icons/success.png'))
                self.initDbButton.setEnabled(False)
                self.faceRecognizerCheckBox.setToolTip('须先开启人脸跟踪')
                self.faceRecognizerCheckBox.setEnabled(True)

    # Whether to use an external camera
    def useExternalCamera(self, useExternalCameraCheckBox):
        if useExternalCameraCheckBox.isChecked():
            self.isExternalCameraUsed = True
        else:
            self.isExternalCameraUsed = False

    # Open/close the webcam
    def startWebcam(self):
        if not self.cap.isOpened():
            if self.isExternalCameraUsed:
                camID = 1
            else:
                camID = 0
            self.cap.open(camID)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            ret, frame = self.cap.read()
            if not ret:
                logging.error('无法调用电脑摄像头{}'.format(camID))
                self.logQueue.put('Error:初始化摄像头失败')
                self.cap.release()
                self.startWebcamButton.setIcon(QIcon('./icons/error.png'))
            else:
                # self.myprocess = multiprocessing.Process(target=self.MyCore, args=())
                # self.myprocess.start()
                self.faceProcessingThread.start()  # start the OpenCV image-processing thread
                self.faceProcessingThread1.start()  # start the helmet-detection thread
                self.timer.start(5)  # start the frame-refresh timer
                self.panalarmThread.start()  # start the alarm service thread
                self.startWebcamButton.setIcon(QIcon('./icons/success.png'))
                self.startWebcamButton.setText('关闭摄像头')
        else:
            text = '如果关闭摄像头,须重启程序才能再次打开。'
            informativeText = '<b>是否继续?</b>'
            ret = CoreUI.callDialog(QMessageBox.Warning, text, informativeText, QMessageBox.Yes | QMessageBox.No,
                                    QMessageBox.No)
            if ret == QMessageBox.Yes:
                self.faceProcessingThread.stop()
                self.faceProcessingThread1.stop()
                # self.myprocess.stop()
                if self.cap.isOpened():
                    if self.timer.isActive():
                        self.timer.stop()
                    self.cap.release()
                self.realTimeCaptureLabel.clear()
                self.realTimeCaptureLabel.setText('<font color=red>摄像头未开启</font>')
                self.startWebcamButton.setText('摄像头已关闭')
                self.startWebcamButton.setEnabled(False)
                self.startWebcamButton.setIcon(QIcon())

    # Timer callback: refresh the displayed frame
    def updateFrame(self):
        if self.cap.isOpened():
            # ret, frame = self.cap.read()
            # if ret:
            #     self.showImg(frame, self.realTimeCaptureLabel)
            if not self.captureQueue.empty():
                captureData = self.captureQueue.get()
                realTimeFrame = captureData.get('realTimeFrame')
                self.displayImage(realTimeFrame, self.realTimeCaptureLabel)

    # Render an OpenCV image into a QLabel
    def displayImage(self, img, qlabel):
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # default: the image is stored using 8-bit indexes into a colormap, e.g. a grayscale image
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:  # rows[0], cols[1], channels[2]
            if img.shape[2] == 4:
                # The image is stored using a 32-bit byte-ordered RGBA format (8-8-8-8)
                # A: alpha channel (opacity); an alpha of 0% means fully transparent
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        # img.shape[1]: width, img.shape[0]: height, img.shape[2]: channel count
        # QImage.__init__ (self, bytes data, int width, int height, int bytesPerLine, Format format)
        # Build the QImage straight from the frame's memory buffer.
        # img.strides[0]: bytes per row (width*3 for RGB, width*4 for RGBA);
        # strides run outermost (whole row) to innermost (single uint8 element).
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0], qformat)
        qlabel.setPixmap(QPixmap.fromImage(outImage))
        qlabel.setScaledContents(True)  # scale the pixmap to fit the label

    # Alarm system: whether the device may ring
    def enableBell(self, bellCheckBox):
        if bellCheckBox.isChecked():
            self.isBellEnabled = True
            self.statusBar().showMessage('设备发声:开启')
        else:
            if self.isTelegramBotPushEnabled:
                self.isBellEnabled = False
                self.statusBar().showMessage('设备发声:关闭')
            else:
                # At least one alarm channel must stay enabled.
                self.logQueue.put('Error:操作失败,至少选择一种报警方式')
                self.bellCheckBox.setCheckState(Qt.Unchecked)
                self.bellCheckBox.setChecked(True)
        # print('isBellEnabled:', self.isBellEnabled)

    # Alarm system: whether TelegramBot push is allowed
    def enableTelegramBotPush(self, telegramBotPushCheckBox):
        if telegramBotPushCheckBox.isChecked():
            self.isTelegramBotPushEnabled = True
            self.statusBar().showMessage('TelegramBot推送:开启')
        else:
            if self.isBellEnabled:
                self.isTelegramBotPushEnabled = False
                self.statusBar().showMessage('TelegramBot推送:关闭')
            else:
                # At least one alarm channel must stay enabled.
                self.logQueue.put('Error:操作失败,至少选择一种报警方式')
                self.telegramBotPushCheckBox.setCheckState(Qt.Unchecked)
                self.telegramBotPushCheckBox.setChecked(True)
        # print('isTelegramBotPushEnabled:', self.isTelegramBotPushEnabled)

    # TelegramBot settings
    def telegramBotSettings(self):
        cfg = ConfigParser()
        cfg.read('./config/telegramBot.cfg', encoding='utf-8-sig')
        read_only = cfg.getboolean('telegramBot', 'read_only')
        # read_only = False
        if read_only:
            text = '基于安全考虑,系统拒绝了本次请求。'
            informativeText = '<b>请联系设备管理员。</b>'
            CoreUI.callDialog(QMessageBox.Critical, text, informativeText, QMessageBox.Ok)
        else:
            # Pre-fill the dialog with the current configuration.
            token = cfg.get('telegramBot', 'token')
            chat_id = cfg.get('telegramBot', 'chat_id')
            proxy_url = cfg.get('telegramBot', 'proxy_url')
            message = cfg.get('telegramBot', 'message')
            self.telegramBotDialog = TelegramBotDialog()
            self.telegramBotDialog.tokenLineEdit.setText(token)
            self.telegramBotDialog.telegramIDLineEdit.setText(chat_id)
            self.telegramBotDialog.socksLineEdit.setText(proxy_url)
            self.telegramBotDialog.messagePlainTextEdit.setPlainText(message)
            self.telegramBotDialog.exec()

    # Bell-ringing subprocess
    @staticmethod
    def bellProcess(queue):
        logQueue = queue
        logQueue.put('Info:设备正在响铃...')
        winsound.PlaySound('./alarm.wav', winsound.SND_FILENAME)

    # TelegramBot push subprocess
    @staticmethod
    def telegramBotPushProcess(queue, img=None):
        logQueue = queue
        cfg = ConfigParser()
        try:
            cfg.read('./config/telegramBot.cfg', encoding='utf-8-sig')
            # Read the TelegramBot configuration
            token = cfg.get('telegramBot', 'token')
            chat_id = cfg.getint('telegramBot', 'chat_id')
            proxy_url = cfg.get('telegramBot', 'proxy_url')
            message = cfg.get('telegramBot', 'message')
            # Route through a proxy when one is configured
            if proxy_url:
                proxy = telegram.utils.request.Request(proxy_url=proxy_url)
                bot = telegram.Bot(token=token, request=proxy)
            else:
                bot = telegram.Bot(token=token)
            bot.send_message(chat_id=chat_id, text=message)
            # Attach the suspected-stranger snapshot when available
            if img:
                bot.send_photo(chat_id=chat_id, photo=open(img, 'rb'), timeout=10)
        except Exception as e:
            logQueue.put('Error:TelegramBot推送失败')
        else:
            logQueue.put('Success:TelegramBot推送成功')

    # Alarm daemon: receives and handles alarm signals
    def recieveAlarm(self):
        while True:
            jobs = []
            # print(self.alarmQueue.qsize())
            if self.alarmQueue.qsize() > self.alarmSignalThreshold:  # alarm once signals exceed the threshold
                if not os.path.isdir('./unknown'):
                    os.makedirs('./unknown')
                lastAlarmSignal = self.alarmQueue.get()
                timestamp = lastAlarmSignal.get('timestamp')
                img = lastAlarmSignal.get('img')
                # Save a snapshot of the suspected stranger
                cv2.imwrite('./unknown/{}.jpg'.format(timestamp), img)
                logging.info('报警信号触发超出预设计数,自动报警系统已被激活')
                self.logQueue.put('Info:报警信号触发超出预设计数,自动报警系统已被激活')
                # Ring the bell if enabled
                if self.isBellEnabled:
                    p1 = multiprocessing.Process(target=CoreUI.bellProcess, args=(self.logQueue,))
                    p1.start()
                    jobs.append(p1)
                # Push via TelegramBot if enabled
                if self.isTelegramBotPushEnabled:
                    if os.path.isfile('./unknown/{}.jpg'.format(timestamp)):
                        img = './unknown/{}.jpg'.format(timestamp)
                    else:
                        img = None
                    p2 = multiprocessing.Process(target=CoreUI.telegramBotPushProcess, args=(self.logQueue, img))
                    p2.start()
                    jobs.append(p2)
                # Wait for this round of alarm processes to finish
                for p in jobs:
                    p.join()
                # Reset the accumulated alarm signals
                with self.alarmQueue.mutex:
                    self.alarmQueue.queue.clear()
            else:
                continue

    # Log daemon: receives log messages and relays them to the UI thread
    def receiveLog(self):
        while True:
            data = self.logQueue.get()
            if data:
                self.receiveLogSignal.emit(data)
            else:
                continue

    # Append a timestamped entry to the log pane
    def logOutput(self, log):
        # Current system time
        time = datetime.now().strftime('[%Y/%m/%d %H:%M:%S]')
        log = time + ' ' + log + '\n'
        self.logTextEdit.moveCursor(QTextCursor.End)
        self.logTextEdit.insertPlainText(log)
        self.logTextEdit.ensureCursorVisible()  # auto-scroll

    # System dialog helper
    @staticmethod
    def callDialog(icon, text, informativeText, standardButtons, defaultButton=None):
        msg = QMessageBox()
        msg.setWindowIcon(QIcon('./icons/icon.png'))
        msg.setWindowTitle('OpenCV Face Recognition System - Core')
        msg.setIcon(icon)
        msg.setText(text)
        msg.setInformativeText(informativeText)
        msg.setStandardButtons(standardButtons)
        if defaultButton:
            msg.setDefaultButton(defaultButton)
        return msg.exec()

    # Window close event: stop the OpenCV threads, timer and camera
    def closeEvent(self, event):
        if self.faceProcessingThread.isRunning:
            self.faceProcessingThread.stop()
        if self.faceProcessingThread1.isRunning:
            self.faceProcessingThread1.stop()
        # self.faceProcessingThread1.stop()
        if self.timer.isActive():
            self.timer.stop()
        if self.cap.isOpened():
            self.cap.release()
        event.accept()
# TelegramBot settings dialog
class TelegramBotDialog(QDialog):
    def __init__(self):
        super(TelegramBotDialog, self).__init__()
        loadUi('./ui/TelegramBotDialog.ui', self)
        self.setWindowIcon(QIcon('./icons/icon.png'))
        self.setFixedSize(550, 358)
        # Only digit strings are accepted as Telegram chat ids.
        chat_id_regx = QRegExp('^\d+$')
        chat_id_validator = QRegExpValidator(chat_id_regx, self.telegramIDLineEdit)
        self.telegramIDLineEdit.setValidator(chat_id_validator)
        self.okButton.clicked.connect(self.telegramBotSettings)

    def telegramBotSettings(self):
        """Validate the form, test the bot, and persist the configuration."""
        # Read user input
        token = self.tokenLineEdit.text().strip()
        chat_id = self.telegramIDLineEdit.text().strip()
        proxy_url = self.socksLineEdit.text().strip()
        message = self.messagePlainTextEdit.toPlainText().strip()
        # Validate and handle the input
        if not (token and chat_id and message):
            self.okButton.setIcon(QIcon('./icons/error.png'))
            CoreUI.logQueue.put('Error:API Token、Telegram ID和消息内容为必填项')
        else:
            ret = self.telegramBotTest(token, proxy_url)
            if ret:
                cfg_file = './config/telegramBot.cfg'
                cfg = ConfigParser()
                cfg.read(cfg_file, encoding='utf-8-sig')
                cfg.set('telegramBot', 'token', token)
                cfg.set('telegramBot', 'chat_id', chat_id)
                cfg.set('telegramBot', 'proxy_url', proxy_url)
                cfg.set('telegramBot', 'message', message)
                try:
                    with open(cfg_file, 'w', encoding='utf-8') as file:
                        cfg.write(file)
                except:
                    logging.error('写入telegramBot配置文件发生异常')
                    CoreUI.logQueue.put('Error:写入配置文件时发生异常,更新失败')
                else:
                    CoreUI.logQueue.put('Success:测试通过,系统已更新TelegramBot配置')
                    self.close()
            else:
                CoreUI.logQueue.put('Error:测试失败,无法更新TelegramBot配置')

    # TelegramBot connectivity test
    def telegramBotTest(self, token, proxy_url):
        """Return True when the bot token (optionally via proxy) authenticates."""
        try:
            # Route through a proxy when one is configured
            if proxy_url:
                proxy = telegram.utils.request.Request(proxy_url=proxy_url)
                bot = telegram.Bot(token=token, request=proxy)
            else:
                bot = telegram.Bot(token=token)
            bot.get_me()
        except Exception as e:
            return False
        else:
            return True
class Img:
    """Named frame container shipped through the inter-process image queue."""

    def __init__(self, name, img):
        self.name, self.img = name, img
class Result:
    """Detection outcome shipped back through the inter-process result queue."""

    def __init__(self, classname):
        self.classname = classname
class FaceProcessingThread2(QThread):
    """Helmet-detection worker thread.

    Pulls frames from the module-level ImgQueue, runs a YOLOv3 detector with
    classes ['hat', 'person'], and pushes annotated results to ResultQueue.
    """

    def __init__(self):
        super(FaceProcessingThread2, self).__init__()
        self.isRunning = True  # cleared by stop() to end the run loop

    def run(self):
        def parse_args():
            """CLI options: base network, input size, threshold and device flag."""
            parser = argparse.ArgumentParser(description='Train YOLO networks with random input shape.')
            parser.add_argument('--network', type=str, default='yolo3_mobilenet0.25_voc',
                                # use yolo3_darknet53_voc, yolo3_mobilenet1.0_voc, yolo3_mobilenet0.25_voc
                                help="Base network name which serves as feature extraction base.")
            parser.add_argument('--short', type=int, default=416,
                                help='Input data shape for evaluation, use 320, 416, 512, 608, '
                                     'larger size for dense object and big size input')
            parser.add_argument('--threshold', type=float, default=0.4,
                                help='confidence threshold for object detection')
            parser.add_argument('--gpu', action='store_false',
                                help='use gpu or cpu.')
            args = parser.parse_args()
            return args

        args = parse_args()
        ctx = mx.cpu()
        net = model_zoo.get_model(args.network, pretrained=False)
        classes = ['hat', 'person']
        # Initialize any parameters not already holding data.
        for param in net.collect_params().values():
            if param._data is not None:
                continue
            param.initialize()
        net.reset_class(classes)
        net.collect_params().reset_ctx(ctx)
        # Load the checkpoint matching the selected backbone.
        if args.network == 'yolo3_darknet53_voc':
            net.load_parameters('darknet.params', ctx=ctx)
            print('use darknet to extract feature')
        elif args.network == 'yolo3_mobilenet1.0_voc':
            net.load_parameters('mobilenet1.0.params', ctx=ctx)
            print('use mobile1.0 to extract feature')
        else:
            net.load_parameters('mobile0.25.params', ctx=ctx)
            print('use mobile0.25 to extract feature')

        while self.isRunning:
            imgResult = ImgQueue.get()  # blocks until a frame is available
            frame = imgResult.img
            # time.clock() was removed in Python 3.8; perf_counter() is the
            # documented replacement for interval timing.
            start = time.perf_counter()
            x = mx.nd.array(frame)
            x, orig_img = data.transforms.presets.yolo.transform_test(x)
            x = x.as_in_context(ctx)
            box_ids, scores, bboxes = net(x)
            ax = utils.viz.cv_plot_bbox(orig_img, bboxes[0], scores[0], box_ids[0], class_names=net.classes,
                                        thresh=args.threshold)
            print(ax)
            ResultQueue.put(Result(ax))
            elapsed = time.perf_counter() - start
            print("耗时:", elapsed)

    def stop(self):
        """Ask the loop to exit, then wait for the thread to terminate."""
        self.isRunning = False
        self.quit()
        self.wait()
# OpenCV线程
class FaceProcessingThread(QThread):
    def __init__(self):
        super(FaceProcessingThread, self).__init__()
        # Runtime flags toggled from the UI thread.
        self.isRunning = True
        self.isFaceTrackerEnabled = True
        self.isFaceRecognizerEnabled = False
        self.isPanalarmEnabled = True
        self.isDebugMode = False
        # Thresholds adjustable via the debug-mode sliders.
        self.confidenceThreshold = 50
        self.autoAlarmThreshold = 65
        self.isEqualizeHistEnabled = False
    # Toggle face tracking
    def enableFaceTracker(self, coreUI):
        if coreUI.faceTrackerCheckBox.isChecked():
            self.isFaceTrackerEnabled = True
            coreUI.statusBar().showMessage('人脸跟踪:开启')
        else:
            self.isFaceTrackerEnabled = False
            coreUI.statusBar().showMessage('人脸跟踪:关闭')
    # Toggle face recognition (requires face tracking to be on first)
    def enableFaceRecognizer(self, coreUI):
        if coreUI.faceRecognizerCheckBox.isChecked():
            if self.isFaceTrackerEnabled:
                self.isFaceRecognizerEnabled = True
                coreUI.statusBar().showMessage('人脸识别:开启')
            else:
                # Revert the checkbox: recognition depends on the tracker.
                CoreUI.logQueue.put('Error:操作失败,请先开启人脸跟踪')
                coreUI.faceRecognizerCheckBox.setCheckState(Qt.Unchecked)
                coreUI.faceRecognizerCheckBox.setChecked(False)
        else:
            self.isFaceRecognizerEnabled = False
            coreUI.statusBar().showMessage('人脸识别:关闭')
    # Toggle the alarm system
    def enablePanalarm(self, coreUI):
        if coreUI.panalarmCheckBox.isChecked():
            self.isPanalarmEnabled = True
            coreUI.statusBar().showMessage('报警系统:开启')
        else:
            self.isPanalarmEnabled = False
            coreUI.statusBar().showMessage('报警系统:关闭')
# 是否开启调试模式
def enableDebug(self, coreUI):
if coreUI.debugCheckBox.isChecked():
self.isDebugMode = True
coreUI.statusBar().showMessage('调试模式:开启')
else:
self.isDebugMode = False
coreUI.statusBar().showMessage('调试模式:关闭')
# 设置置信度阈值
def setConfidenceThreshold(self, coreUI):
if self.isDebugMode:
self.confidenceThreshold = coreUI.confidenceThresholdSlider.value()
coreUI.statusBar().showMessage('置信度阈值:{}'.format(self.confidenceThreshold))
# 设置自动报警阈值
def setAutoAlarmThreshold(self, coreUI):
if self.isDebugMode:
self.autoAlarmThreshold = coreUI.autoAlarmThresholdSlider.value()
coreUI.statusBar().showMessage('自动报警阈值:{}'.format(self.autoAlarmThreshold))
# 直方图均衡化
def enableEqualizeHist(self, coreUI):
if coreUI.equalizeHistCheckBox.isChecked():
self.isEqualizeHistEnabled = True
coreUI.statusBar().showMessage('直方图均衡化:开启')
else:
self.isEqualizeHistEnabled = False
coreUI.statusBar().showMessage('直方图均衡化:关闭')
def run(self):
faceCascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')
# 帧数、人脸ID初始化
frameCounter = 0
currentFaceID = 0
# 人脸跟踪器字典初始化
faceTrackers = {}
isTrainingDataLoaded = False
isDbConnected = False
while self.isRunning:
if CoreUI.cap.isOpened():
ret, frame = CoreUI.cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# 是否执行直方图均衡化
if self.isEqualizeHistEnabled:
gray = cv2.equalizeHist(gray)
faces = faceCascade.detectMultiScale(gray, 1.3, 5, minSize=(90, 90))
# 预加载数据文件
if not isTrainingDataLoaded and os.path.isfile(CoreUI.trainingData):
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(CoreUI.trainingData)
isTrainingDataLoaded = True
if not isDbConnected and os.path.isfile(CoreUI.database):
conn = sqlite3.connect(CoreUI.database)
cursor = conn.cursor()
isDbConnected = True
captureData = {}
realTimeFrame = frame.copy()
alarmSignal = {}
if self.isFaceTrackerEnabled:
# 要删除的人脸跟踪器列表初始化
fidsToDelete = []
for fid in faceTrackers.keys():
# 实时跟踪
trackingQuality = faceTrackers[fid].update(realTimeFrame)
# 如果跟踪质量过低,删除该人脸跟踪器
if trackingQuality < 7:
fidsToDelete.append(fid)
# 删除跟踪质量过低的人脸跟踪器
for fid in fidsToDelete:
faceTrackers.pop(fid, None)
for (_x, _y, _w, _h) in faces:
isKnown = False
if self.isFaceRecognizerEnabled:
# cv2.rectangle(realTimeFrame, (_x, _y), (_x + _w+50, _y + _h+50), (232, 138, 30), 2)
cut_img = realTimeFrame[_y:_y+_h,_x:_x+_w]
face_id, confidence = recognizer.predict(gray[_y:_y + _h, _x:_x + _w])
logging.debug('face_id:{},confidence:{}'.format(face_id, confidence))
if self.isDebugMode:
CoreUI.logQueue.put('Debug -> face_id:{},confidence:{}'.format(face_id, confidence))
# 从数据库中获取识别人脸的身份信息
try:
cursor.execute("SELECT * FROM users WHERE face_id=?", (face_id,))
result = cursor.fetchall()
# print(result)
if result:
en_name = result[0][3]
ImgQueue.put(Img(en_name,cut_img))
else:
raise Exception
except Exception as e:
logging.error('读取数据库异常,系统无法获取Face ID为{}的身份信息'.format(face_id))
CoreUI.logQueue.put('Error:读取数据库异常,系统无法获取Face ID为{}的身份信息'.format(face_id))
en_name = ''
# 若置信度评分小于置信度阈值,认为是可靠识别
#帧数自增:
frameCounter += 1
if confidence < self.confidenceThreshold:
isKnown = True
cv2.putText(realTimeFrame, en_name, (_x - 5, _y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 97, 255), 2)
if frameCounter == 10:
classnameresult = ResultQueue.get()
classname = classnameresult.classname
cv2.putText(realTimeFrame, classname, (_x + 150, _y - 10), cv2.FONT_HERSHEY_SIMPLEX,
1,(0, 97, 255), 2)
if classname == "hat":
cv2.rectangle(realTimeFrame, (_x, _y), (_x + _w + 50, _y + _h + 50),
(232, 138, 30), 2)
if self.isPanalarmEnabled:
alarmSignal['timestamp'] = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
alarmSignal['img'] = realTimeFrame
CoreUI.alarmQueue.put(alarmSignal)
logging.info('系统发出了报警信号')
in_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
out_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
try:
if int(datetime.now().strftime('%H%M%S')) < 155900 and list(cursor.execute(
"select count(*) from workers where name='{0}'".format(en_name)))[0][
0] == 0:
cursor.execute('INSERT INTO workers (name,in_time,status) VALUES (?, ?)',
(en_name, in_time,classname))
if int(datetime.now().strftime('%H%M%S')) > 160200 and list(cursor.execute(
"select count(*) from workers where name='{0}'".format(en_name)))[0][
0] == 1:
cursor.execute(
"update workers set out_time = '{0}' where name = '{1}'".format(out_time,
en_name))
finally:
conn.commit()
else:
# 若置信度评分大于置信度阈值,该人脸可能是陌生人
cv2.putText(realTimeFrame, 'unknown', (_x - 5, _y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
(0, 0, 255), 2)
# 若置信度评分超出自动报警阈值,触发报警信号
if confidence > self.autoAlarmThreshold:
# 检测报警系统是否开启
if self.isPanalarmEnabled:
alarmSignal['timestamp'] = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
alarmSignal['img'] = realTimeFrame
CoreUI.alarmQueue.put(alarmSignal)
logging.info('系统发出了报警信号')
# 每读取10帧,检测跟踪器的人脸是否还在当前画面内
if frameCounter % 10 == 0:
frameCounter=0
# 这里必须转换成int类型,因为OpenCV人脸检测返回的是numpy.int32类型,
# 而dlib人脸跟踪器要求的是int类型
x = int(_x)
y = int(_y)
w = int(_w)
h = int(_h)
# 计算中心点
x_bar = x + 0.5 * w
y_bar = y + 0.5 * h
# matchedFid表征当前检测到的人脸是否已被跟踪
matchedFid = None
for fid in faceTrackers.keys():
# 获取人脸跟踪器的位置
# tracked_position 是 dlib.drectangle 类型,用来表征图像的矩形区域,坐标是浮点数
tracked_position = faceTrackers[fid].get_position()
# 浮点数取整
t_x = int(tracked_position.left())
t_y = int(tracked_position.top())
t_w = int(tracked_position.width())
t_h = int(tracked_position.height())
# 计算人脸跟踪器的中心点
t_x_bar = t_x + 0.5 * t_w
t_y_bar = t_y + 0.5 * t_h
# 如果当前检测到的人脸中心点落在人脸跟踪器内,且人脸跟踪器的中心点也落在当前检测到的人脸内
# 说明当前人脸已被跟踪
if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and
(x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
matchedFid = fid
# 如果当前检测到的人脸是陌生人脸且未被跟踪
if not isKnown and matchedFid is None:
# 创建一个人脸跟踪器
tracker = dlib.correlation_tracker()
# 锁定跟踪范围
tracker.start_track(realTimeFrame, dlib.rectangle(x - 5, y - 10, x + w + 5, y + h + 10))
# 将该人脸跟踪器分配给当前检测到的人脸
faceTrackers[currentFaceID] = tracker
# 人脸ID自增
currentFaceID += 1
# 使用当前的人脸跟踪器,更新画面,输出跟踪结果
for fid in faceTrackers.keys():
tracked_position = faceTrackers[fid].get_position()
t_x = int(tracked_position.left())
t_y = int(tracked_position.top())
t_w = int(tracked_position.width())
t_h = int(tracked_position.height())
# 在跟踪帧中圈出人脸
cv2.rectangle(realTimeFrame, (t_x, t_y), (t_x + t_w, t_y + t_h), (0, 0, 255), 2)
cv2.putText(realTimeFrame, 'tracking...', (15, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255),
2)
captureData['originFrame'] = frame
captureData['realTimeFrame'] = realTimeFrame
CoreUI.captureQueue.put(captureData)
else:
continue
# 停止OpenCV线程
def stop(self):
self.isRunning = False
self.quit()
self.wait()
if __name__ == '__main__':
    # Configure logging from file before any UI object emits log records.
    logging.config.fileConfig('./config/logging.cfg')
    app = QApplication(sys.argv)
    window = CoreUI()
    window.show()
    # app.exec() requires PyQt5 >= 5.x with the no-underscore alias — TODO confirm binding.
    sys.exit(app.exec())
|
[
"noreply@github.com"
] |
KongDeyu1532.noreply@github.com
|
3dbaaeb9d0eb0464b6906c098494799fce81d1dc
|
afd390063f35cda064c5d91a1e5473e3ae273812
|
/FineDust to Server/multi_thread_server.py
|
16c9627def965acb2dc4309880dcbf38b4fc1ac6
|
[] |
no_license
|
devk0ng/Removing_Fine_dust
|
7c047080b95d3394364f0b6f75cfe470ddab5963
|
dfa3d45e9a53b8adbe04be7cbea83a1ddf60d1de
|
refs/heads/master
| 2022-01-16T21:46:57.890747
| 2019-08-07T08:13:47
| 2019-08-07T08:13:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,376
|
py
|
# multi_threaded server
import socketserver
import threading
HOST = ''
PORT = 9009
lock = threading.Lock() # syncronized 동기화 진행하는 스레드 생성
class UserManager:  # Tracks connected Arduino clients and can broadcast to them.
    def __init__(self):
        # username -> (socket, address) for every connected client.
        self.users = {}

    def addUser(self, username, conn, addr):
        """Register a client; return the username, or None if already registered."""
        if username in self.users:
            conn.send('이미 등록된 아두이노입니다.\n'.encode())
            return None
        # Guard the shared registry against concurrent handler threads.
        lock.acquire()
        self.users[username] = (conn, addr)
        lock.release()
        #self.sendMessageToAll('아두이노[%s] 연결됨.' %username)
        print('+++ 현재 연결된 아두이노 수: [%d]' %len(self.users))
        return username

    def removeUser(self, username):
        """Unregister a client; unknown usernames are silently ignored."""
        if username not in self.users:
            return
        lock.acquire()
        del self.users[username]
        lock.release()
        #self.sendMessageToAll('아두이노 [%s] 종료' %username)
        print('--- 현재 연결된 아두이노 수 : [%d]' %len(self.users))

    def messageHandler(self, username, msg):
        """Process one message; return -1 when the client asked to quit."""
        if msg[0] != '/':  # ordinary payload (no leading command slash): just log it
            #self.sendMessageToAll('[%s] %s' %(username, msg))
            print('[%s] %s' %(username, msg))
            return
        if msg.strip() == '/quit':  # '/quit' disconnects the client
            self.removeUser(username)
            return -1

    def sendMessageToAll(self, msg):
        """Broadcast *msg* to every connected client socket."""
        for conn, addr in self.users.values():
            conn.send(msg.encode())
class MyTcpHandler(socketserver.BaseRequestHandler):
    """Per-connection handler: registers the client, then relays its messages."""

    # Shared registry across all handler threads.
    userman = UserManager()

    def handle(self):
        print('[%s] 연결됨' %self.client_address[0])
        # Bug fix: username stayed unbound when registerUsername() raised,
        # so the removeUser() call below crashed with NameError.
        username = None
        try:
            username = self.registerUsername()
            msg = self.request.recv(1024)
            while msg:
                print(msg.decode())
                # messageHandler returns -1 on '/quit'.
                if self.userman.messageHandler(username, msg.decode()) == -1:
                    self.request.close()
                    break
                msg = self.request.recv(1024)
        except Exception as e:
            print(e)
        print('[%s] 접속종료' %self.client_address[0])
        if username is not None:
            self.userman.removeUser(username)

    def registerUsername(self):
        """Prompt until the client supplies a username not already in use."""
        while True:
            self.request.send('Enter your Arduino ID:'.encode())
            username = self.request.recv(1024)
            # Bug fix: an empty recv() means the peer disconnected; the original
            # looped forever re-prompting a closed socket.
            if not username:
                raise ConnectionAbortedError('client disconnected during registration')
            username = username.decode().strip()
            if self.userman.addUser(username, self.request, self.client_address):
                return username
class ChatingServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # Thread-per-connection TCP server; all behavior comes from the mixins.
    pass
def runServer():
    """Run the chat server on (HOST, PORT) until interrupted with Ctrl-C."""
    print('+++ 서버 시작.')
    print('+++ 서버를 끝내려면 Ctrl-C를 누르세요.')
    # Bug fix: construct before the try block so a KeyboardInterrupt raised
    # during construction cannot reach the handler with `server` still unbound.
    server = ChatingServer((HOST, PORT), MyTcpHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('--- 서버를 종료합니다.')
        server.shutdown()
        server.server_close()
runServer()
|
[
"gygacpu@naver.com"
] |
gygacpu@naver.com
|
32156869f1eead248fef32104f8ef2a838ccc3c7
|
58c0604f0ddd38a0cb7a8b8b9fa7c70abdc974b9
|
/setup.py
|
e012c83b78a10d467aca9d7e2f127db0069537ee
|
[
"MIT"
] |
permissive
|
yohann84L/plot_metric
|
fffd5cdb8ec11074f0440b1f9f2aa10183cd7924
|
52cae945276b808f829471bd28537d3e7718317c
|
refs/heads/master
| 2023-07-20T02:34:02.330775
| 2022-08-22T08:55:50
| 2022-08-22T08:55:50
| 195,234,816
| 58
| 9
|
MIT
| 2023-09-08T10:02:26
| 2019-07-04T12:09:45
|
Python
|
UTF-8
|
Python
| false
| false
| 940
|
py
|
import setuptools

# The long description shown on PyPI comes straight from the README.
with open("README.rst", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='plot_metric',
    version='0.0.6',
    scripts=['plot_metric_package'],
    install_requires=[
        "scipy>=1.1.0",
        "matplotlib>=3.0.2",
        "colorlover>=0.3.0",
        "pandas>=0.23.4",
        "seaborn>=0.9.0",
        "numpy>=1.15.4",
        "scikit_learn>=0.21.2",
    ],
    author="Yohann Lereclus",
    author_email="lereclus84L@gmail.com",
    description="A package with tools for plotting metrics",
    long_description=long_description,
    # Bug fix: the long description is read from README.rst (reStructuredText)
    # but was declared as "text/markdown", so PyPI would mis-render/reject it.
    long_description_content_type="text/x-rst",
    url="https://github.com/yohann84L/plot_metric/",
    packages=setuptools.find_packages(),
    py_modules=['plot_metric/functions'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
[
"lereclus84L@gmail.com"
] |
lereclus84L@gmail.com
|
7d039a870ec27bb80f11107da20eb106f895ae64
|
49ca62eb4bdbe24aa09eaf51a42a5f5abe9c25a6
|
/noway/__init__.py
|
31a973a69d4347d35c52bb14253d0179f8d63d3e
|
[] |
no_license
|
emergencybutter/noway
|
6c2d49ba2192f338baf37ad993b50f14f0c14f3f
|
1ad0a16229b3c29de451a5505a29cf8ccaf35a7d
|
refs/heads/master
| 2020-04-05T18:03:16.311851
| 2018-11-11T22:14:26
| 2018-11-11T22:14:26
| 157,087,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
from .noway import *
|
[
"arnaud.cornet@gmail.com"
] |
arnaud.cornet@gmail.com
|
4bb90a6ca68e445eccebebed337a3cd0bead0c5d
|
8d44932bdd08424eed23e886525ca507e4267351
|
/back/api/urls.py
|
4cbd49cb0a38817f96b192032a63e7db79f88f77
|
[] |
no_license
|
Almanova/WebDevelopment-Project
|
df6ddf74d279744314f11555f71f992c48a44fab
|
80e0e0acb929b922597f9447c7543d7c10fff5e2
|
refs/heads/master
| 2023-05-13T07:24:18.052668
| 2020-04-26T17:04:13
| 2020-04-26T17:04:13
| 253,298,794
| 0
| 2
| null | 2023-05-10T07:00:54
| 2020-04-05T18:01:49
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 833
|
py
|
from django.urls import path
from api.views import views_cbv, views_fbv, views_auth
from rest_framework_jwt.views import obtain_jwt_token

# API routing: JWT login, forum sections/topics/subtopics, and user endpoints.
urlpatterns = [
    # Exchange credentials for a JWT.
    path('login/', obtain_jwt_token),
    path('sections/', views_cbv.SectionsListAPIView.as_view()),
    path('sections/<int:section_id>/topics/', views_fbv.topics_list),
    path('sections/<int:section_id>/', views_fbv.section_details),
    path('topics/<int:topic_id>/subtopics/', views_cbv.SubtopicsListAPIView.as_view()),
    path('topics/<int:topic_id>/edit/', views_cbv.TopicDetailsAPIView.as_view()),
    path('subtopics/<int:subtopic_id>/edit/', views_fbv.subtopic_details),
    path('signup/', views_auth.sign_up),
    path('users/<str:username>/', views_auth.get_user),
    path('subtopics/', views_fbv.subtopics_list),
    # Per-topic counts for the manager view — presumably; verify against views_fbv.
    path('manager/', views_fbv.topics_count)
]
|
[
"almanovamadina@yahoo.com"
] |
almanovamadina@yahoo.com
|
25ae3799ea54378963bca701e93f18238e04cdca
|
ee162f79b7913c4434666f4b71e275e7310f7fb4
|
/efarmer/advertisements/admin.py
|
c2b95db5a508407ea41c7903de0429e25a223803
|
[] |
no_license
|
oskarsakol/eFarmer
|
a18c72723920b8497bbad29d9c1513cc3f353397
|
83943152471e35e07907a4fc5b2d469d1abbc2ba
|
refs/heads/master
| 2020-09-30T19:44:54.250216
| 2020-02-19T10:23:39
| 2020-02-19T10:23:39
| 227,359,460
| 4
| 0
| null | 2020-01-25T16:34:49
| 2019-12-11T12:20:39
|
Python
|
UTF-8
|
Python
| false
| false
| 104
|
py
|
from django.contrib import admin
from .models import Advertisement

# Expose Advertisement in the Django admin with the default ModelAdmin.
admin.site.register(Advertisement)
|
[
"sakoloskar@gmail.com"
] |
sakoloskar@gmail.com
|
fcfa2095ffb3cfdbd3bdfa217f2ca38005c23e47
|
650c5dcf150820ac14d6fac3e10234a27a7827cf
|
/main/migrations/0063_alter_heroes_group.py
|
0a2358ced03c0e5f21871616d605b8583a552ca9
|
[] |
no_license
|
Nurik110/django-site
|
f5713e7f4e9f635b7b81591b7a04ae3754693d0e
|
746cd4eae523b93920505357305ccfecc51567e7
|
refs/heads/main
| 2023-06-15T04:34:33.719427
| 2021-06-28T11:26:21
| 2021-06-28T11:26:21
| 377,363,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
# Generated by Django 3.2.3 on 2021-06-25 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: constrains Heroes.group to a fixed set of role choices."""

    dependencies = [
        ('main', '0062_auto_20210625_2312'),
    ]

    operations = [
        migrations.AlterField(
            model_name='heroes',
            name='group',
            field=models.CharField(choices=[('fighter', 'fighter'), ('shooter', 'shooter'), ('support', 'support'), ('assassin', 'assassin'), ('magician', 'magician'), ('tank', 'tank')], default='fighter', max_length=20, verbose_name='Группа'),
        ),
    ]
|
[
"Naltynbek_1998@mail.ru"
] |
Naltynbek_1998@mail.ru
|
afd1dacedf308d638fdc5ded844094d8eccf879c
|
6df76f8a6fcdf444c3863e3788a2f4b2c539c22c
|
/django code/p69/manage.py
|
79b1d93f10fd7ae7bfe5d80fbfe7380a839f69cc
|
[] |
no_license
|
basantbhandari/DjangoProjectsAsDocs
|
068e4a704fade4a97e6c40353edb0a4299bd9678
|
594dbb560391eaf94bb6db6dc07702d127010b88
|
refs/heads/master
| 2022-12-18T22:33:23.902228
| 2020-09-22T13:11:01
| 2020-09-22T13:11:01
| 297,651,728
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility using this project's settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p69.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; chaining preserves the original traceback.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"36443209+basantbhandari@users.noreply.github.com"
] |
36443209+basantbhandari@users.noreply.github.com
|
2c1fdd4d765900888e77a4416496947d10c2917e
|
c61773a6ae76ae258589784ee02135c91d31b5bb
|
/cars/rental/migrations/0005_auto_20191127_1504.py
|
ea502731cf548175f627e39d144bbccf9c55ff1b
|
[] |
no_license
|
burbaljaka/cars_rental
|
437e53d3f72d2bf6c0201a46f83de6415ffd8e27
|
c83ffd60d9a48bc96b10fc1964c5ef0402a03bd5
|
refs/heads/master
| 2020-09-16T13:54:21.923668
| 2019-12-04T15:00:03
| 2019-12-04T15:00:03
| 223,789,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# Generated by Django 2.2.7 on 2019-11-27 15:04
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: reworks Loan.loan_car into a nullable FK and refreshes date defaults."""

    dependencies = [
        ('rental', '0004_auto_20191127_1459'),
    ]

    operations = [
        migrations.AlterField(
            model_name='car',
            name='car_adding_date',
            # NOTE(review): the default is the literal timestamp captured when
            # makemigrations ran, not a callable — confirm timezone.now was intended.
            field=models.DateField(default=datetime.datetime(2019, 11, 27, 15, 4, 29, 824286)),
        ),
        migrations.RemoveField(
            model_name='loan',
            name='loan_car',
        ),
        migrations.AddField(
            model_name='loan',
            name='loan_car',
            # SET_NULL keeps loan history when the referenced car is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rental.Car'),
        ),
        migrations.AlterField(
            model_name='loan',
            name='loan_date_of_loan',
            field=models.DateField(default=datetime.datetime(2019, 11, 27, 15, 4, 29, 825285)),
        ),
    ]
|
[
"kapitonov.timur@gmail.com"
] |
kapitonov.timur@gmail.com
|
a4136c31c7cf706db3e0de2db9535461a060f5fe
|
9945f91a1a677d8e8175dc33ec2a791bcdf7bc48
|
/ImbalancedClassificationMammographyMicrocalcification/mammographyLoadSplitEvaluateModel.py
|
7b930a7d28aad25f55dbca31abf7419a5407341f
|
[] |
no_license
|
AWhelan33/DataScience
|
0db84b2ae9194fb7efdbe1252ee507e30b811b14
|
439a244c6652bcea75d290cad8ca3c9e270c8f19
|
refs/heads/master
| 2021-01-09T16:25:35.338245
| 2020-03-28T18:18:22
| 2020-03-28T18:18:22
| 242,371,293
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
#test harness and baseline model evaluation
from collections import Counter
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.dummy import DummyClassifier
#load the dataset
def load_dataset(full_path):
    """Load the CSV at *full_path* and return (X, y) with y label-encoded to {0, 1}."""
    frame = read_csv(full_path, header=None)
    values = frame.values
    # All columns but the last are features; the last column is the class label.
    features, target = values[:, :-1], values[:, -1]
    # Encode the string labels as integers 0/1.
    target = LabelEncoder().fit_transform(target)
    return features, target
#evaluate a model
def evaluate_model(X, y, model):
    """Score *model* with repeated stratified 10-fold CV and return the ROC AUC scores."""
    # 10 folds x 3 repeats, fixed seed for reproducibility.
    folds = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    return cross_val_score(model, X, y, scoring='roc_auc', cv=folds, n_jobs=-1)
# Location of the imbalanced mammography dataset (CSV; last column = class label).
full_path = 'mammography.csv'
# Load the dataset.
X, y = load_dataset(full_path)
# Summarize shape and class balance of the loaded data.
print(X.shape, y.shape, Counter(y))
# Reference baseline: a no-skill classifier sampling from the class prior.
model = DummyClassifier(strategy='stratified')
# Evaluate the baseline model.
scores = evaluate_model(X, y, model)
# Report mean and spread of ROC AUC across the CV folds.
print('Mean ROC AUC: %.3f (%.3f)' %(mean(scores), std(scores)))
|
[
"amiiwhelan@gmail.com"
] |
amiiwhelan@gmail.com
|
0cd9ff06bd48a6f67a5f13c9351fd7e7da1819c6
|
52e677932d7263d4ef6ea47f0a5f64829def290c
|
/customimg.py
|
658013d2c91b125c74e85ef2c1824bb98cb7f88c
|
[] |
no_license
|
ianmah/instasched.py
|
6a2cea4cf9151e4c46eeee016378fe40b397e308
|
0aff3622e16a015356db19563dbe4bca8c0ff600
|
refs/heads/master
| 2020-05-01T15:01:24.343790
| 2019-03-26T06:08:14
| 2019-03-26T06:08:14
| 177,535,826
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
from PIL import Image
import math
import io
from io import BytesIO
import requests
class Img:
    """Wraps a PIL image (local path or URL) and prepares it for upload:
    center-crops to at most a 5:4 (1.25) height/width ratio and caps the
    size at 1080x1080."""

    __image = ''

    def __init__(self, img):
        # URLs are fetched over HTTP; anything else is treated as a local path.
        if img[:4] == 'http':
            imgGet = requests.get(img)
            self.__image = Image.open(BytesIO(imgGet.content))
        else:
            self.__image = Image.open(img)
            self.__image.load()

    def crop(self):
        """Center-crop vertically so height/width does not exceed 1.25."""
        height = self.__image.height
        width = self.__image.width
        # How far the aspect ratio exceeds the 1.25 limit (negative = already OK).
        mod_ratio = height/width-1.25
        if mod_ratio > 0:
            newheight = height-math.floor(height*mod_ratio)
            hmid = height/2
            bottom = hmid+newheight/2
            top = hmid-newheight/2
            area = (0, top, width, bottom)
            self.__image = self.__image.crop(area)

    def resize(self):
        """Shrink in place so neither dimension exceeds 1080px."""
        height = self.__image.height
        width = self.__image.width
        # Bug fix: the original only checked height, so wide-but-short images
        # were never shrunk even though thumbnail() caps both dimensions.
        if height > 1080 or width > 1080:
            # Bug fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is
            # the identical resampling filter under its current name.
            self.__image.thumbnail((1080, 1080), Image.LANCZOS)

    def getImg(self):
        """Return the image after cropping and resizing."""
        self.crop()
        self.resize()
        return self.__image

    def size(self):
        """Return the current (width, height)."""
        return self.__image.size

    def getByteArr(self):
        """Return the image serialized as JPEG bytes."""
        imgByteArr = io.BytesIO()
        self.__image.save(imgByteArr, format='JPEG')
        return imgByteArr.getvalue()
|
[
"ianmmah@gmail.com"
] |
ianmmah@gmail.com
|
f866ff506707811bdf06b64d8846de512b2c0264
|
d63899ba9ce7f06841ce47b181bea34e1442f595
|
/learn-python/learn_python-ex10.py
|
a698f2bf074981cb6485934ba490563cd9e76d1b
|
[] |
no_license
|
tillyoswellwheeler/module-02_python-learning
|
ba419a0428cc0e6e3e3d8951a79bfba9c78891ae
|
a27a49d050d6526df3db93992f03b7157d8de5e1
|
refs/heads/master
| 2022-12-25T04:32:59.468417
| 2021-09-04T14:10:30
| 2021-09-04T14:10:30
| 169,213,767
| 0
| 0
| null | 2021-09-04T14:11:10
| 2019-02-05T09:05:14
|
CSS
|
UTF-8
|
Python
| false
| false
| 359
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 18:56:00 2018
@author: 612383362
"""
tabby_cat = "/tI'm tabbed in."
persian_cat = "I'm split\non a line"
backslash_cat = "I'm \\ a \\ cat."
#fat_cat = """"
#I'll do a list:
#\t* Cat food
#\t* Fishies
#\t* Catnip\n\t* Grass
#""""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
#print(fat_cat)
|
[
"tilly.oswellwheeler@hotmail.com"
] |
tilly.oswellwheeler@hotmail.com
|
d77fcded26296c4d07bfe63f33e2846953955337
|
7860ed6d27512c4601400f89c70c6ccbf654ff99
|
/claritick/qbuilder/management/commands/temps_rendu.py
|
7ba912aab3c9b74f998eb649683160d7549ccc36
|
[] |
no_license
|
zehome/claritick
|
a7e4ed39e535163bc54e58e9611b84122de298c6
|
69290d639df55aba6f17526c97868c2238cd962f
|
refs/heads/master
| 2020-06-03T05:03:22.433046
| 2014-02-07T23:20:37
| 2014-02-07T23:20:37
| 5,178,975
| 0
| 1
| null | 2022-10-26T08:16:16
| 2012-07-25T12:49:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,901
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.db import connection
from django.conf import settings
from optparse import make_option
import logging
import time
import datetime
import traceback
from tat.models import MemorisationTempsRendu, ModeleEvenement
logger = logging.getLogger("qbuilder.temps_rendu")
class Command(BaseCommand):
    """Nightly job: aggregate per-day rendering-time ("temps de rendu") statistics
    from the mca3.vstats_tat view into MemorisationTempsRendu rows."""

    option_list = BaseCommand.option_list + (
        make_option("--start",
            dest="start_date_string",
            help=u"Calculer les temps de rendu à partir de cette date (YYYY-mm-dd)",
        ),
        make_option("--end",
            dest="end_date_string",
            help=u"Calculer les temps de rendu jusqu'à cette date (YYYY-mm-dd)",
        ),
    )

    def handle(self, *args, **kwargs):
        # Only run during the configured hour (default 01:00) so the cron entry
        # can fire more often without duplicating the aggregation.
        allowed_hour = getattr(settings, 'QB_TEMPS_RENDU_HOUR', 1)
        actual_hour = datetime.datetime.now().hour
        if actual_hour != allowed_hour:
            logger.warning(u"Only allowed to run at %s and it is %s", allowed_hour, actual_hour)
            return
        # Default window: yesterday -> today; overridable via --start/--end.
        start_date = datetime.date.today() - datetime.timedelta(days = 1)
        end_date = datetime.date.today()
        start_date_string = kwargs.pop('start_date_string')
        end_date_string = kwargs.pop('end_date_string')
        if start_date_string:
            start_date = datetime.datetime.strptime(start_date_string,'%Y-%m-%d')
        if end_date_string:
            end_date = datetime.datetime.strptime(end_date_string,'%Y-%m-%d')
        # Aggregates avg/min/max/count of temps_reel per day in the window,
        # grouped by start/end event pair. %s placeholders are the two dates.
        SQL = """
        SELECT
            series.date,
            mca3.vstats_tat.start_evt,
            mca3.vstats_tat.end_evt,
            mca3.vstats_tat.start,
            mca3.vstats_tat."end",
            AVG(mca3.vstats_tat.temps_reel),
            MIN(mca3.vstats_tat.temps_reel),
            MAX(mca3.vstats_tat.temps_reel),
            COUNT(mca3.vstats_tat.temps_reel)
        FROM mca3.vstats_tat,
            (select start, "end" from mca3.delai) as const,
            (select generate_series(%s, %s, '1 day'::interval) as date) as series
        WHERE mca3.vstats_tat.end_date > series.date AND
            mca3.vstats_tat.end_date < series.date + '1 day'::interval AND
            mca3.vstats_tat.start = const.start AND
            mca3.vstats_tat."end" = const."end" AND
            mca3.vstats_tat.temps_reel > '0'::interval AND
            mca3.vstats_tat.start != mca3.vstats_tat."end"
        GROUP BY series.date,
            mca3.vstats_tat.start_evt,
            mca3.vstats_tat.end_evt,
            mca3.vstats_tat.start,
            mca3.vstats_tat."end"
        """
        logger.info("Calcul des temps de rendu (%s - %s)", start_date, end_date,)
        # Map modele pk -> instance so each result row resolves without extra queries.
        modeles = ModeleEvenement.objects.all()
        modeles = dict([(modele.pk, modele) for modele in modeles])
        now = time.time()
        cursor = connection.cursor()
        try:
            cursor.execute(SQL, [start_date, end_date,])
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception and keep the traceback log.
        except Exception:
            logger.error("Erreur d'exécution : %s" % (traceback.format_exc(),))
        else:
            logger.info("Temps d'exécution : %s" % (time.time() - now,))
            for dataline in cursor.fetchall():
                stev = modeles[dataline[3]]
                edev = modeles[dataline[4]]
                MemorisationTempsRendu.objects.create(
                    date = dataline[0],
                    start_evt = stev.evenement,
                    end_evt = edev.evenement,
                    start = stev,
                    end = edev,
                    avg_temps_rendu = dataline[5],
                    min_temps_rendu = dataline[6],
                    max_temps_rendu = dataline[7],
                    compte = dataline[8]
                )
|
[
"gl@clarisys.fr"
] |
gl@clarisys.fr
|
524fcd5377baf5b11c259061610ad7632b9376d3
|
14e941b0d3b3b754c1b1476981b1f7a6a00804c8
|
/3raEntrega/Prototipo 6/GenGraphic1.py
|
18a60988623a9fd3ae215d9658476aad33d47e14
|
[] |
no_license
|
JoshuaMeza/CodePain_PE
|
ca8b4eea8ef0e4d39b1b8d2d540860267bb21189
|
b30cf36286b8709c20577ead8b5fb36f7a14a1db
|
refs/heads/master
| 2022-10-15T00:21:49.819120
| 2020-06-09T23:17:01
| 2020-06-09T23:17:01
| 256,617,299
| 0
| 1
| null | 2020-04-20T21:58:51
| 2020-04-17T21:52:22
|
C
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
"""
Author Joshua Meza, Jonathan Gómez, and Irving Poot
Date 20/05/2020
Version 1.0.0
Program who generates the confirmed cases vs actual deaths graphic.
"""
from matplotlib import pyplot
def genGraphic1(casesAmount,deathsAmount):
    """
    Generate and show a pie chart of confirmed cases versus deaths.

    Args:
        casesAmount: Confirmed-case totals; only element 0 is used
            (appears to be a one-element sequence — confirm with caller)
        deathsAmount: Death totals; only element 0 is used
    Returns:
        Nothing
    """
    # Skip plotting entirely when either count is zero/missing.
    if casesAmount[0]!=0 and deathsAmount[0]!=0:
        parts = ('Confirmed Cases', 'Deaths')
        slices = (casesAmount[0], deathsAmount[0])
        colors = ('green', 'red')
        # explode values: pull the "Confirmed Cases" wedge slightly out of the pie.
        values = (0.1, 0)
        # Hide the matplotlib window toolbar.
        pyplot.rcParams['toolbar'] = 'None'
        _, _, text = pyplot.pie(slices, colors = colors, labels = parts, autopct='%1.1f%%', explode=values, startangle = 90)
        # Render the percentage labels in white for contrast.
        for tex in text:
            tex.set_color('white')
        # Equal axis keeps the pie circular.
        pyplot.axis('equal')
        pyplot.title('Graph of the data collected from the country')
        pyplot.show()
|
[
"56287951+JoshuaMeza@users.noreply.github.com"
] |
56287951+JoshuaMeza@users.noreply.github.com
|
20ca5110f2ffd78b936237416ee7819f2352fa64
|
869d300c764911d468f5b1e5a98e5d07a27535ab
|
/python-basics/com/learn/David.py
|
77f3346b022c12ee9af0b39c59d63b8154c782ed
|
[] |
no_license
|
thananauto/python-test-frameworks
|
df5962996cd9c4cded9355fef6cb2a099c69e3b1
|
abaf9d11d8c65f2cd9f916b241898ad11e26bf43
|
refs/heads/master
| 2022-12-15T18:14:57.745930
| 2019-12-16T13:51:04
| 2019-12-16T13:51:04
| 228,375,228
| 0
| 0
| null | 2022-09-16T18:15:48
| 2019-12-16T11:47:11
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
from com.learn.Employee import Employee
from com.learn.Cars import Cars
class David(Employee, Cars):
    """Toy multiple-inheritance example combining Employee and Cars."""

    def __init__(self, name , salary):
        # NOTE(review): name/salary are ignored and the parent __init__ is never
        # called — confirm whether Employee/Cars state should be initialized here.
        print('Calling the constrcutor')

    def displayChildmethod(self):
        # Demonstrates a method defined only on the child class.
        print('Print calling the child methods')
|
[
"r.thananjayan@superp.nl"
] |
r.thananjayan@superp.nl
|
c5e0bd194a89e25927fb4e9f61e1b29307bb8af1
|
b19dfd6a3ba5d107d110fb936de2e91d1d92bb99
|
/venv/lib/python3.7/site-packages/Satchmo-0.9.3-py3.7.egg/satchmo_ext/productratings/listeners.py
|
a8498650d43e8f6a837bc7a63cdc45bc05f17ba3
|
[] |
no_license
|
siddhant3030/djangoecommerce
|
d8f5b21f29d17d2979b073fd9389badafc993b5c
|
b067cb1155c778fece4634d0a98631a0646dacff
|
refs/heads/master
| 2022-12-13T15:28:39.229377
| 2019-09-28T10:30:02
| 2019-09-28T10:30:02
| 207,240,716
| 2
| 1
| null | 2022-12-11T01:34:25
| 2019-09-09T06:35:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,257
|
py
|
"""Utility functions used by signals to attach Ratings to Comments"""
import logging
from django.contrib.sites.models import Site
from django.utils.encoding import smart_str
from django.conf import settings
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
try:
from django.contrib.comments.models import Comment
except ImportError:
from django_comments.models import Comment
from livesettings.functions import config_value
from product.models import Product
from satchmo_utils import url_join
from .models import ProductRating
log = logging.getLogger('productratings')
def save_rating(comment=None, request=None, **kwargs):
    """Create or update the ProductRating attached to a just-posted comment,
    reading the numeric 'rating' field from the POST data."""
    # should always be true — this listener fires on comment-post requests
    if request.method != "POST":
        return
    data = request.POST.copy()
    if 'rating' not in data:
        return
    raw = data['rating']
    try:
        rating = int(raw)
    except ValueError:
        log.error('Could not parse rating from posted rating: %s', raw)
        return
    # Only comments on Product objects carry ratings; one rating per comment.
    if comment.content_type.app_label == "product" and comment.content_type.model == "product":
        ProductRating.objects.update_or_create(comment=comment, defaults = {'rating': rating})
    else:
        log.debug('Not saving rating for comment on a %s object', comment.content_type.model)
def one_rating_per_product(comment=None, request=None, **kwargs):
    """Enforce one rating per user per product: delete the user's other rated
    comments for the same product on the current site, keeping *comment*."""
    site = Site.objects.get_current()
    product_ratings = ProductRating.objects.rated_products()
    product_ratings = product_ratings.filter(comment__object_pk=comment.object_pk, comment__site=site,
        comment__user=request.user).exclude(comment__pk=comment.pk).distinct()
    # Deleting the comment cascades to its attached rating.
    for product_rating in product_ratings:
        product_rating.comment.delete()
def check_with_akismet(comment=None, request=None, **kwargs):
    """If Akismet is enabled and configured, run the comment through its spam
    check and un-publish (is_public=False) anything flagged as spam."""
    if config_value("PRODUCT", "AKISMET_ENABLE"):
        akismet_key = config_value("PRODUCT", "AKISMET_KEY")
        if akismet_key:
            site = Site.objects.get_current()
            shop = reverse('satchmo_shop_home')
            from akismet import Akismet
            # Akismet wants the blog/shop URL the comment was posted on.
            akismet = Akismet(
                key=akismet_key,
                blog_url='http://%s' % url_join(site.domain, shop))
            if akismet.verify_key():
                akismet_data = { 'comment_type': 'comment',
                    'referrer': request.META.get('HTTP_REFERER', ""),
                    'user_ip': comment.ip_address,
                    'user_agent': '' }
                if akismet.comment_check(smart_str(comment.comment), data=akismet_data, build_data=True):
                    # Spam: hide the comment rather than deleting it.
                    comment.is_public=False
                    comment.save()
                    log.info("Akismet marked comment #%i as spam", comment.id)
                else:
                    log.debug("Akismet accepted comment #%i", comment.id)
            else:
                log.warn("Akismet key '%s' not accepted by akismet service.", akismet_key)
        else:
            log.info("Akismet enabled, but no key found. Please put in your admin settings.")
|
[
"ssiddhant3030@gmail.com"
] |
ssiddhant3030@gmail.com
|
ac4b6fc41af7dbfbb6de4811b8f291b765b914d1
|
96f179d4d8d8cd7eefa2c8b2d85dbf67e1e82434
|
/test/infrastructuration/print_components/formatted_text.py
|
b172a2f110b30808c42d21443f62d46c361f67a8
|
[
"MIT"
] |
permissive
|
Zhouhao12345/redcli
|
e7687f00638a52799e53fe6fede2ae50db635476
|
8a8260b0799e8524d0c339df8dfe6bcfb22f1841
|
refs/heads/master
| 2021-10-06T13:03:57.752655
| 2021-09-28T06:51:04
| 2021-09-28T06:51:04
| 224,829,460
| 6
| 1
| null | 2021-03-29T21:12:41
| 2019-11-29T10:14:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,233
|
py
|
import unittest
from pygments.token import Token
from redcli.infrastructuration.print_components.constant import PrintType
from redcli.infrastructuration.print_components.base import (
BasePrintComponents,
FormattedTextFactory,
)
class FormattedText(unittest.TestCase):
    """Smoke tests: each PrintType's component can format and print its input."""

    def test_1_formatted_plain_text(self):
        # Plain string passes straight through.
        type_ = PrintType.PLAIN_TEXT
        formatted_cls: BasePrintComponents = FormattedTextFactory().produce(type_)
        ins = formatted_cls()
        ins.type_context(context="Hello World")
        ins.to_print_formatted_text()

    def test_2_formatted_ansi_text(self):
        # Input containing raw ANSI color escape sequences.
        type_ = PrintType.FORMATTED_TEXT_ANSI
        formatted_cls: BasePrintComponents = FormattedTextFactory().produce(type_)
        ins = formatted_cls()
        ins.type_context(context="\x1b[31mhello \x1b[32mworld")
        ins.to_print_formatted_text()

    def test_3_formatted_html_text(self):
        # HTML-ish markup styled via a tag -> style mapping.
        type_ = PrintType.FORMATTED_TEXT_HTML
        formatted_cls: BasePrintComponents = FormattedTextFactory().produce(type_)
        ins = formatted_cls()
        ins.type_context(context="<a url='www.baidu.com'>hello world</a>")
        style_dict = {
            "a": "#44ff00 italic"
        }
        ins.wrapper_style(style_dict=style_dict)
        ins.to_print_formatted_text()

    def test_4_formatted_token_text(self):
        # Pygments tokens supplied positionally alongside the text fragments.
        type_ = PrintType.FORMATTED_TEXT_TOKEN_TEXT
        formatted_cls: BasePrintComponents = FormattedTextFactory().produce(type_)
        ins = formatted_cls()
        contexts = ("hello", "world")
        ins.type_context(context=contexts)
        ins.wrapper_style(style_dict=(Token.Keyword, Token.Punctuation))
        ins.to_print_formatted_text()

    def test_5_formatted_style_text(self):
        # (class, text) pairs styled through a class-name -> style mapping.
        type_ = PrintType.FORMATTED_TEXT_STYLE_TEXT
        formatted_cls: BasePrintComponents = FormattedTextFactory().produce(type_)
        ins = formatted_cls()
        contexts = [
            ("class:a", "hello"),
            ("class:b", "world"),
        ]
        ins.type_context(context=contexts)
        style_dict = {
            "a": "#ff0066",
            "b": "#ff0066",
        }
        ins.wrapper_style(style_dict=style_dict)
        ins.to_print_formatted_text()
|
[
"alex.zhou@gllue.com"
] |
alex.zhou@gllue.com
|
8dda670354d9119e5b7bb22b625403795b8035c8
|
c587ac33cbe9496f53e085273d2b18cf744961a6
|
/jupyterhub/files/jupyterhub_config.py
|
a392940f9c91ce39910a7060fc13763a42f733ca
|
[
"MIT"
] |
permissive
|
nathanhilbert/ScienceAnsible
|
606b9157a657c99e6f8c9b3e6453284f976afe21
|
cc12ebf153b8d186e31e6c1361fb024561a35759
|
refs/heads/master
| 2021-01-17T18:30:50.311788
| 2016-10-20T13:58:44
| 2016-10-20T13:58:44
| 71,468,400
| 0
| 0
| null | 2016-10-20T13:58:44
| 2016-10-20T13:54:40
|
Python
|
UTF-8
|
Python
| false
| false
| 14,815
|
py
|
# Configuration file for jupyterhub.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterHub configuration
#------------------------------------------------------------------------------
# An Application for starting a Multi-User Jupyter Notebook server.
# Grant admin users permission to access single-user servers.
#
# Users should be properly informed if this is enabled.
# c.JupyterHub.admin_access = False
# DEPRECATED, use Authenticator.admin_users instead.
# c.JupyterHub.admin_users = set()
# Answer yes to any questions (e.g. confirm overwrite)
# c.JupyterHub.answer_yes = False
# Dict of token:username to be loaded into the database.
#
# Allows ahead-of-time generation of API tokens for use by services.
# c.JupyterHub.api_tokens = {}
# Class for authenticating users.
#
# This should be a class with the following form:
#
# - constructor takes one kwarg: `config`, the IPython config object.
#
# - is a tornado.gen.coroutine
# - returns username on success, None on failure
# - takes two arguments: (handler, data),
# where `handler` is the calling web.RequestHandler,
# and `data` is the POST form data from the login page.
# c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator'
# The base URL of the entire application
# c.JupyterHub.base_url = '/'
# Whether to shutdown the proxy when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the proxy
# running.
#
# Only valid if the proxy was starting by the Hub process.
#
# If both this and cleanup_servers are False, sending SIGINT to the Hub will
# only shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
# c.JupyterHub.cleanup_proxy = True
# Whether to shutdown single-user servers when the Hub shuts down.
#
# Disable if you want to be able to teardown the Hub while leaving the single-
# user servers running.
#
# If both this and cleanup_proxy are False, sending SIGINT to the Hub will only
# shutdown the Hub, leaving everything else running.
#
# The Hub should be able to resume from database state.
# c.JupyterHub.cleanup_servers = True
# The config file to load
# c.JupyterHub.config_file = 'jupyterhub_config.py'
# Confirm that JupyterHub should be run without SSL. This is **NOT RECOMMENDED**
# unless SSL termination is being handled by another layer.
# c.JupyterHub.confirm_no_ssl = True
# Number of days for a login cookie to be valid. Default is two weeks.
# c.JupyterHub.cookie_max_age_days = 14
# The cookie secret to use to encrypt cookies.
#
# Loaded from the JPY_COOKIE_SECRET env variable by default.
# c.JupyterHub.cookie_secret = b''
# File in which to store the cookie secret.
# c.JupyterHub.cookie_secret_file = 'jupyterhub_cookie_secret'
# The location of jupyterhub data files (e.g. /usr/local/share/jupyter/hub)
# c.JupyterHub.data_files_path = '/usr/local/share/jupyter/hub'
# Include any kwargs to pass to the database connection. See
# sqlalchemy.create_engine for details.
# c.JupyterHub.db_kwargs = {}
# url for the database. e.g. `sqlite:///jupyterhub.sqlite`
# c.JupyterHub.db_url = 'sqlite:///jupyterhub.sqlite'
# log all database transactions. This has A LOT of output
# c.JupyterHub.debug_db = False
# show debug output in configurable-http-proxy
# c.JupyterHub.debug_proxy = False
# Send JupyterHub's logs to this file.
#
# This will *only* include the logs of the Hub itself, not the logs of the proxy
# or any single-user servers.
# c.JupyterHub.extra_log_file = ''
# Extra log handlers to set on JupyterHub logger
# c.JupyterHub.extra_log_handlers = []
# Generate default config file
# c.JupyterHub.generate_config = False
# The ip for this process
# c.JupyterHub.hub_ip = '127.0.0.1'
# The port for this process
# c.JupyterHub.hub_port = 8081
# The prefix for the hub server. Must not be '/'
# c.JupyterHub.hub_prefix = '/hub/'
# The public facing ip of the whole application (the proxy)
# NOTE(review): 0.0.0.0 makes the proxy listen on every interface; this
# assumes SSL termination / network isolation is handled in front of the
# hub — confirm for this deployment.
c.JupyterHub.ip = '0.0.0.0'
# Supply extra arguments that will be passed to Jinja environment.
# c.JupyterHub.jinja_environment_options = {}
# Interval (in seconds) at which to update last-activity timestamps.
# c.JupyterHub.last_activity_interval = 300
# Specify path to a logo image to override the Jupyter logo in the banner.
# c.JupyterHub.logo_file = ''
# File to write PID Useful for daemonizing jupyterhub.
# c.JupyterHub.pid_file = ''
# The public facing port of the proxy
# c.JupyterHub.port = 8000
# The ip for the proxy API handlers
# c.JupyterHub.proxy_api_ip = '127.0.0.1'
# The port for the proxy API handlers
# c.JupyterHub.proxy_api_port = 0
# The Proxy Auth token.
#
# Loaded from the CONFIGPROXY_AUTH_TOKEN env variable by default.
# c.JupyterHub.proxy_auth_token = ''
# Interval (in seconds) at which to check if the proxy is running.
# c.JupyterHub.proxy_check_interval = 30
# The command to start the http proxy.
#
# Only override if configurable-http-proxy is not on your PATH
# c.JupyterHub.proxy_cmd = ['configurable-http-proxy']
# Purge and reset the database.
# c.JupyterHub.reset_db = False
# The class to use for spawning single-user servers.
#
# Should be a subclass of Spawner.
# c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner'
# Path to SSL certificate file for the public facing interface of the proxy
#
# Use with ssl_key
# c.JupyterHub.ssl_cert = ''
# Path to SSL key file for the public facing interface of the proxy
#
# Use with ssl_cert
# c.JupyterHub.ssl_key = ''
# Host to send statds metrics to
# c.JupyterHub.statsd_host = ''
# Port on which to send statsd metrics about the hub
# c.JupyterHub.statsd_port = 8125
# Prefix to use for all metrics sent by jupyterhub to statsd
# c.JupyterHub.statsd_prefix = 'jupyterhub'
# Run single-user servers on subdomains of this host.
#
# This should be the full https://hub.domain.tld[:port]
#
# Provides additional cross-site protections for javascript served by single-
# user servers.
#
# Requires <username>.hub.domain.tld to resolve to the same host as
# hub.domain.tld.
#
# In general, this is most easily achieved with wildcard DNS.
#
# When using SSL (i.e. always) this also requires a wildcard SSL certificate.
# c.JupyterHub.subdomain_host = ''
# Paths to search for jinja templates.
# c.JupyterHub.template_paths = []
# Extra settings overrides to pass to the tornado application.
# c.JupyterHub.tornado_settings = {}
#------------------------------------------------------------------------------
# Spawner configuration
#------------------------------------------------------------------------------
# Base class for spawning single-user notebook servers.
#
# Subclass this, and override the following methods:
#
# - load_state - get_state - start - stop - poll
# Extra arguments to be passed to the single-user server
# c.Spawner.args = []
# The command used for starting notebooks.
# c.Spawner.cmd = ['jupyterhub-singleuser']
# Enable debug-logging of the single-user server
# c.Spawner.debug = False
# The default URL for the single-user server.
#
# Can be used in conjunction with --notebook-dir=/ to enable full filesystem
# traversal, while preserving user's homedir as landing page for notebook
#
# `%U` will be expanded to the user's username
# c.Spawner.default_url = ''
# Disable per-user configuration of single-user servers.
#
# This prevents any config in users' $HOME directories from having an effect on
# their server.
# c.Spawner.disable_user_config = False
# Whitelist of environment variables for the subprocess to inherit
# c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL']
# Environment variables to load for the Spawner.
#
# Value could be a string or a callable. If it is a callable, it will be called
# with one parameter, which will be the instance of the spawner in use. It
# should quickly (without doing much blocking operations) return a string that
# will be used as the value for the environment variable.
# c.Spawner.environment = {}
# Timeout (in seconds) before giving up on a spawned HTTP server
#
# Once a server has successfully been spawned, this is the amount of time we
# wait before assuming that the server is unable to accept connections.
# c.Spawner.http_timeout = 30
# The IP address (or hostname) the single-user server should listen on
# c.Spawner.ip = '127.0.0.1'
# The notebook directory for the single-user server
#
# `~` will be expanded to the user's home directory `%U` will be expanded to the
# user's username
# c.Spawner.notebook_dir = '/vagrant/shares/homeshare'
# An HTML form for options a user can specify on launching their server. The
# surrounding `<form>` element and the submit button are already provided.
#
# For example:
#
# Set your key:
# <input name="key" val="default_key"></input>
# <br>
# Choose a letter:
# <select name="letter" multiple="true">
# <option value="A">The letter A</option>
# <option value="B">The letter B</option>
# </select>
# c.Spawner.options_form = ''
# Interval (in seconds) on which to poll the spawner.
# c.Spawner.poll_interval = 30
# Timeout (in seconds) before giving up on the spawner.
#
# This is the timeout for start to return, not the timeout for the server to
# respond. Callers of spawner.start will assume that startup has failed if it
# takes longer than this. start should return when the server process is started
# and its location is known.
# c.Spawner.start_timeout = 60
#------------------------------------------------------------------------------
# LocalProcessSpawner configuration
#------------------------------------------------------------------------------
# A Spawner that just uses Popen to start local processes as users.
#
# Requires users to exist on the local system.
#
# This is the default spawner for JupyterHub.
# Seconds to wait for process to halt after SIGINT before proceeding to SIGTERM
# c.LocalProcessSpawner.INTERRUPT_TIMEOUT = 10
# Seconds to wait for process to halt after SIGKILL before giving up
# c.LocalProcessSpawner.KILL_TIMEOUT = 5
# Seconds to wait for process to halt after SIGTERM before proceeding to SIGKILL
# c.LocalProcessSpawner.TERM_TIMEOUT = 5
#------------------------------------------------------------------------------
# Authenticator configuration
#------------------------------------------------------------------------------
# A class for authentication.
#
# The primary API is one method, `authenticate`, a tornado coroutine for
# authenticating users.
# set of usernames of admin users
#
# If unspecified, only the user that launches the server will be admin.
# c.Authenticator.admin_users = set()
# Dictionary mapping authenticator usernames to JupyterHub users.
#
# Can be used to map OAuth service names to local users, for instance.
#
# Used in normalize_username.
# c.Authenticator.username_map = {}
# Regular expression pattern for validating usernames.
#
# If not defined: allow any username.
# c.Authenticator.username_pattern = ''
# Username whitelist.
#
# Use this to restrict which users can login. If empty, allow any user to
# attempt login.
# c.Authenticator.whitelist = set()
#------------------------------------------------------------------------------
# LocalAuthenticator configuration
#------------------------------------------------------------------------------
# Base class for Authenticators that work with local Linux/UNIX users
#
# Checks for local users, and can attempt to create them if they exist.
# The command to use for creating users as a list of strings.
#
# For each element in the list, the string USERNAME will be replaced with the
# user's username. The username will also be appended as the final argument.
#
# For Linux, the default value is:
#
# ['adduser', '-q', '--gecos', '""', '--disabled-password']
#
# To specify a custom home directory, set this to:
#
# ['adduser', '-q', '--gecos', '""', '--home', '/customhome/USERNAME',
# '--disabled-password']
#
# This will run the command:
#
# adduser -q --gecos "" --home /customhome/river --disabled-password river
#
# when the user 'river' is created.
# c.LocalAuthenticator.add_user_cmd = []
# If a user is added that doesn't exist on the system, should I try to create
# the system user?
# c.LocalAuthenticator.create_system_users = False
# Automatically whitelist anyone in this group.
# c.LocalAuthenticator.group_whitelist = set()
#------------------------------------------------------------------------------
# PAMAuthenticator configuration
#------------------------------------------------------------------------------
# Authenticate local Linux/UNIX users with PAM
# The encoding to use for PAM
# c.PAMAuthenticator.encoding = 'utf8'
# Whether to open PAM sessions when spawners are started.
#
# This may trigger things like mounting shared filsystems, loading credentials,
# etc. depending on system configuration, but it does not always work.
#
# It can be disabled with::
#
# c.PAMAuthenticator.open_sessions = False
# c.PAMAuthenticator.open_sessions = True
# The PAM service to use for authentication.
# c.PAMAuthenticator.service = 'login'
# Users granted hub admin rights (manage servers, add/remove users).
c.Authenticator.admin_users = {'jupyter'}
# Create missing local system accounts on first login; only appropriate on
# a trusted single-host setup.
c.LocalAuthenticator.create_system_users = True
|
[
"nathanhilbert@gmail.com"
] |
nathanhilbert@gmail.com
|
386b0f81ec45566dff962f1a32e176f4dffe47c5
|
747de538fb8ffc535a5d107cb5852c885da451b9
|
/tests.py
|
a39763e692dfb6cc2f993799b8207ea7e8561a3c
|
[] |
no_license
|
NilVidalRafols/iGNNspector
|
0a608ff2aa6a003a6ae22105e286db6023841bcb
|
3d8742e20a5df0606fe201440fe4c382350f68a6
|
refs/heads/main
| 2023-06-05T09:51:00.840402
| 2021-06-30T03:43:57
| 2021-06-30T03:43:57
| 346,396,589
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
import sys
from ignnspector import Graph
# from tests import custom_studies_test
# from tests import time_test
from tests import pyg_builder_test as b
# Run the PyG builder test suite immediately on import of this module.
# NOTE(review): consider wrapping in `if __name__ == "__main__":` so the
# tests do not run as an import side effect — confirm intended usage.
b.main()
|
[
"NilVidalRafols@github.com"
] |
NilVidalRafols@github.com
|
edf4eb25316ca438a6a3f65240a0e6680a163702
|
9958136f29a0c80c7ab64370b987e3ecb4bc178b
|
/other_tasks/GümüşBar.py
|
915c159143efc8a313511dd5f3abb8652e1bfd33
|
[] |
no_license
|
yasinalp/MMOPytautoGUI
|
9c8f06bbb707d8c43b2469ea46f85c7dedca324c
|
ec0c321ba6bbbe77420b7612c5166f0cab508cd1
|
refs/heads/master
| 2022-10-22T07:00:05.216298
| 2020-06-09T13:01:55
| 2020-06-09T13:01:55
| 270,980,799
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
import pyautogui
import time
# NOTE(review): coordinates, colours and the template image below are
# screen-layout specific — presumably tuned for one particular client
# window position; verify before reuse.
BAR_IMAGE = 'C:/Python34/Ticaret/100Mbar.png'  # template for the 100M bar icon
BAR_REGION = (1130, 385, 236, 360)             # (left, top, width, height) search area

# While the pixel at (31, 615) still matches the reference colour, keep
# sending ctrl+win+1 once per second (appears to cycle windows/desktops
# until that pixel changes — confirm).
while pyautogui.pixelMatchesColor(31, 615, (0, 55, 132), tolerance=5):
    pyautogui.hotkey('ctrl', 'win', '1')
    time.sleep(1)
time.sleep(1)

# Open the two bag slots ("Çanta" = bag).
pyautogui.moveTo(1250, 400)  # Çanta2
time.sleep(0.1)
pyautogui.click(1250, 400)
time.sleep(0.1)
pyautogui.moveTo(1210, 400)  # Çanta1
time.sleep(0.1)
pyautogui.click(1210, 400)

# Repeatedly locate the bar template inside the bag region and click it
# until no more matches are found.
last_x = last_y = None
bar = pyautogui.locateCenterOnScreen(BAR_IMAGE, region=BAR_REGION)
while bar is not None:
    last_x, last_y = bar
    pyautogui.moveTo(bar)
    time.sleep(0.1)
    pyautogui.click(last_x, last_y)
    time.sleep(0.2)
    bar = pyautogui.locateCenterOnScreen(BAR_IMAGE, region=BAR_REGION)

# Final confirmation click on the last bar position, guarded so the script
# no longer dies with a NameError when no bar was ever located (the
# original also crashed in moveTo(None)/click(None) on a missed match).
if last_x is not None:
    pyautogui.click(last_x, last_y)
|
[
"noreply@github.com"
] |
yasinalp.noreply@github.com
|
19b4fb325defbf324d6a956531086279389ef8f2
|
7508021d39ddb94dd74fb07106b4a649de848ec0
|
/member/migrations_old/0007_auto__add_field_person_other_club__add_field_person_nfb_membership__ch.py
|
2ea1fa84772e6a3cf493de2a73562e4ba8aa20a4
|
[] |
no_license
|
cschaffner/crunchsite
|
36c97a22a27c85a5e9450a90c57c5099f00db5bd
|
209b1f528d14e6cbf7a3bd1a60c253fb5ecfdc40
|
refs/heads/master
| 2021-01-01T18:06:27.873728
| 2016-06-22T20:32:32
| 2016-06-22T20:32:32
| 21,440,344
| 0
| 0
| null | 2015-04-15T19:48:42
| 2014-07-02T21:03:28
|
Python
|
UTF-8
|
Python
| false
| false
| 8,392
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add Person.other_club and Person.nfb_membership; widen Person.phone.

    Auto-generated South schema migration.  The ``models`` dict below is
    South's frozen snapshot of the ORM at generation time — do not edit it
    by hand.
    """

    def forwards(self, orm):
        """Apply: add the two new nullable columns, widen phone to 40 chars."""
        # Adding field 'Person.other_club'
        db.add_column(u'member_person', 'other_club',
                      self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Person.nfb_membership'
        db.add_column(u'member_person', 'nfb_membership',
                      self.gf('django.db.models.fields.NullBooleanField')(default=None, null=True, blank=True),
                      keep_default=False)

        # Changing field 'Person.phone'
        db.alter_column(u'member_person', 'phone', self.gf('django.db.models.fields.CharField')(max_length=40, null=True))

    def backwards(self, orm):
        """Revert: drop the new columns, shrink phone back to 20 chars.

        NOTE(review): shrinking phone to 20 may truncate data written while
        the forwards migration was live — confirm acceptable on rollback.
        """
        # Deleting field 'Person.other_club'
        db.delete_column(u'member_person', 'other_club')

        # Deleting field 'Person.nfb_membership'
        db.delete_column(u'member_person', 'nfb_membership')

        # Changing field 'Person.phone'
        db.alter_column(u'member_person', 'phone', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))

    # Frozen ORM snapshot used by South to build the `orm` argument above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'member.memberjob': {
            'Meta': {'object_name': 'MemberJob'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.CharField', [], {'default': "'1PL'", 'max_length': '3'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'to': u"orm['member.Person']"}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        u'member.person': {
            'Meta': {'ordering': "['last_name']", 'object_name': 'Person'},
            'account_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'citizenship': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'discount': ('django.db.models.fields.CharField', [], {'default': "'1NO'", 'max_length': '3'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'gender': ('django.db.models.fields.CharField', [], {'default': "'1M'", 'max_length': '2'}),
            'house_number': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'house_number_extension': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'null': 'True', 'blank': 'True'}),
            'iban_authorisation': ('django.db.models.fields.CharField', [], {'default': "'1OK'", 'max_length': '3'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'nfb_membership': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'other_club': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'playing_level': ('django.db.models.fields.CharField', [], {'default': "'1DN'", 'max_length': '3'}),
            'preposition': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '7', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['member']
|
[
"huebli@gmail.com"
] |
huebli@gmail.com
|
0304faa382e20e28438cce8b9ede88b3c4392faf
|
0c55e0094bf127d7753765115720a180cc01f60f
|
/pages/process.py
|
ed1679f32789e9d4806f9d1206f3401ebbfdb061
|
[
"MIT"
] |
permissive
|
TimTree/vgsales-project
|
bbfb4f13c6acc9125e7656f68a8e3a7ea31c63e5
|
5a6452754dab419d6e9a0bfc4c8df0b9240b6e6c
|
refs/heads/master
| 2021-06-26T20:14:48.334627
| 2019-11-01T21:34:18
| 2019-11-01T21:34:18
| 216,612,469
| 0
| 0
|
MIT
| 2021-03-20T02:07:07
| 2019-10-21T16:17:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,971
|
py
|
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
This web app is an assignment from Lambda School's Data Science program. The assignment asked to predict something given a dataset.
In my case, I chose to predict video game sales.
### The Dataset
In order to predict anything in data science, you need features - that is, ways we can influence the prediction. For instance, game genre is a feature because that could possibly impact game sales. As such, I needed a decent dataset with enough features.
I found the needed dataset from this [Reddit post](https://old.reddit.com/r/datasets/comments/bco2rd/video_games_sales_2019_dataset/), which linked to it on Kaggle, a dataset-sharing website. The dataset contained over 50,000 video games, with game sales and relevant characteristics like genre, ESRB Rating, and critic scores.
### Cleaning the Dataset
I loaded the dataset with Pandas, a python library that handles datasets. Here's a sample output of the dataset:
"""
),
html.Img(src='assets/InitialDataset.png', className='img-fluid'),
dcc.Markdown(
"""
As you can see, there's a lot of data here, and some are irrelevant and/or need cleaning. For instance, `basename` is pretty much the same as `Name`. And while we have game sales, some are expressed in the column `Total_Shipped`, while others divide sales into various regions (`NA_Sales`, `EU_Sales`, `JP_Sales`).
With some python magic, I removed completely irrelevant columns and merged total game sales in a single column. I also created a new column that said if the game sold over 100,000 copies and another column that averaged the critic and user scores (a form of feature engineering). In addition, I had to remove ~30,000 games from the list because they didn't have any game sales listed.
### Regression vs Classification
You may be wondering, why predict if a game will sell over 100,000 copies instead of specifically saying how many copies the game will sell?
To explain, have a look at this histogram:
"""
),
html.Img(src='assets/SalesHistogram.png', className='img-fluid'),
dcc.Markdown(
"""
This dataset has many outliers. Many games don't sell 100,000 copies, and there are a few that sell tens of millions of copies. If I tried predicting exact game sales with mathematical models, it's very likely some predictions will shoot way higher than they should due to the extreme outliers.
That's why I chose to go for a yes or no question (will the game sell 100k+). That way, I won't ever need to consider exact game sales in the model, and the results are more focused and less out of hand. In data science, this kind of question is a classification, while the former would be a regression.
### Making the Predictions
With the dataset cleaned, it was time to decide which features to use for the prediction.
Usually in data science, the more features, the better. But since this is a web app, I needed to condense the features so the user wouldn't get overwhelmed. For instance, instead of having dropdowns for the critic, user, and vgchartz scores, there's just one dropdown for the averaged score.
In the end, I chose the seven features you see in the predictions page. As for the prediction model, I chose the random forest classifier because it led to the highest accuracy score. I explain how accuracy scores work in the Insights page.
"""
),
],
)
layout = dbc.Row([column1])
|
[
"TimTree@users.noreply.github.com"
] |
TimTree@users.noreply.github.com
|
a15db1a5d70eecd1cee3696b8edbe078ca8c6be4
|
2290d076a66d6f93b8020990f188e5c72378e75d
|
/Test_Sorting/sorting_algorithms/select_sort.py
|
fe449fa4305a1ae21805d9996163ed7439236fd9
|
[] |
no_license
|
piotr-kalemba/Test_Sorting_Algorithms
|
e7dd8328c3b679d3f160abf3444f3b157ed4b822
|
34fa7b21e4e35c4907a5d4302547f9d88996eb95
|
refs/heads/master
| 2020-05-29T15:05:30.242348
| 2019-07-08T12:03:25
| 2019-07-08T12:03:25
| 189,212,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
def swap(a, i, j):
    """Exchange the elements at positions *i* and *j* of list *a* in place."""
    tmp = a[i]
    a[i] = a[j]
    a[j] = tmp
def select_sort(a):
    """Sort list *a* in place (ascending) using selection sort.

    For each position ``i`` the index of the smallest remaining element is
    found and swapped into place.  Still O(n^2) comparisons, but unlike the
    original ``a[i:].index(min(a[i:]))`` form this allocates no temporary
    sublists (the original copied the suffix twice per iteration).  Ties
    resolve to the first occurrence of the minimum, matching the original.
    """
    n = len(a)
    for i in range(n):
        # Index of the smallest element in a[i:]; first occurrence wins.
        j = min(range(i, n), key=a.__getitem__)
        a[i], a[j] = a[j], a[i]
def sort_method(a):
    """Sort *a* in place via select_sort and return the same list object.

    Uniform entry point presumably used by the test harness to time the
    different sorting algorithms.
    """
    select_sort(a)
    return a
|
[
"piotr-kalemba@wp.pl"
] |
piotr-kalemba@wp.pl
|
808be79d3703bc7fc250ae8e6a0c2921d71a56a9
|
d1d7d54a41f62566826bb23815920408dea586fd
|
/LAB 2/20.py
|
4d3b60bb2df3b6ac936078f89a8e56cb065f1f59
|
[] |
no_license
|
errordube/Python-Lab
|
0c2833a6318dd6f25e4ab8c637b577f1b103c406
|
f941483779afc4c51dd7654cfa54a6e922907b16
|
refs/heads/master
| 2020-04-07T23:48:59.119376
| 2018-11-23T11:59:07
| 2018-11-23T11:59:07
| 158,826,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
def computeHCF(x, y):
    """Return the highest common factor (GCD) of x and y.

    Uses the Euclidean algorithm, which runs in O(log min(x, y)) instead of
    the original O(min(x, y)) trial loop. Also fixes the original's crash
    (UnboundLocalError on ``hcf``) when ``min(x, y)`` is 0.
    """
    x, y = abs(x), abs(y)
    while y:
        x, y = y, x % y
    return x
# Simple CLI driver: read two integers and print their H.C.F. (a.k.a. GCD).
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
print("The H.C.F. of", num1,"and", num2,"is", computeHCF(num1, num2))
|
[
"noreply@github.com"
] |
errordube.noreply@github.com
|
71071f1ea11e242f5b4f263704aa139f4f55ffd3
|
f53a511da00ef2cd7e8d84afabe63cefee789078
|
/common/forms.py
|
51c2436558c163b045ea32581116c0162070c327
|
[] |
no_license
|
ffkirill/vertigo_billing
|
702f3ef99ffdd5ff8789da208ed22bc3d4b92ef5
|
68910792d489ad121eedf97f62790e35aba1373f
|
refs/heads/master
| 2016-08-07T08:25:20.193071
| 2014-06-27T17:22:27
| 2014-06-27T17:22:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
from bootstrap.forms import BootstrapModelForm, BootstrapMixin
class VertigoModelForm(BootstrapModelForm):
    """Bootstrap model form whose widgets all carry the 'form-control' CSS class."""
    def __init__(self, *args, **kwargs):
        super(VertigoModelForm, self).__init__(*args, **kwargs)
        # Bootstrap 3 styles inputs via this class attribute on every widget.
        for name in self.fields:
            self.fields[name].widget.attrs['class'] = 'form-control'
class VertigoBootstrapMixin(BootstrapMixin):
    """Bootstrap mixin variant that tags every widget with 'form-control'."""
    def __init__(self, *args, **kwargs):
        super(VertigoBootstrapMixin, self).__init__(*args, **kwargs)
        # Same widget decoration as VertigoModelForm, for non-model forms.
        for name in self.fields:
            self.fields[name].widget.attrs['class'] = 'form-control'
|
[
"ff.kirill@gmail.com"
] |
ff.kirill@gmail.com
|
58307de2922536d58c10778f26d70f420d9753bf
|
fc11922744718ffeea7b3ee1985370dfc709d4b1
|
/norfair/drawing/fixed_camera.py
|
6c04906323015695f4341655393a3ea19d06e91e
|
[
"BSD-3-Clause",
"GPL-1.0-or-later"
] |
permissive
|
tryolabs/norfair
|
cf33877d0d413bd37d89d9b238987a153974ac19
|
9b315b4cfa5f9cf145f068a21a2b7673703ac9e3
|
refs/heads/master
| 2023-08-27T02:07:39.995017
| 2023-05-15T21:21:45
| 2023-05-15T21:21:45
| 276,473,370
| 1,953
| 206
|
BSD-3-Clause
| 2023-09-12T11:08:58
| 2020-07-01T20:15:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,963
|
py
|
import numpy as np
from norfair.camera_motion import TranslationTransformation
from norfair.utils import warn_once
class FixedCamera:
    """
    Class used to stabilize video based on the camera motion.
    Starts with a larger frame, where the original frame is drawn on top of a black background.
    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.
    Useful for debugging or demoing the camera motion.

    !!! Warning
        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],
        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in
        unexpected behaviour.
    !!! Warning
        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.
    !!! Note
        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.
        In this case, a warning will be logged and the frames will be cropped to avoid errors.
    Parameters
    ----------
    scale : float, optional
        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.
        Use a bigger scale if the camera is moving too much.
    attenuation : float, optional
        Controls how fast the older frames fade to black.
    Examples
    --------
    >>> # setup
    >>> tracker = Tracker("frobenious", 100)
    >>> motion_estimator = MotionEstimator()
    >>> video = Video(input_path="video.mp4")
    >>> fixed_camera = FixedCamera()
    >>> # process video
    >>> for frame in video:
    >>>     coord_transformations = motion_estimator.update(frame)
    >>>     detections = get_detections(frame)
    >>>     tracked_objects = tracker.update(detections, coord_transformations)
    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
    >>>     video.write(bigger_frame)
    """
    def __init__(self, scale: float = 2, attenuation: float = 0.05):
        self.scale = scale
        self._background = None  # lazily allocated on the first adjust_frame call
        # Each update multiplies the old background by this factor, fading it out.
        self._attenuation_factor = 1 - attenuation
    def adjust_frame(
        self, frame: np.ndarray, coord_transformation: TranslationTransformation
    ) -> np.ndarray:
        """
        Render scaled up frame.
        Parameters
        ----------
        frame : np.ndarray
            The OpenCV frame.
        coord_transformation : TranslationTransformation
            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
        Returns
        -------
        np.ndarray
            The new bigger frame with the original frame drawn on it.
        """
        # initialize background if necessary
        if self._background is None:
            original_size = (
                frame.shape[1],
                frame.shape[0],
            )  # OpenCV format is (width, height)
            scaled_size = tuple(
                (np.array(original_size) * np.array(self.scale)).round().astype(int)
            )
            self._background = np.zeros(
                [scaled_size[1], scaled_size[0], frame.shape[-1]],
                frame.dtype,
            )
        else:
            # fade the previously drawn content toward black
            self._background = (self._background * self._attenuation_factor).astype(
                frame.dtype
            )
        # top_left is the anchor coordinate from where we start drawing the frame on top of the background
        # aim to draw it in the center of the background but transformations will move this point
        top_left = (
            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
        )
        # rel_to_abs works in (x, y); top_left is (y, x), hence the [::-1] flips
        top_left = (
            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
        )
        # box of the background that will be updated and the limits of it
        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
        background_size_y, background_size_x = self._background.shape[:2]
        # define box of the frame that will be used
        # if the scale is not enough to support the movement, warn the user but keep drawing
        # cropping the frame so that the operation doesn't fail
        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
        if (
            background_y0 < 0
            or background_x0 < 0
            or background_y1 > background_size_y
            or background_x1 > background_size_x
        ):
            warn_once(
                "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
            )
            # crop left or top of the frame if necessary
            frame_y0 = max(-background_y0, 0)
            frame_x0 = max(-background_x0, 0)
            # crop right or bottom of the frame if necessary
            frame_y1 = max(
                min(background_size_y - background_y0, background_y1 - background_y0), 0
            )
            frame_x1 = max(
                min(background_size_x - background_x0, background_x1 - background_x0), 0
            )
            # handle cases where the limits of the background become negative which numpy will interpret incorrectly
            background_y0 = max(background_y0, 0)
            background_x0 = max(background_x0, 0)
            background_y1 = max(background_y1, 0)
            background_x1 = max(background_x1, 0)
        self._background[
            background_y0:background_y1, background_x0:background_x1, :
        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
        return self._background
|
[
"javier.berneche@gmail.com"
] |
javier.berneche@gmail.com
|
51e03633507defc0725e3ab236f40f65f6320bea
|
ad734329f8ed827443019cbff24670de931b3e40
|
/healthtracker/questions/forms.py
|
c69b8cbfa01d2be7804dfc5c38178db44b6b7194
|
[] |
no_license
|
ihodes/healthtracker
|
d2739cfb5ae426b74384370d6fd489dc5046eabc
|
3c8c00b3d65692200382d924cc7849c48a665cf5
|
refs/heads/master
| 2021-01-10T09:46:03.244970
| 2015-01-26T03:41:46
| 2015-01-26T03:41:46
| 8,586,482
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
# -*- coding: utf-8 -*-
import flask
from flask.ext.wtf import Form
from wtforms import TextField, HiddenField, IntegerField, BooleanField, validators
class GreaterThan(object):
    """
    Validate that this field's value is greater than or equal to another
    field's value (validation fails only when it is strictly less).

    :param fieldname:
        The name of the other field to compare to.
    :param message:
        Error message to raise in case of a validation error. Can be
        interpolated with `%(other_label)s` and `%(other_name)s` to provide a
        more helpful error.
    """
    def __init__(self, fieldname, message=None):
        self.fieldname = fieldname
        self.message = message
    def __call__(self, form, field):
        try:
            other = form[self.fieldname]
        except KeyError:
            raise validators.ValidationError(field.gettext(u"Invalid field name '%s'.") % self.fieldname)
        # Empty values are skipped; only compare when the field holds data.
        if field.data != '' and field.data < other.data:
            d = {
                'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
                'other_name': self.fieldname
            }
            # Bug fix: the original assigned the fallback text to self.message,
            # mutating the validator instance (shared across form instances) on
            # the first failure. Use a local variable instead.
            message = self.message
            if message is None:
                message = field.gettext(u'Field must be greater than %(other_name)s.')
            raise validators.ValidationError(message % d)
class QuestionForm(Form):
    # Form for creating/editing a tracked health question.
    qtype = HiddenField('qtype', default='yesno')  # question kind; defaults to yes/no
    created_by = HiddenField('created_by')
    name = TextField('name')
    text = TextField('text')
    # Numeric bounds; both optional, but max must not be below min when given.
    min_value = IntegerField('min_value', validators=[validators.Optional()], default=0)
    max_value = IntegerField('max_value', validators=[validators.Optional(),
                                                     GreaterThan('min_value')], default=5)
    is_public = BooleanField('is_public', default=True)
    unlimited_number = BooleanField('unlimited_number', default=True)
|
[
"ihodes@mac.com"
] |
ihodes@mac.com
|
224c9864923e0183cc772f1f8d8bba53720fc038
|
265e58bd08dfb6989d7d4fcff38e0fcf6c51ac27
|
/Programas Python/Password(4).py
|
ab900be1daf03049668a2a01a9ab86570a193ec7
|
[] |
no_license
|
EduardoMSA/Proyectos_ISC_ITESM
|
34522a823c10283d6c3ff423689ebe5e9aa76faa
|
303711d741e4f1be38db0c76f2084450a7349e64
|
refs/heads/master
| 2020-05-25T17:03:12.950148
| 2019-05-22T01:05:00
| 2019-05-22T01:05:00
| 187,900,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
# coding: utf-8
# In[13]:
def Password(s):
    """Return the longest proper prefix of s that is also a suffix of s AND
    occurs somewhere strictly inside s; return 'Just a legend' if none exists.
    (Codeforces-style "Password" problem.)
    """
    n = len(s)
    # Try candidate lengths from longest to shortest so the first hit wins.
    for length in range(n - 1, 0, -1):
        # Cheap filter: the candidate border must agree on its first/last chars.
        if s[length - 1] != s[n - 1] or s[0] != s[n - length]:
            continue
        candidate = s[:length]
        if candidate != s[n - length:]:
            continue
        # The border must also appear strictly inside the string.
        if candidate in s[1:n - 1]:
            return candidate
    return "Just a legend"
# Read the puzzle string from stdin and print the longest "legendary" border.
s = input()
print(Password(s))
|
[
"noreply@github.com"
] |
EduardoMSA.noreply@github.com
|
74f01619015cd76cf6857f2b6b627930247698b7
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/qichamao_cmpInfo_20210202185034.py
|
23fa2ee5853d61c604c22d4c9f7af2ab73f9ba9b
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,309
|
py
|
import requests
from bs4 import BeautifulSoup
import time
import csv
import pandas as pd
import numpy as np
# login = {'user':'13710149700',
#          'password':'123456'}
# Target site: qichamao.com (a Chinese company-registry search service).
# requests.post('https://www.qichamao.com',data=login,headers=afterLogin_headers)
# Session cookie + browser-like headers captured from a logged-in browser
# session; the cookie expires and must be refreshed by hand.
afterLogin_headers = {'Cookie':'qznewsite.uid=y4eseo3a1q4xbrwimor3o5tm; qz.newsite=6C61702DD95709F9EE190BD7CCB7B62C97136BAC307B6F0B818EC0A943307DAB61627F0AC6CD818268C10D121B37F840C1EF255513480EC3012A7707443FE523DD7FF79A7F3058E5E7FB5CF3FE3544235D5313C4816B54C0CDB254F24D8ED5235B722BCBB23BE62B19A2370E7F0951CD92A731FE66C208D1BE78AA64758629806772055F7210C67D442DE7ABBE138EF387E6258291F8FBF85DFF6C785E362E2903705A0963369284E8652A61531293304D67EBB8D28775FBC7D7EBF16AC3CCA96F5A5D17; Hm_lvt_55ad112b0079dd9ab00429af7113d5e3=1611805092,1612262918; Hm_lpvt_55ad112b0079dd9ab00429af7113d5e3=1612262927',
'Referer':'https://www.qichamao.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
def get_compInfo(comp):
    """Look up company name `comp` on qichamao.com and scrape its registry record.

    Performs a site search, follows the first hit to the company detail page,
    and returns a dict keyed by the Chinese field labels (legal representative,
    taxpayer id, registered capital, unified social credit code, business
    scope, ...).
    NOTE(review): assumes the first search result is the right company and that
    the session cookie above is still valid — confirm before long crawls.
    """
    r = requests.get('https://www.qichamao.com/search/all/{}'.format(comp),headers=afterLogin_headers)
    r.raise_for_status()
    r.encoding = 'utf-8' #linux utf-8
    soup = BeautifulSoup(r.text,features="html.parser")
    url = 'http://www.qichamao.com' + soup.find(attrs={'class':'listsec_con'}).a['href']
    # soup.find(attrs={'class':'listsec_con'})
    time.sleep(5)  # throttle between the search page and the detail page
    rs = requests.get(url,headers=afterLogin_headers)
    rs.encoding='utf-8'
    soup2 = BeautifulSoup(rs.text,'html.parser')
    info = soup2.find(attrs={'class':'qd-table-body li-half f14'}).findAll('div')
    info = [i.get_text().strip() for i in info]
    # Field order mirrors the detail-page table layout.
    compinfo = {'法定代表人':info[0],
                '纳税人识别号':info[1],
                '名称':info[2],
                '机构代码':info[3],
                '注册号':info[4],
                '注册资本':info[5],
                '统一社会信用代码':info[6],
                '登记机关':info[7],
                '经营状态':info[8],
                '成立日期':info[9],
                '企业类型':info[10],
                '经营期限':info[11],
                '所属地区':info[12],
                '核准时间':info[13],
                '企业地址':info[14],
                '经营范围':info[15]}
    return compinfo
if __name__ == '__main__':
    import pickle

    stocks_pkl = 'C:/Users/chen/Desktop/IPO_info/zb_zxb_stocksInfo.pkl'
    with open(stocks_pkl, 'rb') as file:
        all_data = pickle.load(file)
    try:
        # Fill in the credit code / business scope for records that lack them.
        for v in all_data.values():
            if v['统一社会信用代码'] == '':
                compinfo = get_compInfo(v['机构名称'])
                v['统一社会信用代码'] = compinfo['统一社会信用代码']
                # Bug fix: the original wrote v[i]['经营范围'] here, indexing the
                # record with the loop counter; v IS the record.
                v['经营范围'] = compinfo['经营范围']
    except Exception as exc:
        # Checkpoint progress so a network error / site ban doesn't lose the
        # crawl. Bug fix: the original opened the checkpoint in 'rb' mode and
        # called pickle.dump(file, f, ...) with swapped arguments and an
        # undefined name f.
        print('stopped early, saving progress:', exc)
        with open(stocks_pkl, 'wb') as file:
            pickle.dump(all_data, file, pickle.HIGHEST_PROTOCOL)
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
ccd8e606272604a00cf077ced256354b41d45c2b
|
350ea74735002ddeb22b6c8a6fa0dc7628bc2451
|
/engineer/unittests/config_tests.py
|
f2c87e033b667113f7c6cec2a6d953a9411bd9dc
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pridkett/engineer
|
576b205c534ed0a9c81b44b3436f6610d870ccdc
|
2d0227f65fbd977cb84f138c043cdbf8f6ab5351
|
refs/heads/master
| 2021-01-18T05:42:25.439204
| 2012-12-06T20:47:51
| 2012-12-06T20:47:51
| 7,017,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,044
|
py
|
# coding=utf-8
import logging
import os
from path import path
from testfixtures import LogCapture
from engineer.log import bootstrap
from engineer.plugins import load_plugins
from engineer.unittests import CopyDataTestCase, SettingsTestCase
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
# Locations of the on-disk fixtures used by the test cases below.
test_data_root = path(__file__).dirname() / 'test_data'
simple_site = test_data_root / 'simple_site'
class BaseTestCase(CopyDataTestCase):
    # Shared fixture: boot the logging/plugin infrastructure and work inside a
    # disposable copy of the simple_site test data.
    def setUp(self):
        bootstrap() #bootstrap logging infrastructure
        load_plugins() #load plugins
        self.source_path = simple_site
        os.chdir(self.copied_data_path)
class TestConfig(BaseTestCase):
    def test_config_yaml(self):
        """Settings load correctly from a plain config.yaml."""
        from engineer.conf import settings
        settings.reload('config.yaml')
        self.assertEqual(settings.SITE_TITLE, 'Test Config')
        self.assertEqual(settings.HOME_URL, '/')
    def test_global_settings(self):
        """All EngineerConfiguration instances share state"""
        from engineer.conf import settings as s1
        from engineer.conf import EngineerConfiguration
        s2 = EngineerConfiguration()
        self.assertEqual(s1.SITE_TITLE, s2.SITE_TITLE)
    def test_manual_config_yaml(self):
        """Creating an EngineerConfiguration manually also shares state with configs created other ways"""
        from engineer.conf import settings as s1
        from engineer.conf import EngineerConfiguration
        os.chdir(test_data_root)
        s2 = EngineerConfiguration('configs/config2.yaml')
        self.assertEqual(s1.SITE_TITLE, s2.SITE_TITLE)
    def test_config_inheritance(self):
        """A config that inherits from another resolves inherited values."""
        from engineer.conf import settings
        settings.reload('inheritance.yaml')
        self.assertEqual(settings.SITE_TITLE, 'Inheritance Test')
        self.assertEqual(settings.HOME_URL, '/')
    def test_config_inheritance_dicts(self):
        """Dict-valued settings merge across inheritance rather than replace."""
        from engineer.conf import settings
        settings.reload('inheritance_dicts.yaml')
        expected = {
            'key1': 'value1new',
            'key2': 'value2',
            'key3': 'value3'
        }
        self.assertEqual(settings.test_dict, expected)
    def test_deprecated_settings(self):
        """Deprecated settings still load but emit deprecation warnings."""
        from engineer.conf import settings
        with LogCapture('engineer.conf', level=logging.WARNING) as log_output:
            settings.reload('deprecated_settings.yaml')
        log_output.check(
            ('engineer.conf',
             'CONSOLE',
             "Loading configuration from %s\deprecated_settings.yaml." % self.copied_data_path),
            ('engineer.conf', 'WARNING', "The 'NORMALIZE_INPUT_FILES' setting was deprecated in version 0.4: This "
                                         "setting is now ignored."),
            ('engineer.conf', 'WARNING', "The 'NORMALIZE_INPUT_FILE_MASK' setting was deprecated in version 0.4: "
                                         "This setting is now ignored.")
        )
|
[
"tyler@tylerbutler.com"
] |
tyler@tylerbutler.com
|
de001f929d93ab043a3ecef62cd39654249ae9ba
|
f8a4fe5da0db0f857f70565930b439ea372ac945
|
/pbb/views/__init__.py
|
3104ccf92b8307f8b15679cfa6f241ef09f9488b
|
[] |
no_license
|
aagusti/opensipkd-pbb-old
|
27dadf7526277662ae54179806f32d1e2b0926b5
|
333a0dc9dc58d0c0666386d7cbfe7a9a8a0e5096
|
refs/heads/master
| 2021-05-01T21:30:45.407063
| 2015-12-13T16:27:41
| 2015-12-13T16:27:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,236
|
py
|
from datetime import datetime
from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPFound,
HTTPForbidden,
)
from pyramid.security import (
remember,
forget,
authenticated_userid,
)
import transaction
import colander
from deform import (
Form,
ValidationFailure,
widget,
)
from ..tools import create_now
from ..models import (
DBSession,
User,
)
########
# Home #
########
@view_config(route_name='home', renderer='templates/home.pt', permission='view')
def view_home(request):
    """Landing page for anonymous visitors."""
    return {'project': 'Opensipkd PBB'}
@view_config(route_name='home-auth', renderer='templates/home.pt', permission='view')
def view_homeauth(request):
    """Landing page for authenticated users (same template as view_home)."""
    return {'project': 'Opensipkd PBB'}
#########
# Login #
#########
class Login(colander.Schema):
    # Login form schema: plain username plus a masked password input.
    username = colander.SchemaNode(colander.String(),
                                   oid='username')
    password = colander.SchemaNode(colander.String(),
                                   widget=widget.PasswordWidget(),
                                   oid='password')
# http://deformdemo.repoze.org/interfield/
def login_validator(form, value):
    """Deform validator: reject the login unless the user exists, has a
    password set, and the submitted password matches.

    A single generic 'Login failed' message avoids leaking which check failed.
    """
    user = form.user
    # Short-circuit keeps check_password from running on a missing user.
    if not user or not user.user_password or not user.check_password(value['password']):
        raise colander.Invalid(form, 'Login failed')
def get_login_headers(request, user):
    # Remember the user (by email) in the auth cookie and stamp the login time.
    headers = remember(request, user.email)
    user.last_login_date = create_now()
    DBSession.add(user)
    DBSession.flush()
    # NOTE(review): explicit commit ends the current transaction early —
    # presumably so the login timestamp survives later request errors; confirm.
    transaction.commit()
    return headers
@view_config(context=HTTPForbidden, renderer='templates/login.pt')
@view_config(route_name='login', renderer='templates/login.pt')
def view_login(request):
    """Render the login form and authenticate POSTed credentials.

    Fixes vs. original: removed the statements after `return dict(form=form)`
    inside the except clause (they were unreachable), dropped the unused
    result of form.validate(), and replaced the Python-2-only
    `except ValidationFailure, e` syntax (the bound name was only used by the
    unreachable code anyway).
    """
    if authenticated_userid(request):
        return HTTPFound(location=request.route_url('home'))
    schema = Login(validator=login_validator)
    form = Form(schema, buttons=('login',))
    if 'login' in request.POST:
        controls = request.POST.items()
        identity = request.POST.get('username')
        user = schema.user = User.get_by_identity(identity)
        try:
            form.validate(controls)
        except ValidationFailure:
            # Validation failed: re-render the page with the bound form.
            return dict(form=form)
        headers = get_login_headers(request, user)
        return HTTPFound(location=request.route_url('home'),
                         headers=headers)
    elif 'login failed' in request.session:
        r = dict(form=request.session['login failed'])
        del request.session['login failed']
        return r
    return dict(form=form)
@view_config(route_name='logout')
def view_logout(request):
    """Clear the auth cookie and send the user back to the home page."""
    headers = forget(request)
    destination = request.route_url('home')
    return HTTPFound(location=destination, headers=headers)
###################
# Change password #
###################
class Password(colander.Schema):
    # Change-password form: current password plus the new one typed twice.
    # Labels are in Indonesian ("Kata Sandi" = password).
    old_password = colander.SchemaNode(colander.String(),
                                       title="Kata Sandi Lama",
                                       widget=widget.PasswordWidget())
    new_password = colander.SchemaNode(colander.String(),
                                       title="Kata Sandi Baru",
                                       widget=widget.PasswordWidget())
    retype_password = colander.SchemaNode(colander.String(),
                                          title="Ketik Ulang Kata Sandi",
                                          widget=widget.PasswordWidget())
def password_validator(form, value):
    """Require the correct current password and two matching new-password entries."""
    current_ok = form.request.user.check_password(value['old_password'])
    if not current_ok:
        raise colander.Invalid(form, 'Invalid old password.')
    if value['new_password'] != value['retype_password']:
        raise colander.Invalid(form, 'Retype mismatch.')
@view_config(route_name='password', renderer='templates/password.pt',
             permission='view')
def view_password(request):
    """Change-password view: validate the old password and store the new one."""
    schema = Password(validator=password_validator)
    form = Form(schema, buttons=('simpan', 'batal'))
    if 'simpan' in request.POST:
        schema.request = request
        controls = request.POST.items()
        try:
            c = form.validate(controls)
        except ValidationFailure as e:
            # Stash the rendered errors; they are displayed after the redirect.
            request.session['invalid password'] = e.render()
            return HTTPFound(location=request.route_url('password'))
        user = request.user
        user.password = c['new_password']
        DBSession.add(user)
        DBSession.flush()
        transaction.commit()
        request.session.flash('Password telah berhasil dirubah.')
        return HTTPFound(location=request.route_url('reklame'))
    # Bug fix: this branch was nested under `if request.POST:`, but the
    # redirect above arrives as a GET request, so the stored validation errors
    # were never shown. Check the session on every request method.
    if 'invalid password' in request.session:
        r = dict(form=request.session['invalid password'])
        del request.session['invalid password']
        return r
    return dict(form=form.render())
|
[
"aa.gustiana@gmail.com"
] |
aa.gustiana@gmail.com
|
3c9f7c4c6c272f5e17c9724a399749a113eaf730
|
c9b7782e6464d7d26e46825232daa51f41e2cd7b
|
/lista.py
|
f5cca0d01718a3f20a98e4c03f6f5e7999b5abd9
|
[] |
no_license
|
A01377832/Mision_06
|
ea4a4d067350748340c5bb2432d3d6e972cb90e9
|
9021e0bf5c762250f7231e323ecf408c1e0743d7
|
refs/heads/master
| 2022-09-02T23:14:40.294809
| 2020-05-29T20:56:32
| 2020-05-29T20:56:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
#Autor: Ana Fernanda Martínez
#Ejercicio con listas
#Ejercicio 1
def recortarLista(lista):
    """Return a NEW list equal to lista without its first and last elements.

    Bug fix: the original used list.remove(), which deletes the first
    occurrence of a *value*, so it trimmed the wrong elements when the list
    contained duplicates (e.g. [1, 2, 1]). Slicing also handles lists of
    length <= 2 naturally (returns []).
    """
    return lista[1:-1]
#Ejercicio 2
def estanOrdenados(lista):
    """Return True if lista is already in ascending order.

    sorted() builds the ordered copy, so the input is never mutated.
    """
    return lista == sorted(lista)
#Ejercicio 3
def sonAnagramas(cadena1, cadena2):
    """Return True if the two strings are anagrams, ignoring case.

    Bug fix: the original sorted only the first character list before
    comparing, so valid anagrams whose second string was not already in
    alphabetical order were reported as non-anagrams.
    """
    return sorted(cadena1.upper()) == sorted(cadena2.upper())
#Ejercicio 4
def hayDuplicados(lista):
    """Return True if any value appears more than once in lista."""
    # count() keeps this working even for unhashable elements (e.g. sublists).
    return any(lista.count(valor) > 1 for valor in lista)
#Ejercicio 5
def borrarDuplicados(lista):
    """Remove duplicates from lista in place, keeping the LAST occurrence of
    each repeated value — the same result as the original.

    Performance fix: the original restarted a full scan (via hayDuplicados)
    after every single removal, giving roughly O(n^3) behavior; this single
    left-to-right sweep is O(n^2).
    """
    i = 0
    while i < len(lista):
        if lista.count(lista[i]) > 1:
            # An identical value appears later in the list; drop this copy.
            del lista[i]
        else:
            i += 1
#Función principal
def main ():
    """Demo driver: exercises each list utility above with sample data."""
    # Exercise 1: trim the first and last elements
    print("Ejercicio 1: ")
    lista = [1,2,3,4,5]
    nuevaLista = recortarLista(lista)
    print("La lista", lista, "recortada es: ", nuevaLista)
    lista1_2 = [1,2]
    nuevaLista = recortarLista(lista1_2)
    print("La lista", lista1_2, "recortada queda así: ", nuevaLista)
    print("_____________________________")
    # Exercise 2: check whether a list is already sorted
    print("Ejercicio 2: ")
    lista2_1 = [1,2,3,4,5,6,7]
    print ("La secuencia", lista2_1)
    orden = estanOrdenados(lista2_1)
    if orden == True:
        print ("está ordenada")
    else:
        print ("no está ordenada")
    lista2_2 = [7,5,4,2]
    print ("La secuencia", lista2_2)
    orden = estanOrdenados(lista2_2)
    if orden == True:
        print ("está ordenada")
    else:
        print ("no está ordenada")
    print("_____________________________")
    # Exercise 3: anagram check
    print("Ejercicio 3: ")
    a = "roma"
    b = "amor"
    print(a, "y", b)
    if sonAnagramas (a, b) == True:
        print ("sí son anagramas")
    else:
        print ("no son anagramas")
    b = "anime"
    c= "calaca"
    print(b, "y", c)
    if sonAnagramas (b, c) == True:
        print ("sí son anagramas")
    else:
        print ("no son anagramas")
    print("_____________________________")
    # Exercise 4: duplicate detection
    print("Ejercicio 4: ")
    lista4_1 = [3,2,5,67,8,9,40]
    if hayDuplicados(lista4_1) == False:
        print("En la lista:",lista4_1 , "no tiene duplicados")
    else:
        print("En la lista",lista4_1 , "hay duplicados")
    lista4_2 = [2,4,55,60,55,1]
    if hayDuplicados(lista4_2) == False:
        print("En la lista", lista4_2, "no tiene duplicados")
    else:
        print("En la lista", lista4_2, "tiene duplicados")
    print("_____________________________")
    # Exercise 5: remove duplicates in place
    lista5 = [1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,]
    print("La lista original es: ", lista5)
    borrarDuplicados(lista5)
    print("Si los duplicados son eliminados es: ", lista5)
    lista5_2= [2,4,5,7,9,2]
    print("La lista original es: ", lista5_2)
    borrarDuplicados(lista5_2)
    print("Si los duplicados son eliminados es: ", lista5_2)
    print("_____________________________")
main()
|
[
"noreply@github.com"
] |
A01377832.noreply@github.com
|
d5b42b93b8afa5614f86641bad57d66c9d3db2c9
|
ed8e842c9813ccaf9eeef9b7446294ff2ac0716a
|
/cadpy/timetable/migrations/0005_auto_20200910_1915.py
|
870699b069c57d19712b453273f706bea95f4319
|
[] |
no_license
|
shanesoysa/CAD
|
89353e1dfa6a0b1f4074f23bc3c57cb41cfd28ec
|
af32baa5f4b15ce990a97bc40ee561bd7c7ff40a
|
refs/heads/master
| 2023-04-14T05:27:49.879339
| 2020-10-12T18:29:26
| 2020-10-12T18:29:26
| 286,243,466
| 2
| 0
| null | 2023-04-10T07:44:58
| 2020-08-09T13:34:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
# Generated by Django 3.1 on 2020-09-10 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: add a unique constraint to
    # Programme.programme_abbv (max_length stays 20).
    dependencies = [
        ('timetable', '0004_auto_20200909_1511'),
    ]
    operations = [
        migrations.AlterField(
            model_name='programme',
            name='programme_abbv',
            field=models.CharField(max_length=20, unique=True),
        ),
    ]
|
[
"rehani44perera@gmail.com"
] |
rehani44perera@gmail.com
|
8920cfdf3f6ac9451b85fb30a81bb9da93c0f5fb
|
0022232ab0dc5382d596581357ffaaad16b526cc
|
/infra/backup.py
|
a2c6a52beba1706b7c0f4e36e40a923d567a1c30
|
[] |
no_license
|
dr-natetorious/aws-emr-hive
|
1806231f9c2877629b361a3a38615c0c46d4878b
|
7ec8483e8fb270c1f7fe034780b55e4cd37485d8
|
refs/heads/master
| 2023-04-19T04:09:36.162220
| 2021-04-29T22:11:02
| 2021-04-29T22:11:02
| 362,522,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
from typing import List
from infra.landing_zone import ILandingZone
from aws_cdk import (
core,
aws_backup as backup,
aws_iam as iam,
aws_kms as kms,
aws_sns as sns,
)
class BackupStrategyConstruct(core.Construct):
    def __init__(self, scope:core.Construct, id:str, landing_zone:ILandingZone, **kwargs):
        """
        Configure the account backup strategy for a landing zone: a
        KMS-encrypted AWS Backup vault with SNS notifications, a backup
        service role, and a default daily + weekly plan that selects
        resources tagged with the zone name.
        (The original docstring read "Configure Dns Resolver" — a
        copy/paste leftover.)
        """
        super().__init__(scope,id, **kwargs)
        region = core.Stack.of(self).region
        self.encryption_key = kms.Key(self,'EncryptionKey',
            description='Encryption Key for BackupStrategy')
        self.topic = sns.Topic(self,'Topic')
        self.role = iam.Role(self,'Role',
            description='Account Backup Role',
            assumed_by= iam.ServicePrincipal(service='backup'))
        # Vault policy lets the backup role copy recovery points into this vault.
        self.vault = backup.BackupVault(self,'Vault',
            encryption_key=self.encryption_key,
            notification_topic= self.topic,
            backup_vault_name='{}-Backup-Vault'.format(landing_zone.zone_name),
            access_policy= iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect= iam.Effect.ALLOW,
                        resources=["*"],
                        actions=['backup:CopyIntoBackupVault'],
                        principals= [
                            iam.ArnPrincipal(arn = self.role.role_arn)
                        ])
                ]))
        self.default_plan = backup.BackupPlan(self,'DefaultPlan',
            backup_vault= self.vault,
            backup_plan_name='Default Plan {} in {}'.format(landing_zone.zone_name, region),
            backup_plan_rules=[
                backup.BackupPlanRule.daily(),
                backup.BackupPlanRule.weekly(),
            ])
        # Back up anything tagged landing_zone=<zone name>; restores allowed.
        self.default_plan.add_selection('SelectionPolicy',
            allow_restores=True,
            role=self.role,
            resources=[
                backup.BackupResource.from_tag("landing_zone", landing_zone.zone_name),
            ])
|
[
"nate@bachmeier"
] |
nate@bachmeier
|
2f7ac23001956b4c1523aab7ac6226d2da155d0f
|
db1aabc54998f99b9d77aafad167265c92394593
|
/hw13_train.py
|
a925acab2587f2d697d507b845c554d074c471de
|
[] |
no_license
|
Stanwang1210/ML_HW13
|
02c252deff002f9272c3f088940c614f4b6d88be
|
0f3863781f0c5449116868ea2aef191d7a7576c8
|
refs/heads/master
| 2022-11-06T15:08:22.125551
| 2020-06-30T06:55:14
| 2020-06-30T06:55:14
| 273,843,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146,116
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 14:15:50 2020
@author: 王式珩
"""
import sys
workspace_dir = sys.argv[1]#'HW13_Data' — dataset root from the command line
model_path = sys.argv[2]  # where the trained model checkpoint is saved
"""接著我們把 dataset 的檔案解壓縮
因為檔案非常大,要等一下子,可以先執行解壓縮,同時看一下 model 的部分程式
"""
#!tar -zxvf "{workspace_dir}/Omniglot.tar.gz" -C "{workspace_dir}/" #這行會印出解壓縮的所有檔案,因為很煩所以我註解掉了
"""我們看一下 Omniglot 的 dataset 長什麼樣子"""
from PIL import Image
#from IPython.display import display
#for i in range(10, 20):
# im = Image.open("Omniglot/images_background/Japanese_(hiragana).0/character13/0500_" + str (i) + ".png")
# display(im)
"""## **Step 2: 建立模型**
以下我們就要開始建立核心的 MAML 模型
首先我們將需要的套件引入
"""
# Import modules we need
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import glob
from tqdm import tqdm
import numpy as np
from collections import OrderedDict
"""```
# This is formatted as code
```
接著我們要建立一個 nn.Module,作為 omniglot 的分類器 (Classifier)
我們使用的是 CNN-based 的分類器。
以下是 MAML 的演算法:

由於在第10行,我們是要對原本的參數 θ 微分,並非 inner-loop (Line5~8) 的 θ' 微分,因此在 inner-loop,我們需要用 functional forward 的方式算出 input image 的 output logits,而不是直接用 nn.module 裡面的 forward(直接對 θ 微分)。在下面我們分別定義了 functional forward 以及 forward 函數。
"""
def ConvBlock(in_ch, out_ch):
    """Conv 3x3 -> BatchNorm -> ReLU -> 2x2 max-pool block (halves H and W).

    Note: the MAML paper uses strided convolutions on Omniglot; this
    implementation uses max pooling instead (as the paper does for
    mini-ImageNet).
    """
    layers = [
        nn.Conv2d(in_ch, out_ch, 3, padding=1),
        nn.BatchNorm2d(out_ch),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=2, stride=2),
    ]
    return nn.Sequential(*layers)
def ConvBlockFunction(x, w, b, w_bn, b_bn):
    """Functional twin of ConvBlock, parameterized by explicit weights.

    Lets MAML run the forward pass under the fast weights θ' instead of the
    module's own parameters. Batch norm runs in training mode with batch
    statistics (no running averages).
    """
    out = F.conv2d(x, w, b, padding=1)
    out = F.batch_norm(out, running_mean=None, running_var=None,
                       weight=w_bn, bias=b_bn, training=True)
    out = F.relu(out)
    return F.max_pool2d(out, kernel_size=2, stride=2)
class Classifier(nn.Module):
    """4-block CNN for k-way Omniglot classification, with a functional
    forward pass so MAML can evaluate it under fast weights θ'."""
    def __init__(self, in_ch, k_way):
        super(Classifier, self).__init__()
        self.conv1 = ConvBlock(in_ch, 64)
        self.conv2 = ConvBlock(64, 64)
        self.conv3 = ConvBlock(64, 64)
        self.conv4 = ConvBlock(64, 64)
        self.logits = nn.Linear(64, k_way)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        # Bug fix: the original called nn.Flatten(x), which constructs a
        # Flatten *module* with the tensor as its start_dim argument instead
        # of flattening x, and then passed that module to the linear layer.
        # Mirror functional_forward's reshape instead.
        x = x.view(x.shape[0], -1)
        x = self.logits(x)
        return x
    def functional_forward(self, x, params):
        '''
        Forward pass using externally supplied parameters (MAML inner loop).
        Arguments:
            x: input images [batch, 1, 28, 28]
            params: OrderedDict of the model's parameters — each block's
                convolution weight/bias and batch-norm weight/bias, plus the
                final linear layer's weight/bias.
        '''
        for block in [1, 2, 3, 4]:
            x = ConvBlockFunction(x, params[f'conv{block}.0.weight'], params[f'conv{block}.0.bias'],
                                  params.get(f'conv{block}.1.weight'), params.get(f'conv{block}.1.bias'))
        x = x.view(x.shape[0], -1)
        x = F.linear(x, params['logits.weight'], params['logits.bias'])
        return x
"""這個函數是用來產生 label 的。在 n_way, k_shot 的 few-shot classification 問題中,每個 task 會有 n_way 個類別,每個類別k_shot張圖片。這是產生一個 n_way, k_shot 分類問題的 label 的函數"""
def create_label(n_way, k_shot):
    """Labels for an n_way k_shot task: k_shot zeros, then k_shot ones, etc."""
    flat = [cls for cls in range(n_way) for _ in range(k_shot)]
    return torch.tensor(flat).long()
# 我們試著產生 5 way 2 shot 的 label 看看
create_label(5, 2)
"""接下來這裡是 MAML 的核心。演算法就跟原文完全一樣,這個函數做的事情就是用 "一個 meta-batch的 data" 更新參數。這裡助教實作的是二階MAML(inner_train_step = 1),對應老師投影片 meta learning p.13~p.18。如果要找一階的數學推導,在老師投影片 p.25。
(http://speech.ee.ntu.edu.tw/~tlkagk/courses/ML_2019/Lecture/Meta1%20(v6).pdf)
以下詳細解釋:
"""
def MAML(model, optimizer, x, n_way, k_shot, q_query, loss_fn, epcoh, inner_train_step = 1, inner_lr = 0.4, train = True):
"""
Args:
x is the input omniglot images for a meta_step, shape = [batch_size, n_way * (k_shot + q_query), 1, 28, 28]
n_way: 每個分類的 task 要有幾個 class
k_shot: 每個類別在 training 的時候會有多少張照片
q_query: 在 testing 時,每個類別會用多少張照片 update
"""
criterion = loss_fn
task_loss = [] # 這裡面之後會放入每個 task 的 loss
task_acc = [] # 這裡面之後會放入每個 task 的 loss
for meta_batch in x:
train_set = meta_batch[:n_way*k_shot] # train_set 是我們拿來 update inner loop 參數的 data
val_set = meta_batch[n_way*k_shot:] # val_set 是我們拿來 update outer loop 參數的 data
fast_weights = OrderedDict(model.named_parameters()) # 在 inner loop update 參數時,我們不能動到實際參數,因此用 fast_weights 來儲存新的參數 θ'
for inner_step in range(inner_train_step): # 這個 for loop 是 Algorithm2 的 line 7~8
# 實際上我們 inner loop 只有 update 一次 gradients,不過某些 task 可能會需要多次 update inner loop 的 θ',
# 所以我們還是用 for loop 來寫
train_label = create_label(n_way, k_shot).cuda()
logits = model.functional_forward(train_set, fast_weights)
loss = criterion(logits, train_label)
grads = torch.autograd.grad(loss, fast_weights.values(), create_graph = True) # 這裡是要計算出 loss 對 θ 的微分 (∇loss)
fast_weights = OrderedDict((name, param - inner_lr * grad)
for ((name, param), grad) in zip(fast_weights.items(), grads)) # 這裡是用剛剛算出的 ∇loss 來 update θ 變成 θ'
val_label = create_label(n_way, q_query).cuda()
logits = model.functional_forward(val_set, fast_weights) # 這裡用 val_set 和 θ' 算 logit
loss = criterion(logits, val_label) # 這裡用 val_set 和 θ' 算 loss
task_loss.append(loss) # 把這個 task 的 loss 丟進 task_loss 裡面
acc = np.asarray([torch.argmax(logits, -1).cpu().numpy() == val_label.cpu().numpy()]).mean() # 算 accuracy
task_acc.append(acc)
model.train()
if epoch % 2 == 0:
optimizer.zero_grad()
meta_batch_loss = torch.stack(task_loss).mean() # 我們要用一整個 batch 的 loss 來 update θ (不是 θ')
if train:
meta_batch_loss.backward()
if epoch % 2 == 0:
optimizer.step()
task_acc = np.mean(task_acc)
return meta_batch_loss, task_acc
"""定義 dataset。這個 dataset 會回傳某個 character 的 image,總共會有 k_shot+q_query 張,所以回傳的 tensor 大小是 [k_shot+q_query, 1, 28, 28]"""
class Omniglot(Dataset):
def __init__(self, data_dir, k_way, q_query):
self.file_list = [f for f in glob.glob(data_dir + "**/character*", recursive=True)]
self.transform = transforms.Compose([transforms.ToTensor()])
self.n = k_way + q_query
def __getitem__(self, idx):
sample = np.arange(20)
np.random.shuffle(sample) # 這裡是為了等一下要 random sample 出我們要的 character
img_path = self.file_list[idx]
img_list = [f for f in glob.glob(img_path + "**/*.png", recursive=True)]
img_list.sort()
imgs = [self.transform(Image.open(img_file)) for img_file in img_list]
imgs = torch.stack(imgs)[sample[:self.n]] # 每個 character,取出 k_way + q_query 個
return imgs
def __len__(self):
return len(self.file_list)
"""## **Step 3: 開始訓練**
定義 hyperparameter
"""
n_way = 5
k_shot = 1
q_query = 1
inner_train_step = 1
inner_lr = 0.4
meta_lr = 0.001
meta_batch_size = 32
max_epoch = 100
eval_batches = test_batches = 20
train_data_path = os.path.join(workspace_dir,'Omniglot/images_background/')
test_data_path = os.path.join(workspace_dir,'Omniglot/images_evaluation/')
"""初始化 dataloader"""
#dataset = Omniglot(train_data_path, k_shot, q_query)
train_set, val_set = torch.utils.data.random_split(Omniglot(train_data_path, k_shot, q_query), [3200,656])
train_loader = DataLoader(train_set,
batch_size = n_way, # 這裡的 batch size 並不是 meta batch size, 而是一個 task裡面會有多少不同的
# characters,也就是 few-shot classifiecation 的 n_way
num_workers = 8,
shuffle = True,
drop_last = True)
val_loader = DataLoader(val_set,
batch_size = n_way,
num_workers = 8,
shuffle = True,
drop_last = True)
test_loader = DataLoader(Omniglot(test_data_path, k_shot, q_query),
batch_size = n_way,
num_workers = 8,
shuffle = True,
drop_last = True)
train_iter = iter(train_loader)
val_iter = iter(val_loader)
test_iter = iter(test_loader)
"""初始化 model 和 optimizer"""
meta_model = Classifier(1, n_way).cuda()
optimizer = torch.optim.Adam(meta_model.parameters(), lr = meta_lr)
loss_fn = nn.CrossEntropyLoss().cuda()
"""這是一個用來抓一個 meta-batch 的 data 出來的 function"""
def get_meta_batch(meta_batch_size, k_shot, q_query, data_loader, iterator):
data = []
for _ in range(meta_batch_size):
try:
task_data = iterator.next() # 一筆 task_data 就是一個 task 裡面的 data,大小是 [n_way, k_shot+q_query, 1, 28, 28]
except StopIteration:
iterator = iter(data_loader)
task_data = iterator.next()
train_data = task_data[:, :k_shot].reshape(-1, 1, 28, 28)
val_data = task_data[:, k_shot:].reshape(-1, 1, 28, 28)
task_data = torch.cat((train_data, val_data), 0)
data.append(task_data)
return torch.stack(data).cuda(), iterator
"""開始 train!!!"""
for epoch in range(max_epoch):
print("Epoch %d" %(epoch))
train_meta_loss = []
train_acc = []
for step in tqdm(range(len(train_loader) // (meta_batch_size))): # 這裡的 step 是一次 meta-gradinet update step
x, train_iter = get_meta_batch(meta_batch_size, k_shot, q_query, train_loader, train_iter)
meta_loss, acc = MAML(meta_model, optimizer, x, n_way, k_shot, q_query, loss_fn, epcoh = epoch)
train_meta_loss.append(meta_loss.item())
train_acc.append(acc)
print(" Loss : ", np.mean(train_meta_loss))
print(" Accuracy: ", np.mean(train_acc))
# 每個 epoch 結束後,看看 validation accuracy 如何
# 助教並沒有做 early stopping,同學如果覺得有需要是可以做的
val_acc = []
for eval_step in tqdm(range(len(val_loader) // (eval_batches))):
x, val_iter = get_meta_batch(eval_batches, k_shot, q_query, val_loader, val_iter)
_, acc = MAML(meta_model, optimizer, x, n_way, k_shot, q_query, loss_fn, epcoh = epoch, inner_train_step = 3, train = False) # testing時,我們更新三次 inner-step
val_acc.append(acc)
print(" Validation accuracy: ", np.mean(val_acc))
torch.save(meta_model.state_dict(), os.path.join(model_path, 'model_b07701209.bin'))
|
[
"ch995308@gmail.com"
] |
ch995308@gmail.com
|
b6aae62636e47dfa3e6947450b42fa9406b95b58
|
818d3556aaf830f7a0711dea79c44f22a5d6a69e
|
/catalog/admin.py
|
0f7b15d1b76d9b889477e8d80eda953e19129a17
|
[] |
no_license
|
kelyip99/django_local_library
|
f3b1580535f811494e4332e5bf6edfa1302f985e
|
1f3fbcc247ba606f1fa872b2d3ceb353e4d5aa59
|
refs/heads/master
| 2020-05-30T11:12:11.991642
| 2019-06-01T06:27:38
| 2019-06-01T06:27:38
| 189,693,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
from django.contrib import admin
# Register your models here.
from catalog.models import Author, Genre, Book, BookInstance
#admin.site.register(Book)
#admin.site.register(Author)
admin.site.register(Genre)
#admin.site.register(BookInstance)
# Define the admin class
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
class BooksInstanceInline(admin.TabularInline):
model = BookInstance
# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'display_genre')
inlines = [BooksInstanceInline]
# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
list_filter = ('status', 'due_back')
fieldsets = (
(None, {
'fields': ('book', 'imprint', 'id')
}),
('Availability', {
'fields': ('status', 'due_back')
}),
)
|
[
"kelyip@gmail.com"
] |
kelyip@gmail.com
|
320d64cb2e3c2d21af72fd2be18bd590d13d625b
|
e0e948d55f8db8a6fcacd3ab2a7e0d1497a4e716
|
/file_instance.py
|
c2dc80e103466be521531bfa102e04b16fa701d9
|
[] |
no_license
|
jordsti/sufs
|
11f8c8f5f714761f3884675b8f63c88229cc25cb
|
b1838972e08777678587c2c717db11eb023f00ea
|
refs/heads/master
| 2016-09-06T21:12:35.329764
| 2014-07-21T22:02:45
| 2014-07-21T22:02:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,407
|
py
|
__author__ = 'JordSti'
import network
import block
class file_block(block.block):
def __init__(self, parent_hash, block_id, data=None):
block.block.__init__(self, parent_hash, block_id)
self.data = data
self.length = len(data)
class file_instance:
(DefaultBlockSize) = 1024
def __init__(self, file_entry, block_size=DefaultBlockSize):
self.entry = file_entry
self.block_size = block_size
self.__blocks = []
self.length = 0
self.hash = self.entry.get_hash() #todo need to verify that hash maybe. ?
self.__load_blocks()
def generate_file_info_packet(self):
p = network.packet()
p.header = network.packet_header()
p.header.packet_type = network.packet_header.FileInformation
p.header.fields['name'] = self.entry.name
p.header.fields['hash'] = self.hash
p.header.fields['length'] = self.length
b_str = ""
for b in self.each_blocks_length():
b_str += "%d," % b
b_str = b_str.rstrip(',')
p.header.fields['blocks'] = b_str
return p
def blocks_count(self):
return len(self.__blocks)
def each_blocks_length(self):
lengths = []
for b in self.__blocks:
lengths.append(b.length)
return lengths
def get_block(self, b_i):
return self.__blocks[b_i]
def get_block_packet(self, b_i):
b = self.__blocks[b_i]
p = network.packet()
p.header = network.packet_header()
p.header.packet_type = network.packet_header.FileBlock
p.header.length = b.length
p.header.fields['block_id'] = b.block_id
p.header.fields['parent_hash'] = self.hash
p.bytes = b.data
#print p.to_string()
return p
def __load_blocks(self):
fp = open(self.entry.get_fullpath(), 'rb')
chunk = fp.read(self.block_size)
self.length = 0
b_i = 0
while len(chunk) == self.block_size:
self.length += self.block_size
block = file_block(self.hash, b_i, chunk)
self.__blocks.append(block)
chunk = fp.read(self.block_size)
b_i += 1
if len(chunk) > 0:
block = file_block(self.hash, b_i, chunk)
self.length += len(chunk)
self.__blocks.append(block)
fp.close()
|
[
"jord52@gmail.com"
] |
jord52@gmail.com
|
273b210bcebf54dd3ed4b1884abd6bb9070894cb
|
ec7c4148725d68c7fe246619a422aeb1d7719d1e
|
/pyshadowsocks/protocol/shadowsocks/client.py
|
50b8ce6f92e6d807769017f7d80d38867dfdaf5c
|
[
"MIT"
] |
permissive
|
FTwOoO/pyShadowsocks
|
a1ff33796f327ebd7eba29ff910b9650143d1dd9
|
452323e30c4b97d322cbb67e9bbc7c4549e67b5f
|
refs/heads/master
| 2021-01-18T00:00:28.969382
| 2020-12-08T12:37:47
| 2020-12-08T12:37:47
| 53,716,642
| 21
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: booopooob@gmail.com
#
# Info: asyncio - Stream<https://docs.python.org/3/library/asyncio-stream.html#asyncio-tcp-echo-server-streams>
# - Transport<https://docs.python.org/3/library/asyncio-protocol.html>
#
from protocol.COMMON.common_client_relay_protocol import CommonClientRelayProtocol
from protocol.shadowsocks.encoder import ShadowsocksEncryptionWrapperEncoder
class ShadowsocksClientRelayProtocol(CommonClientRelayProtocol):
def create_encoder(self):
return ShadowsocksEncryptionWrapperEncoder(
encrypt_method=self.config.cipher_method,
password=self.config.password,
encript_mode=True)
def create_decoder(self):
return ShadowsocksEncryptionWrapperEncoder(
encrypt_method=self.config.cipher_method,
password=self.config.password,
encript_mode=False)
|
[
"booopooob@gmail.com"
] |
booopooob@gmail.com
|
a7cbe793db0ef9035c8f238811ce432679915a9c
|
001ee3277f57519d1639aa7702724232c1c4e948
|
/multipage_backup/app_pages/app3.py
|
1cffbceaab49f46c3814e92f1b8c79e4fb9dac48
|
[] |
no_license
|
kestefon/dev
|
a18c33e18a8ee8ffe41349b3d8441b28fead9b64
|
06f5045aa051e01eae1d794a3292c5e1d2292e42
|
refs/heads/master
| 2020-04-07T04:08:18.964640
| 2019-03-14T04:28:28
| 2019-03-14T04:28:28
| 158,041,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
layout = html.Div([
html.H3(children="App 3"),
dcc.Link('Go to App 1', href='/page-1'),
dcc.Link('Go to App 2', href='/page-2'),
dcc.Link('Go to App 3', href='/page-3')
])
|
[
"kestefon@gmail.com"
] |
kestefon@gmail.com
|
55ac6d7265c63689a96ee072219e7ee700d94fa1
|
eae704ccddad3e7774b8de47e6620aa55706be97
|
/Capture.py
|
56ea8be4a12733ccf1bdd3f8384d5d7b559e5203
|
[] |
no_license
|
mixify/Ptolemy
|
32b5478169c43571107d4cf279d71837eb8d37ec
|
682d5c6c30f58ccfddda8ad3439c136d55070e75
|
refs/heads/master
| 2020-04-27T17:36:40.288571
| 2019-06-18T12:31:13
| 2019-06-18T12:31:13
| 174,528,644
| 3
| 0
| null | 2019-03-09T09:18:28
| 2019-03-08T11:48:06
| null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
import numpy as np
from PIL import ImageGrab
import cv2
import time
def process_img(image):
original_image = image
##convert to gray
processed_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
##edge detection
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
return processed_img
def main():
last_time = time.time()
while (True):
screen = np.array(ImageGrab.grab())
##print('look took {} seconds'.format(time.time()-last_time))
last_time = time.time()
new_screen = process_img(screen)
cv2.imshow('window', new_screen)
## ##cv2.imshow('window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
main()
|
[
"ohsg0315@naver.com"
] |
ohsg0315@naver.com
|
52b20659db76ef82f748aba5040175eb212060a5
|
57094f0d09fd3e74eeb511e94400c3ec97051ad3
|
/Quax_dev_archive/integrals_dev/tei_trials/teis_trial2/custom_boys/primitive_trial2.py
|
b6f5e70300c1b4aa596fff3f12bbfd0543cd8f18
|
[] |
no_license
|
adabbott/Research_Notes
|
cccba246e81065dc4a663703fe225fc1ebbf806b
|
644394edff99dc6542e8ae6bd0ce8bcf158cff69
|
refs/heads/master
| 2023-05-12T20:26:58.938617
| 2021-06-02T17:15:35
| 2021-06-02T17:15:35
| 119,863,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
import jax
import jax.numpy as np
import numpy as onp
from functools import partial
from jax.config import config; config.update("jax_enable_x64", True)
_float = {onp.floating, jax.dtypes.bfloat16}
def boys(x):
return boys_p.bind(x)
boys_p = jax.core.Primitive('boys')
# evalutation rule of primitive
def boys_eval(x):
x = x + 1e-12
return 0.88622692545275798 * jax.lax.rsqrt(x) * jax.lax.erf(jax.lax.sqrt(x))
#def boys_jvp_rule(g, x):
# tmp = boys(x)
# result = jax.lax.select(x < 1e-8, (-0.3333333333333333333) + + (2 * x * 0.1) + -(3 * x**2 * 0.023809523809523808) + (4 * x**3 * 0.004629629629629629),
# jax.lax.div(-jax.lax.sub(tmp, jax.lax.exp(-x)), jax.lax.mul(jax.lax._const(x,2), x)))
# return result
#def boys_jvp_rule(g, x):
# tmp = boys(x)
# result = jax.lax.select(x < 1e-8, (-0.3333333333333333333) + (2 * x * 0.1) + -(3 * x**2 * 0.023809523809523808) + (4 * x**3 * 0.004629629629629629),
# jax.lax.div(-jax.lax.sub(tmp, jax.lax.exp(-x)), jax.lax.mul(jax.lax._const(x,2), x)))
# return result
def boys_jvp_rule(g, ans, x):
result = jax.lax.select(x < 1e-8, (-0.3333333333333333333) + (2 * x * 0.1) + -(3 * x**2 * 0.023809523809523808) + (4 * x**3 * 0.004629629629629629),
jax.lax.div(-jax.lax.sub(ans, jax.lax.exp(-x)), jax.lax.mul(jax.lax._const(x,2), x)))
return result
def f_vjp(x):
return boys(x), lambda g: (2 * g * x,)
jax.lax.lax.standard_unop(_float, 'boys')
boys_p.def_impl(boys_eval)
#jax.interpreters.ad.defjvp(boys_p, boys_jvp_rule)
# okay, defjvp2 assumes 3 arguments: tangent, result of original function, function argument
jax.interpreters.ad.defjvp2(boys_p, boys_jvp_rule)
jax.interpreters.ad.defvjp(boys_p, f_vjp)
print(boys(0.5))
print(jax.jacfwd(boys)(0.5))
print(jax.jacrev(boys)(0.5))
|
[
"adabbott@uga.edu"
] |
adabbott@uga.edu
|
c236f001913a5f71151ca7fa4dfda4d570104b86
|
42b71380ef5ea0fe904127bef483d7854facbd68
|
/blog/models.py
|
0eb6108265978aa2533da7c273d1edf8c25e73a8
|
[] |
no_license
|
akash2415/my-first-blog
|
86247b78ca35e1d1fe9eb859a73e9becdfcac3bb
|
f653dbaa8018f5c0175e43138f7251e33e1ab526
|
refs/heads/master
| 2020-03-14T03:57:51.272942
| 2018-05-30T17:00:31
| 2018-05-30T17:00:31
| 45,772,664
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank = True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
|
[
"akash2415@gmail.com"
] |
akash2415@gmail.com
|
a460fa5c367496aefb344eb50f9975890e0e03f5
|
c1eb833c4164b6d411cecc2d18edb959971b1395
|
/apps/operations/migrations/0001_initial.py
|
b93de47e36fbf0bc576c69924e7fada22253fd8c
|
[] |
no_license
|
Ylrving/Django_sxonline
|
90c848fdf36534509d7f75cd4bd5d4ee5587a95d
|
5f360ae2db59960e21793fa4a79e2a63892c038d
|
refs/heads/master
| 2020-05-23T22:53:08.894486
| 2019-05-16T09:00:04
| 2019-05-16T09:00:04
| 186,981,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,393
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2019-05-08 14:07
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('course', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments', models.CharField(max_length=200, verbose_name='评论')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course', verbose_name='课程')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '课程评论',
'verbose_name_plural': '课程评论',
},
),
migrations.CreateModel(
name='UserAsk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='姓名')),
('mobile', models.CharField(max_length=11, verbose_name='手机')),
('course_name', models.CharField(max_length=50, verbose_name='课程名')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '用户咨询',
'verbose_name_plural': '用户咨询',
},
),
migrations.CreateModel(
name='UserCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course', verbose_name='课程')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户课程',
'verbose_name_plural': '用户课程',
},
),
migrations.CreateModel(
name='UserFavorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fav_id', models.IntegerField(default=0, verbose_name='数据id')),
('fav_type', models.IntegerField(choices=[(1, '课程'), (2, '课程机构'), (3, '讲师')], default=1, verbose_name='收藏类型')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户收藏',
'verbose_name_plural': '用户收藏',
},
),
migrations.CreateModel(
name='UserMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.IntegerField(default=0, verbose_name='接受用户')),
('message', models.CharField(max_length=500, verbose_name='消息内容')),
('has_read', models.BooleanField(default=False, verbose_name='是否已读')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '用户消息',
'verbose_name_plural': '用户消息',
},
),
]
|
[
"670823709@qq.com"
] |
670823709@qq.com
|
06374d8b6babfcb333152e22478b38382e54f59b
|
ee1d3b46c1d5bfe3b262be15ee747b7ab16c722a
|
/random_problems/delete_str_repeats.py
|
0e43924e2b66ff06b3778a6a19f75da0ce7e56ac
|
[] |
no_license
|
razzlepdx/practice-algorithms
|
e45a21a061ebe02ff88c2c5c944c5cad3f91207e
|
ef8c5c5ea2340d698aafcd6f42b6153019428e24
|
refs/heads/master
| 2020-03-06T20:45:39.684838
| 2018-05-17T05:30:17
| 2018-05-17T05:30:17
| 127,061,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
'''
QUESTION: Given a string as your input, delete any reoccurring character,
and return the new string.
'''
# strategy 1 - iterate over string, utilize dictionary
def delete_repeat_chars(phrase):
""" Given a string, returns a new string that contains only the first
occurance of each letter. """
final_str = ''
letters = {}
for char in phrase:
if letters.get(char): # if already in dict, move on to next letter
continue
# otherwise, add char to dictionary and append to final_str
letters[char] = True
final_str += char
return final_str
print(delete_repeat_chars("aabbcc"))
#-------------------------------------------
# optimization - use set instead of dictionary, since value is not pertinent
def del_repeat_chars(phrase):
""" Given a string, returns a new string that contains only the first
occurance of each letter. """
final_str = ''
letters = set()
for char in phrase:
if char not in letters:
letters.add(char)
final_str += char
return final_str
print(del_repeat_chars("aabbcc"))
#------------------------------------------
# strategy 2 - split string, setify, and rejoin to return new string
def del_repeat_letters(phrase):
""" Given a string, returns a new strings that contains only one
occurance of each letter. """
final = set(list(phrase))
return "".join(final)
print(del_repeat_letters("aabbccdd"))
# cons: does not maintain order, no memory benefit
|
[
"rwilson.or@gmail.com"
] |
rwilson.or@gmail.com
|
5a6d4472e96309412ab947849c3e578304dea3cd
|
12d6e7075a8624047e78c719b1a0f2c81c08f842
|
/Python/3sum.py
|
6a2aa24c0e9aa76bd40b44e144a60195dad0def7
|
[
"MIT"
] |
permissive
|
gitPratikSingh/LeetCode-Solutions
|
5b0e98223b829f968a77fa0053bcc67cd8250b0e
|
0d86b9a6dccdabd0eb951095a736e729eb1080a2
|
refs/heads/master
| 2021-01-25T11:14:25.071566
| 2018-06-17T06:31:18
| 2018-06-17T06:31:18
| 123,382,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
class Solution:
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
res = []
for i in range(len(nums)):
if i>0 and nums[i]==nums[i-1]:
continue
j=i+1
k=len(nums)-1
while j<k:
vsum = nums[i] + nums[j] + nums[k]
if vsum == 0:
res.append([nums[i], nums[j], nums[k]])
numj = nums[j]
while j<k and numj == nums[j]:
j += 1
numk = nums[k]
while k>j and numk == nums[k]:
k -= 1
elif vsum>0:
k -= 1
else:
j += 1
return res
|
[
"noreply@github.com"
] |
gitPratikSingh.noreply@github.com
|
b147fc402feea3e90f5771b924a0345cdfc6a316
|
73f5051a5413f9f82229421d50d5ae3af075a395
|
/openai/mybalance.py
|
9cd38cf7c1cbc1fad5af839f4c7966cf8d7c86d2
|
[] |
no_license
|
flaska/tensorflow
|
9a2704334a08b97aaddaaece9b28356f79800f43
|
0606858d3e9a0bfc1c2e46da19645709ccd8a023
|
refs/heads/master
| 2021-09-07T22:08:38.786701
| 2018-03-01T21:24:24
| 2018-03-01T21:24:24
| 116,314,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import gym
import random
import numpy
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import mean, median
from collections import Counter
LR = 1e-3
env = gym.make('CartPole-v0')
env.reset();
goal_steps = 500
score_requirement = 50
initial_games = 10000
def random_games():
for episode in range(5):
env.reset();
for t in range(goal_steps):
env.render() # performance killer
action = env.action_space.sample() # takes random action
observation, reward, done, info = env.step(action)
print(observation)
if (done):
break
#random_games();
def initial_population():
training_data = []
for _ in range(initial_games):
score = 0
game_memory = []
for _ in range(goal_steps)
action = random.randrange(0,2)
observation, reward, done, info = env.step(action)
game_memory.append([observation, action])
score += reward
if done:
break
if score >= score_requirement
training_data.append([data[0], output])
env.reset()
scores.append(score)
training_data_save = np.array(training_data)
np.save('saved.npy', training_data_save)
print('Average accepted score', mean(accepted_scores))
print('Median accepted score', median(accepted_scores))
print(Counter(accepted_scores))
|
[
"jakub.flaska@nih.gov"
] |
jakub.flaska@nih.gov
|
4c8cb1e79b3b034781b6d1a4bfdeb60f2c91a057
|
63432112bb1f72465e04771dbdabb050a44ce025
|
/eventanalyzer/management/commands/reportsexecute.py
|
ce93a426931724b598b16ca398b7d2523dbc3333
|
[] |
no_license
|
MichalMaM/Django-event-analyzer
|
1e0b9f4a384ee6bf54070941959969dbb90d3958
|
e2181b0dbbb16c6a4748c94bbc5197cc62aca2ea
|
refs/heads/master
| 2021-01-01T19:23:54.018055
| 2011-04-20T09:13:39
| 2011-04-20T09:13:39
| 1,053,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
"""
This file execute reports that are saved in db
"""
from django.core.management.base import BaseCommand, CommandError
from eventanalyzer.jobs import create_reports
class Command(BaseCommand):
args = '<>'
help = 'execute saved periodic reports'
def handle(self, *args, **options):
"""
execute saved periodic reports
"""
if not create_reports():
raise CommandError('error - in execute priodic reports')
print'execute reports successfull'
|
[
"michal.dub@centrumholdings.com"
] |
michal.dub@centrumholdings.com
|
ef48691497b882b63085622f491b55a4f665df1c
|
c6f06ee1506c064c12ba55d7a90f4f148fd5127a
|
/flask-api/app.py
|
574718689dc4432b1e048d4e67dd954c77db1ef2
|
[] |
no_license
|
ashleylobo/Restaurant-Recommendation
|
2fc6f0168c8c038a4759d0895674e3bbe2cb3cdf
|
6e1251515e1fe98121070b77e650fa068a056b0e
|
refs/heads/master
| 2022-12-10T21:48:23.125071
| 2019-04-21T17:53:55
| 2019-04-21T17:53:55
| 179,105,307
| 0
| 0
| null | 2022-12-08T04:56:41
| 2019-04-02T15:18:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
import flask
import os
from flask import jsonify, request
from flask import flash, redirect, url_for, session
from joblib import load
from flask_cors import CORS, cross_origin
import requests, json
import pandas as pd
import requests
predictions = pd.read_csv("predictionApi.csv",index_col=0)
prof = pd.read_csv("profileApi.csv")
hotelData = pd.read_csv("hotels-dataset.csv")
hotelData.set_index('id', inplace=True)
prof.set_index('userID', inplace=True)
app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.secret_key = 'super secret key'
cors = CORS(app, resources={r"/*": {"origins": "*"}})
@app.route('/test', methods=['GET','POST'])
def test():
print(prof.head())
data = [ 1 , 2 , "Buckle My Shoe" , 3 , 4 , "Shut the Door" ]
return jsonify( data )
@app.route('/predict', methods=['GET'])
def predict():
# print( json.dumps( request.json['data'] ) )
try :
print("hi")
user = request.args.get('user')
myProfile = {
"U1001" : {
"name" : "Riya Patil",
"password" : "123"
},
"U1002" : {
"name" : "Prachiti Patil",
"password" : "123"
},
"U1003" : {
"name" : "Amey Patil",
"password" : "123"
},
"U1004" : {
"name" : "Priyanka Patil",
"password" : "123"
}
}
userData = ( prof.loc[user , :].to_json() )
userData = json.loads(userData.replace("\'", '"'))
hotelList = predictions[user]
# print(hotelList[:10])
arr = ( hotelList.sort_values(ascending=False)[:10].index )
# print(arr)
hotels = hotelData.loc[ arr , : ].to_dict('records')
# hotels = json.loads(hotels.replace("\'", '"'))
print(hotels)
# hot = {}
# i = 0
# for row in hotels:
# hot[i] = row
# i += 1
# # print(hot)
return jsonify( { "userData" : userData, "profile" : myProfile[user],"hotels" : hotels , "status" : True } )
except Exception as e:
return jsonify( { "result" : "error" , "status" : False } )
@app.route('/', methods=['GET'])
def home():
print("loaded")
return "Welcome to My API"
if __name__ == '__main__':
app.run()
|
[
"yenwiikae@gmail.com"
] |
yenwiikae@gmail.com
|
3a31fc2dd4edca530b34791eb9d6e5597e8e55b0
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/services/services/ad_group_feed_service/transports/__init__.py
|
32e70d411d86385df4f1f48cce1526a3dd19af67
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import AdGroupFeedServiceTransport
from .grpc import AdGroupFeedServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdGroupFeedServiceTransport]]
_transport_registry["grpc"] = AdGroupFeedServiceGrpcTransport
__all__ = (
"AdGroupFeedServiceTransport",
"AdGroupFeedServiceGrpcTransport",
)
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
932ec679b52ef439e98647e518026b2e061da703
|
c13e453358cc87050fd196319576fbc392fbeee3
|
/Generator/genProgramming/p10.py
|
125b038e6a3c2850db9e16eb8de2b75cb30d797f
|
[] |
no_license
|
ChrisJaunes/generating_test_paper
|
4eabcc60a28318ec24901e220a268e0dccaf69ce
|
e29eb725331cea807b3aeef908ab9c9e552b0ec4
|
refs/heads/main
| 2023-04-22T18:44:27.629498
| 2021-05-11T08:47:56
| 2021-05-11T08:47:56
| 362,047,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,320
|
py
|
import random
import string
from sample import ProgrammingProblemDesc, ProgrammingProblemSTD
def generatorProblemDesc() -> ProgrammingProblemDesc:
return ProgrammingProblemDesc("将数字用字符代替", """
给你一个下标从 0开始的字符串 s,它的偶数下标处为小写英文字母,奇数下标处为数字。
定义一个函数shift(c, x),其中c是一个字符且x是一个数字,函数返回字母表中c后面第 x个字符。
比方说,shift('a', 5) = 'f'和shift('x', 0) = 'x'。
对于每个 奇数下标i,你需要将数字s[i] 用shift(s[i-1], s[i])替换。
请你替换所有数字以后,将字符串 s返回。题目 保证shift(s[i-1], s[i])不会超过 'z'。
示例 1:
输入:a1c1e1
输出:abcdef
解释:数字被替换结果如下:
- s[1] -> shift('a',1) = 'b'
- s[3] -> shift('c',1) = 'd'
- s[5] -> shift('e',1) = 'f'
示例 2:
输入:a1b2c3d4e
输出:abbdcfdhe
解释:数字被替换结果如下:
- s[1] -> shift('a',1) = 'b'
- s[3] -> shift('b',2) = 'd'
- s[5] -> shift('c',3) = 'f'
- s[7] -> shift('d',4) = 'h'
提示:
1 <= s.length <= 100
s只包含小写英文字母和数字。
对所有 奇数 下标处的i,满足shift(s[i-1], s[i]) <= 'z'。
""")
def generatorProblemSTD() -> ProgrammingProblemSTD:
return ProgrammingProblemSTD("Java", """
import java.io.*;
import java.util.*;
public class Solution {
static public String replaceDigits(String s) {
char[] a=s.toCharArray();
char temp='a';
for(int i=0;i<a.length;i++){
if(a[i] <48 || a[i] >57 ){
temp=a[i];
}else{
a[i] = (char)(temp + Integer.parseInt(a[i]+""));
}
}
return String.valueOf(a);
}
public static void main(String[] args) {
Scanner in = new Scanner(System.in);
String s = in.next();
System.out.println(replaceDigits(s));
}
}
""")
def generatorProblemTestSingle(f, seed: int = 2021):
    """Write one random test case (a single line) to file object ``f``.

    The line has length 1..100 with lowercase letters at even indices and
    digits at odd indices, matching the problem constraints.  Output is
    deterministic for a given ``seed``: the exact sequence of ``random``
    calls from the original is preserved so seeded runs reproduce the
    same test data.
    """
    random.seed(seed)
    n = random.randint(1, 100)
    mix_string = []
    for i in range(n):
        if i % 2:
            # Odd index: one digit.  random.sample is kept (instead of
            # random.choice) so the random stream matches the original.
            mix_string.append(random.sample('0123456789', 1)[0])
        else:
            mix_string.append(random.choice(string.ascii_lowercase))
    # str.join accepts the list directly; the original wrapped it in a
    # redundant generator expression.
    print("".join(mix_string), file=f)
|
[
"201930343469@mail.scut.edu.cn"
] |
201930343469@mail.scut.edu.cn
|
9d7abb05c706a4078cc1f7b3d0acded44fe1087d
|
3943fb2652d754bbba4b1052a2a996578f92184e
|
/python/week 1/3day/exp/ex1.py
|
dde253e63eecbee2cc75ab072279adbce3840c40
|
[] |
no_license
|
esthergoldman/DI_Bootcamp
|
54fe2e140858600dcf33a46a954ed9221f8524b7
|
2f02ceb349e8239bb46ffcccfd4a15b1213e3656
|
refs/heads/master
| 2023-04-09T14:12:36.747406
| 2021-04-17T07:50:55
| 2021-04-17T07:50:55
| 355,103,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# Pair two parallel lists into a name -> number dictionary.
keys = ['Ten', 'Twenty', 'Thirty']
values = [10, 20, 30]

# FIX: a bare zip object prints as '<zip object at 0x...>' and can only be
# consumed once; materialize it so the pairs are actually visible.
zip_list = list(zip(keys, values))
print(zip_list)

# dict() accepts an iterable of (key, value) pairs directly — no manual
# indexing loop needed.
num_dict = dict(zip_list)
print(num_dict)
|
[
"esthergoldman@gmail.com"
] |
esthergoldman@gmail.com
|
4be43c66932644e65085748949e25ae90c6a63b2
|
8ce09769f4d9858cb02f5263a75d12ea11b68c65
|
/venv/bin/distro
|
65966f188d9325591927ed9aad569d7d2c416276
|
[] |
no_license
|
rabbanimd/ipFinder-webapp-Flask-RESTapi
|
50c16ae321cdb3a358b6734d0cb83df3aceba880
|
247e0e38e729b8be6648a8d34d147dbf449df2ff
|
refs/heads/master
| 2023-01-14T06:11:11.039045
| 2020-11-15T13:08:32
| 2020-11-15T13:08:32
| 312,975,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
#!/home/c0rt3s/PycharmProjects/flaskProject/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the 'distro' package: strips any
# '-script.pyw' / '.exe' suffix from argv[0] (a Windows launcher artifact)
# so the CLI sees a clean program name, then delegates to distro's main().
import re
import sys

from distro import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"c0rt3s@k4l1.k4l1"
] |
c0rt3s@k4l1.k4l1
|
|
2f64e51170ad09aae09260381e2fcf5f84d80675
|
3cf5fd02e548f55d756acd672dd2e5b113343d10
|
/src/cormorant/models/cormorant_lep.py
|
9c912782a6b8a01bbd34777a032234a3154cf0de
|
[] |
no_license
|
drorlab/cormorant
|
e6a196320dd8ca124283aa602442d639b4a4682c
|
3920a8f453c9c71c09b20c92c99fd0e9eec265ce
|
refs/heads/master
| 2023-05-08T03:04:59.271118
| 2021-05-15T08:02:18
| 2021-05-15T08:02:18
| 265,646,271
| 2
| 4
| null | 2020-05-20T17:59:50
| 2020-05-20T17:59:49
| null |
UTF-8
|
Python
| false
| false
| 10,182
|
py
|
import torch
import torch.nn as nn
import logging
from cormorant.cg_lib import CGModule, SphericalHarmonicsRel
from cormorant.models.cormorant_cg import CormorantCG
from cormorant.nn import RadialFilters
from cormorant.nn import InputLinear, InputMPNN
from cormorant.nn import OutputLinear, OutputPMLP, OutputSoftmax, GetScalarsAtom
from cormorant.nn import NoLayer
class CormorantLEP(CGModule):
    """
    Basic Cormorant Network used to train on BDBBind.

    Siamese variant: ``forward`` runs the same network once per structure
    in an input pair and returns the squared difference of the two
    per-structure predictions.

    Parameters
    ----------
    maxl : :obj:`int` of :obj:`list` of :obj:`int`
        Maximum weight in the output of CG products. (Expanded to list of
        length :obj:`num_cg_levels`)
    max_sh : :obj:`int` of :obj:`list` of :obj:`int`
        Maximum weight in the output of the spherical harmonics (Expanded to list of
        length :obj:`num_cg_levels`)
    num_cg_levels : :obj:`int`
        Number of cg levels to use.
    num_channels : :obj:`int` of :obj:`list` of :obj:`int`
        Number of channels that the output of each CG are mixed to (Expanded to list of
        length :obj:`num_cg_levels`)
    num_species : :obj:`int`
        Number of species of atoms included in the input dataset.
    device : :obj:`torch.device`
        Device to initialize the level to
    dtype : :obj:`torch.dtype`
        Data type to initialize the level to level to
    cg_dict : :obj:`nn.cg_lib.CGDict`
    """

    def __init__(self, maxl, max_sh, num_cg_levels, num_channels, num_species,
                 cutoff_type, hard_cut_rad, soft_cut_rad, soft_cut_width,
                 weight_init, level_gain, charge_power, basis_set,
                 charge_scale, gaussian_mask,  # top, input, num_mpnn_layers,
                 activation='leakyrelu', num_classes=2, cgprod_bounded=False,
                 cg_agg_normalization='none', cg_pow_normalization='none',
                 device=None, dtype=None, cg_dict=None):
        logging.info('Initializing network!')

        # Per-level hyperparameters may be given as scalars or short lists;
        # expand each to exactly one entry per CG level.
        level_gain = expand_var_list(level_gain, num_cg_levels)
        hard_cut_rad = expand_var_list(hard_cut_rad, num_cg_levels)
        soft_cut_rad = expand_var_list(soft_cut_rad, num_cg_levels)
        soft_cut_width = expand_var_list(soft_cut_width, num_cg_levels)
        maxl = expand_var_list(maxl, num_cg_levels)
        max_sh = expand_var_list(max_sh, num_cg_levels)
        # num_channels also covers the input level, hence one extra entry.
        num_channels = expand_var_list(num_channels, num_cg_levels+1)

        logging.info('hard_cut_rad: {}'.format(hard_cut_rad))
        logging.info('soft_cut_rad: {}'.format(soft_cut_rad))
        logging.info('soft_cut_width: {}'.format(soft_cut_width))
        logging.info('maxl: {}'.format(maxl))
        logging.info('max_sh: {}'.format(max_sh))
        logging.info('num_channels: {}'.format(num_channels))

        super().__init__(maxl=max(maxl+max_sh), device=device, dtype=dtype, cg_dict=cg_dict)
        device, dtype, cg_dict = self.device, self.dtype, self.cg_dict

        self.num_cg_levels = num_cg_levels
        self.num_channels = num_channels
        self.charge_power = charge_power
        self.charge_scale = charge_scale
        self.num_species = num_species

        # Set up spherical harmonics
        self.sph_harms = SphericalHarmonicsRel(max(max_sh), conj=True,
                                               device=device, dtype=dtype, cg_dict=cg_dict)

        # Set up position functions, now independent of spherical harmonics
        self.rad_funcs = RadialFilters(max_sh, basis_set, num_channels, num_cg_levels,
                                       device=self.device, dtype=self.dtype)
        tau_pos = self.rad_funcs.tau

        # One scalar feature per species per charge power (see prepare_input).
        num_scalars_in = self.num_species * (self.charge_power + 1)
        num_scalars_out = num_channels[0]

        self.input_func_atom = InputLinear(num_scalars_in, num_scalars_out,
                                           device=self.device, dtype=self.dtype)
        # No edge-level input features in this model.
        self.input_func_edge = NoLayer()

        tau_in_atom = self.input_func_atom.tau
        tau_in_edge = self.input_func_edge.tau

        self.cormorant_cg = CormorantCG(maxl, max_sh, tau_in_atom, tau_in_edge,
                                        tau_pos, num_cg_levels, num_channels, level_gain, weight_init,
                                        cutoff_type, hard_cut_rad, soft_cut_rad, soft_cut_width,
                                        cat=True, gaussian_mask=False,
                                        cgprod_bounded=cgprod_bounded,
                                        cg_agg_normalization=cg_agg_normalization,
                                        cg_pow_normalization=cg_pow_normalization,
                                        device=self.device, dtype=self.dtype, cg_dict=self.cg_dict)

        tau_cg_levels_atom = self.cormorant_cg.tau_levels_atom
        tau_cg_levels_edge = self.cormorant_cg.tau_levels_edge

        self.get_scalars_atom = GetScalarsAtom(tau_cg_levels_atom,
                                               device=self.device, dtype=self.dtype)
        self.get_scalars_edge = NoLayer()

        num_scalars_atom = self.get_scalars_atom.num_scalars
        num_scalars_edge = self.get_scalars_edge.num_scalars

        # Classification head over the atom-level scalar invariants.
        self.output_layer_atom = OutputSoftmax(num_scalars_atom, num_classes, bias=True,
                                               device=self.device, dtype=self.dtype)
        self.output_layer_edge = NoLayer()

        logging.info('Model initialized. Number of parameters: {}'.format(
            sum([p.nelement() for p in self.parameters()])))

    def forward_once(self, data):
        """
        Runs a single forward pass of the network on one structure.

        Parameters
        ----------
        data : :obj:`dict`
            Dictionary of data to pass to the network (see prepare_input
            for the keys that are read).

        Returns
        -------
        prediction : :obj:`torch.Tensor`
            Output of the classification head for this structure.
        atoms_all, edges_all
            Per-level atom and edge representations from the CG stack.
        """
        # Get and prepare the data
        atom_scalars, atom_mask, edge_scalars, edge_mask, atom_positions = self.prepare_input(data)

        # Calculate spherical harmonics and radial functions
        spherical_harmonics, norms = self.sph_harms(atom_positions, atom_positions)
        # norms > 0 excludes self-pairs (zero distance) from the radial mask.
        rad_func_levels = self.rad_funcs(norms, edge_mask * (norms > 0))

        # Prepare the input reps for both the atom and edge network
        atom_reps_in = self.input_func_atom(atom_scalars, atom_mask, edge_scalars, edge_mask, norms)
        edge_net_in = self.input_func_edge(atom_scalars, atom_mask, edge_scalars, edge_mask, norms)

        # Clebsch-Gordan layers central to the network
        atoms_all, edges_all = self.cormorant_cg(atom_reps_in, atom_mask, edge_net_in, edge_mask,
                                                 rad_func_levels, norms, spherical_harmonics)

        # Construct scalars for network output
        atom_scalars = self.get_scalars_atom(atoms_all)
        edge_scalars = self.get_scalars_edge(edges_all)

        # Prediction in this case will depend only on the atom_scalars. Can make
        # it more general here.
        prediction = self.output_layer_atom(atom_scalars, atom_mask)

        return prediction, atoms_all, edges_all

    def forward(self, data, covariance_test=False):
        """
        Runs the Siamese forward pass of the network.

        Parameters
        ----------
        data : :obj:`dict`
            Dictionary holding two structures, with per-structure keys
            suffixed '1' and '2' (e.g. 'charges1' / 'charges2').
        covariance_test : :obj:`bool`, optional
            If true, also returns the atom/edge representations of the
            first structure.

        Returns
        -------
        prediction : :obj:`torch.Tensor`
            Element-wise squared difference of the two per-structure
            predictions.
        """
        # Split the paired batch into two single-structure dicts sharing
        # the same label.
        data1 = {}
        data2 = {}
        data1['label'] = data['label']
        data2['label'] = data['label']
        data1['charges'] = data['charges1']
        data2['charges'] = data['charges2']
        data1['positions'] = data['positions1']
        data2['positions'] = data['positions2']
        data1['one_hot'] = data['one_hot1']
        data2['one_hot'] = data['one_hot2']
        data1['atom_mask'] = data['atom_mask1']
        data2['atom_mask'] = data['atom_mask2']
        data1['edge_mask'] = data['edge_mask1']
        data2['edge_mask'] = data['edge_mask2']

        prediction1, atoms_all1, edges_all1 = self.forward_once(data1)
        prediction2, atoms_all2, edges_all2 = self.forward_once(data2)

        prediction = (prediction2 - prediction1)**2

        # Covariance test
        if covariance_test:
            return prediction, atoms_all1, edges_all1
        else:
            return prediction

    def prepare_input(self, data):
        """
        Extracts and preprocesses one structure's tensors from the batch dict.

        Parameters
        ----------
        data : :obj:`dict`
            Must provide 'positions', 'one_hot', 'charges', 'atom_mask'
            and 'edge_mask' tensors.

        Returns
        -------
        atom_scalars : :obj:`torch.Tensor`
            Per-atom scalar features: one-hot species encoding times
            powers 0..charge_power of (charge / charge_scale), flattened.
        atom_mask : :obj:`torch.Tensor`
            Mask used for batching data.
        edge_scalars : :obj:`torch.Tensor`
            Empty tensor — this model uses no edge-level input features.
        edge_mask : :obj:`torch.Tensor`
            Mask used for batching data.
        atom_positions : :obj:`torch.Tensor`
            Positions of the atoms.
        """
        charge_power, charge_scale, device, dtype = self.charge_power, self.charge_scale, self.device, self.dtype

        atom_positions = data['positions'].to(device, dtype)
        one_hot = data['one_hot'].to(device, dtype)
        charges = data['charges'].to(device, dtype)
        atom_mask = data['atom_mask'].to(device)
        edge_mask = data['edge_mask'].to(device)

        # (charge/scale)**p for p = 0..charge_power, added as a trailing axis.
        charge_tensor = (charges.unsqueeze(-1)/charge_scale).pow(torch.arange(charge_power+1., device=device, dtype=dtype))
        charge_tensor = charge_tensor.view(charges.shape + (1, charge_power+1))
        # Outer product with the one-hot species encoding, flattened to a
        # single scalar-feature axis per atom.
        atom_scalars = (one_hot.unsqueeze(-1) * charge_tensor).view(charges.shape[:2] + (-1,))
        edge_scalars = torch.tensor([])

        return atom_scalars, atom_mask, edge_scalars, edge_mask, atom_positions
def expand_var_list(var, num_cg_levels):
    """Expand ``var`` to a list of length ``num_cg_levels``.

    A list is padded by repeating its last element; an int or float is
    repeated ``num_cg_levels`` times.  Any other type (including bool,
    which the original's exact-type check also rejected) raises
    ValueError.
    """
    if isinstance(var, list):
        # A list longer than num_cg_levels is returned unchanged
        # (the pad count is negative, yielding an empty extension).
        return var + (num_cg_levels - len(var)) * [var[-1]]
    # Exclude bool explicitly: isinstance(True, int) is True, but the
    # original ``type(var) in [float, int]`` rejected booleans.
    if isinstance(var, (int, float)) and not isinstance(var, bool):
        return [var] * num_cg_levels
    raise ValueError('Incorrect type {}'.format(type(var)))
|
[
"martinvoegele1989@gmail.com"
] |
martinvoegele1989@gmail.com
|
54ef8914727cd09544ff6c9b95288d9a954859c4
|
c19ba27d1a4aa2615e831e72f84dab05d37210c9
|
/single_byte_xor_cipher.py
|
07c55b9fcfc1c19557fdb37eee039e915b35b3c7
|
[] |
no_license
|
starVader/cryptography
|
5c1b2164b16078b4405943101c12d70a3ae0cfda
|
b64ab8e290c108e2256ba0909bdc87524065b43f
|
refs/heads/master
| 2020-09-05T03:05:25.701440
| 2019-11-06T11:03:29
| 2019-11-06T11:03:29
| 219,964,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
from fixed_xor import calculate_xor
def single_byte_xor_cipher(output):
    """Brute-force a single-byte XOR cipher (Cryptopals Set 1, Challenge 3).

    The original body only declared an unused ``alphabets`` list and
    implicitly returned None; this implements the intended attack: try
    all 256 one-byte keys against the hex-encoded ciphertext and return
    the candidate that looks most like English text.

    Parameters
    ----------
    output : str
        Hex-encoded ciphertext.

    Returns
    -------
    str
        The highest-scoring decrypted plaintext (undecodable bytes are
        replaced with U+FFFD).
    """
    data = bytes.fromhex(output)

    def _score(candidate):
        # Crude English-likeness heuristic: count ASCII letters and spaces.
        return sum(b == 0x20 or 65 <= b <= 90 or 97 <= b <= 122
                   for b in candidate)

    best = max((bytes(b ^ key for b in data) for key in range(256)),
               key=_score)
    return best.decode('ascii', errors='replace')
if __name__ == '__main__':
    # Hex-encoded ASCII that was XORed against a single unknown byte
    # (Cryptopals Set 1, Challenge 3 ciphertext).
    hex_enc_xored_str = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
    single_byte_xor_cipher(hex_enc_xored_str)
|
[
"rakesh@coffeebeans.io"
] |
rakesh@coffeebeans.io
|
6ff1199e076538633df47e9a7c40ce99750acf2d
|
f12907ab992b85a5e7e19953fbe1dab2305c8d2d
|
/CosasDeCasa/UserModeLoader/generator_data_file.py
|
3f4db3fabc0be714b5f7d61b030e32457fa3275c
|
[] |
no_license
|
Fare9/SomeVirusesTechniques
|
c460364c6b612d1c64fb88363da06ba48d312736
|
ca452c14de71ea11828aa7171562982a9dfff5aa
|
refs/heads/master
| 2020-05-30T17:46:33.427451
| 2019-06-23T11:54:09
| 2019-06-23T11:54:09
| 189,882,986
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,981
|
py
|
#!python3
#-*- coding: utf-8 -*-
__author__ = "Fare9"
__credits__ = ["Fare9"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Fare9"
__email__ = "farenain9@gmail.com"
__status__ = "Production"
import os # standard library
import sys
import random
import string
file_name = "data.cpp"
file_to_generate = '''
#include "common.h"
std::string key = "%s"
uint64_t file_size = 0x%X;
uint8_t encrypted_file[] = {%s};
'''
def randomString(stringLength=10):
    """Generate a random string of fixed length.

    SECURITY NOTE: this uses the ``random`` module, which is not
    cryptographically secure, yet the result is used as an RC4 key —
    a real key should come from ``secrets``.  Also note a-f/A-F and 0-9
    appear twice in the alphabet (once via ascii_letters/digits overlap
    with hexdigits), so those characters are more likely than the rest.
    """
    letters = string.ascii_letters + string.hexdigits
    # random.choices draws all characters in one call.
    return ''.join(random.choices(letters, k=stringLength))
def crypt(key, data):
    """RC4-encrypt/decrypt ``data`` with string ``key``.

    ``data`` is any indexable iterable of ints (e.g. ``bytes``); the
    return value is a list of ints.  RC4 is symmetric: applying crypt
    twice with the same key restores the input.  The original carried a
    dead Python-2 branch using ``unichr`` (a NameError on Python 3, and
    this file's shebang is python3); only the int-XOR path is kept.

    SECURITY NOTE: RC4 is cryptographically broken; kept only for
    compatibility with the consuming loader.
    """
    # Key-scheduling algorithm (KSA).
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + ord(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    # Pseudo-random generation algorithm (PRGA): XOR each data byte with
    # the next keystream byte.
    j = 0
    y = 0
    out = []
    for byte in data:
        j = (j + 1) % 256
        y = (y + S[j]) % 256
        S[j], S[y] = S[y], S[j]
        out.append(byte ^ S[(S[j] + S[y]) % 256])
    # Debug trace of the first few bytes (kept from the original).
    print("Real data = ")
    for a in data[0:10]:
        sys.stdout.write('%X ' % a)
    print("")
    print("Encrypted data = ")
    for a in out[0:10]:
        sys.stdout.write('%X ' % a)
    print("")
    print("Key = %s" % key)
    return out
def read_file_and_get_data(file_to_open=""):
'''
Method to read the file and generate the data to generate the file
'''
exists = os.path.isfile(file_to_open)
file_size = 0
file_content = ""
key = ""
if exists:
if (file_to_open.endswith('.dll')):
file_ = open(file_to_open,'rb')
data = file_.read()
file_size = len(data)
file_.close()
counter = 0
key = randomString(15)
data = crypt(key,data)
for c in data:
file_content += '0x%X,' % c
counter += 1
if counter == 10:
file_content += '\n'
counter = 0
if file_content[-1] == ',':
file_content = file_content[0:-1]
else:
print ('File must be .dll file')
else:
print ("File '%s' does not exists..." % (file_to_open))
return file_size, file_content, key
def main():
    """CLI entry point: read the .dll named in argv[1] and write data.cpp
    containing its RC4-encrypted bytes, the random key, and the size.

    Exits with status -1 on bad usage or when the payload could not be
    generated.
    """
    if len(sys.argv) != 2:
        print("USAGE: generator_data_file.py <dll_file_to_generate_data>")
        sys.exit(-1)
    file_size, file_content, key = read_file_and_get_data(str(sys.argv[1]))
    if file_size == 0 or file_content == "":
        print("Error generating data file")
        sys.exit(-1)
    data_file_content = file_to_generate % (key, file_size, file_content)
    # ``with`` guarantees the output file is flushed and closed even if
    # write() raises (the original left it open on error).
    with open(file_name, 'w') as opened_file:
        opened_file.write(data_file_content)


if __name__ == '__main__':
    main()
|
[
"eduardo.blazquez@edu.uah.es"
] |
eduardo.blazquez@edu.uah.es
|
788d31e1c22321163e5c3a2f40bc386cceba1c6c
|
0e6bdf6801934f7e5374c47159e8c7f5925cb95d
|
/src/utils/logger.py
|
139853abd0d6815d7af2a67907f3b7b6d1f7411f
|
[] |
no_license
|
HawChang/learn_tf2
|
5c8029cea339410f44e5f6c7b51b113e9ce326c1
|
21de6d6659281542264c20dd1a58a845214fdb3d
|
refs/heads/master
| 2021-03-26T13:07:14.187899
| 2020-03-31T03:43:46
| 2020-03-31T03:43:46
| 247,706,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/16 3:49 PM
# @Author : ZhangHao
# @File : logger.py
# @Desc :
import logging
"""
Logger的级别:
1. DEBUG
2. INFO
3. WARNING
4. ERROR
5. CRITICAL
"""
class Logger(object):
    """Configure the root logger once and hand it out.

    Levels, least to most severe: DEBUG < INFO < WARNING < ERROR < CRITICAL.
    """

    # Class-level flag shared by every instance: basicConfig must run
    # exactly once per process.
    _is_init = False

    def __init__(self):
        if not Logger._is_init:
            logging.basicConfig(
                level=logging.DEBUG,
                format="[%(asctime)s][%(filename)s:%(funcName)s:%(lineno)s][%(levelname)s]:%(message)s",
                datefmt='%Y-%m-%d %H:%M:%S')
            # BUG FIX: the original wrote ``self._is_init = True``, which
            # created an *instance* attribute and left the class flag
            # False, so every Logger() re-ran basicConfig.  Assign on the
            # class so the guard actually works.
            Logger._is_init = True
        # BUG FIX: originally assigned only inside the ``if`` branch, so
        # any instance created after initialization would lack
        # ``self.logger`` and get_logger() would raise AttributeError.
        self.logger = logging.getLogger()

    def get_logger(self):
        """Return the root logger configured by the first instantiation."""
        return self.logger


if __name__ == "__main__":
    pass
|
[
"changhaw@126.com"
] |
changhaw@126.com
|
96ec42e448b7eeb8921d53eb0ed2c1f012f9e714
|
f0d0ea29240c53b6ce1c4b06095b528ece02fdd7
|
/apps/config/migrations/0009_mail.py
|
f05d496f1a8ec6c6047e85a0dbb57b16caf33448
|
[] |
no_license
|
zhifuliu/dianjing
|
477529ccd6159329e1bc121aeb2ff328ee499f4a
|
7b3f6d58f5bc0738651d8d72c9a24df4ade0ed36
|
refs/heads/master
| 2020-03-21T09:10:28.343268
| 2017-03-24T03:06:24
| 2017-03-24T03:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,492
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 09:17
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.2 (2016-10-21): creates the ``mail``
    # table used for scheduled in-game mail delivery.  The bytestring
    # verbose_name values are UTF-8-encoded Chinese admin labels
    # (Django 1.x / Python 2 style) and must be kept byte-identical.
    # Avoid hand-editing generated migrations beyond comments.

    dependencies = [
        ('config', '0008_auto_20161020_1321'),
    ]

    operations = [
        migrations.CreateModel(
            name='Mail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name=b'\xe6\xa0\x87\xe9\xa2\x98')),
                ('content', models.TextField(verbose_name=b'\xe5\x86\x85\xe5\xae\xb9')),
                ('items', models.TextField(blank=True, verbose_name=b'\xe9\x99\x84\xe4\xbb\xb6')),
                ('send_at', models.DateTimeField(db_index=True, verbose_name=b'\xe5\x8f\x91\xe9\x80\x81\xe6\x97\xb6\xe9\x97\xb4')),
                ('condition_type', models.IntegerField(choices=[(1, b'\xe5\x85\xa8\xe9\x83\xa8\xe6\x9c\x8d\xe5\x8a\xa1\xe5\x99\xa8'), (2, b'\xe6\x8c\x87\xe5\xae\x9a\xe6\x9c\x8d\xe5\x8a\xa1\xe5\x99\xa8'), (3, b'\xe6\x8e\x92\xe9\x99\xa4\xe6\x8c\x87\xe5\xae\x9a\xe6\x9c\x8d\xe5\x8a\xa1\xe5\x99\xa8'), (11, b'\xe6\x8c\x87\xe5\xae\x9a\xe8\xa7\x92\xe8\x89\xb2ID')], verbose_name=b'\xe5\x8f\x91\xe9\x80\x81\xe6\x9d\xa1\xe4\xbb\xb6')),
                ('condition_value', models.CharField(blank=True, max_length=255, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')], verbose_name=b'\xe6\x9d\xa1\xe4\xbb\xb6\xe5\x80\xbcID\xe5\x88\x97\xe8\xa1\xa8')),
                ('condition_club_level', models.PositiveIntegerField(blank=True, null=True, verbose_name=b'\xe4\xbf\xb1\xe4\xb9\x90\xe9\x83\xa8\xe7\xad\x89\xe7\xba\xa7\xe5\xa4\xa7\xe4\xba\x8e\xe7\xad\x89\xe4\xba\x8e')),
                ('condition_vip_level', models.PositiveIntegerField(blank=True, null=True, verbose_name=b'VIP\xe7\xad\x89\xe7\xba\xa7\xe5\xa4\xa7\xe4\xba\x8e\xe7\xad\x89\xe4\xba\x8e')),
                ('condition_login_at_1', models.DateTimeField(blank=True, null=True, verbose_name=b'\xe7\x99\xbb\xe9\x99\x86\xe6\x97\xb6\xe9\x97\xb4\xe5\xa4\xa7\xe4\xba\x8e\xe7\xad\x89\xe4\xba\x8e')),
                ('condition_login_at_2', models.DateTimeField(blank=True, null=True, verbose_name=b'\xe7\x99\xbb\xe9\x99\x86\xe6\x97\xb6\xe9\x97\xb4\xe5\xb0\x8f\xe4\xba\x8e\xe7\xad\x89\xe4\xba\x8e')),
                ('condition_exclude_chars', models.CharField(blank=True, max_length=255, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')], verbose_name=b'\xe6\x8e\x92\xe9\x99\xa4\xe8\xa7\x92\xe8\x89\xb2ID\xe5\x88\x97\xe8\xa1\xa8')),
                ('create_at', models.DateTimeField(auto_now_add=True, verbose_name=b'\xe5\x88\x9b\xe5\xbb\xba\xe6\x97\xb6\xe9\x97\xb4')),
                ('status', models.IntegerField(choices=[(0, b'\xe7\xad\x89\xe5\xbe\x85'), (1, b'\xe6\xad\xa3\xe5\x9c\xa8\xe5\x8f\x91\xe9\x80\x81'), (2, b'\xe5\xae\x8c\xe6\x88\x90'), (3, b'\xe5\xa4\xb1\xe8\xb4\xa5')], db_index=True, default=0, verbose_name=b'\xe7\x8a\xb6\xe6\x80\x81')),
            ],
            options={
                'db_table': 'mail',
                'verbose_name': '\u90ae\u4ef6',
                'verbose_name_plural': '\u90ae\u4ef6',
            },
        ),
    ]
|
[
"yueyoum@gmail.com"
] |
yueyoum@gmail.com
|
78eedfc25bf4d66206ca15b411bd9d49ddb21226
|
233f2321abd301b52ed5a22ae191ae82ce71e4e4
|
/app/__init__.py
|
5406bc31951e69021e61f49c8c276f8d08455cdf
|
[] |
no_license
|
kishoresvk21/tech_support
|
f7ee1d23eb80eae2e5215c9c122e4f5b07394509
|
3577bc00c79ce4388809801be6cbce9ca60373d8
|
refs/heads/main
| 2023-08-27T03:02:22.675670
| 2021-11-02T04:40:56
| 2021-11-02T04:40:56
| 416,664,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,659
|
py
|
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
from flask_restplus import Api
from flask_cors import CORS
from flask_migrate import Migrate

# --- Application and extension setup ---------------------------------------
app = Flask(__name__)
CORS(app)
# NOTE(review): CORS is initialized twice; this second call (with an empty
# resource/origin pattern) supersedes the first — confirm which is intended.
cors = CORS(app, resources={r"": {"origins": ""}, })

# SECURITY: the secret key and DB credentials are hard-coded in source.
# Move them to environment variables or a non-committed config file.
app.config['SECRET_KEY'] = 'rmijlkqqqawtre@1((11'
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:Root#123@localhost/tech_support_database'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
api = Api(app)

# --- USER APIs -------------------------------------------------------------
from app.user.users.views import Login, Register, UpdatePassword, ForgotPassword, Logout, GetProfile, UserProfile
api.add_resource(Login, "/login")
api.add_resource(Register, "/register")
api.add_resource(Logout, "/logout")
api.add_resource(UpdatePassword, "/changepassword")  # profile/changepassword
api.add_resource(ForgotPassword, "/forgotpassword")
api.add_resource(GetProfile, "/getprofile/user/<int:user_id>")
# FIX: this registration was duplicated verbatim in the original; one is enough.
api.add_resource(UserProfile, "/editprofile")

from app.user.queries.views import QueriesClass, GetQueryByUserId, GetQueryByTechnology, GetQueryByTitle
api.add_resource(QueriesClass, "/query")
api.add_resource(GetQueryByUserId, "/getqueries/user/<int:user_id>")
api.add_resource(GetQueryByTechnology, "/getqueries/technology/<int:tech_id>")
api.add_resource(GetQueryByTitle, "/getqueries/title/<string:title>")

from app.user.user_comments.views import GetCommentByQuery, GetCommentsByUserId, CommentCRUD
api.add_resource(CommentCRUD, "/comment")
api.add_resource(GetCommentsByUserId, "/getcomments/user/<int:user_id>")
api.add_resource(GetCommentByQuery, "/getcomments/query")

from app.user.technologies.views import TechFilter
api.add_resource(TechFilter, "/filter")

from app.user.likes_dislikes.views import Likes, DisLikes
api.add_resource(Likes, "/comment/like")
api.add_resource(DisLikes, "/comment/dislike")

# --- ADMIN APIs ------------------------------------------------------------
# NOTE(review): these admin imports rebind names already imported above
# (Login, ForgotPassword, GetProfile, QueriesClass, TechFilter, ...).
# Registration order keeps this working because each resource is bound at
# api.add_resource() time, but distinct aliases would be safer.
from app.admin.users.views import Login, ForgotPassword, GetAllUsers, GetProfile, UserDelete, UserSearch
api.add_resource(Login, "/admin/login")
api.add_resource(ForgotPassword, "/admin/forgotpassword")
api.add_resource(GetAllUsers, "/admin/getallusers")
api.add_resource(GetProfile, "/admin/getuserprofile/<int:user_id>")
api.add_resource(UserDelete, "/admin/deleteusers")
api.add_resource(UserSearch, "/admin/usersearch")

from app.admin.dashboards.views import FilterRecord, TopUsersList
api.add_resource(FilterRecord, "/admin/datefilter")
api.add_resource(TopUsersList, "/admin/topusers")

from app.admin.comments.views import CommentClass, GetCommentsByUserId, GetCommentByQuery
api.add_resource(CommentClass, "/admin/comment")  # delete/edit comments
api.add_resource(GetCommentByQuery, "/admin/comment/query")
api.add_resource(GetCommentsByUserId, "/admin/getcomments/user")

from app.admin.queries.views import QueriesClass, GetQueryByUserId, GetQueryByTechnology, GetQueryByTitle, Unanswered
api.add_resource(GetQueryByUserId, "/admin/getqueries/user/<int:user_id>")
api.add_resource(QueriesClass, "/admin/query")  # edit/delete
api.add_resource(Unanswered, "/admin/query/unanswered")

from app.admin.technologies.views import TechnologiesCRUD, TechFilter, AdminTechClass
api.add_resource(TechFilter, "/admin/gettechnologies")
api.add_resource(AdminTechClass, "/admin/technologies")

from app.admin.admin_users.views import AdminUserDetails, EditProfile, RolesClass, ChangePassword, AdminUsersEditDel, GetAllAdminUsers
api.add_resource(AdminUserDetails, "/admin/adminuserdetails")
api.add_resource(ChangePassword, "/admin/changepassword")
api.add_resource(EditProfile, "/admin/editadminuserdetails")
api.add_resource(RolesClass, "/admin/roles")
api.add_resource(AdminUsersEditDel, "/admin/users")
api.add_resource(GetAllAdminUsers, "/admin/getalladminusers")

from app.utils.file_upload import upload
api.add_resource(upload, "/file")
|
[
"svkrishnakishore2000@gmail.com"
] |
svkrishnakishore2000@gmail.com
|
fd5655dff832d4c4084d5d15f8af917e6b2e04f0
|
e5a25acd14fd7e080ffb255cb2bcbfa921c06806
|
/users/management/commands/utils/init_drp.py
|
c2d679d6e9f44c22178042d24c1dd7765ac46bc3
|
[
"MIT"
] |
permissive
|
GolamMullick/HR_PROJECT
|
26a6c2b251ee54a1ec35432668d8960cace71c3b
|
fc4c76cfc835ad014a62a3da9d32b8fc8d474397
|
refs/heads/master
| 2021-06-21T00:16:28.132340
| 2019-11-24T08:56:59
| 2019-11-24T08:56:59
| 223,716,477
| 0
| 0
|
MIT
| 2021-06-10T22:18:24
| 2019-11-24T08:52:54
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
from users.models import DepartmentRoleModelPermission
def init_drp(license):
    """Load DepartmentRoleModelPermission fixtures for *license* at migrate time.

    NOTE: the parameter name shadows the ``license`` builtin; renaming it
    would change the keyword-call interface, so it is only flagged here.
    """
    DepartmentRoleModelPermission.load_on_migrate(license)
    # Trace line confirming the loader ran.
    print("department role model permission worked!!")
|
[
"fahadmullick89@gmail.com"
] |
fahadmullick89@gmail.com
|
47108a87a1e8c3ab03cd93554fe0455da8e314e3
|
2bc74414e71a280cc50085ec2e5a6499d22ae5e6
|
/src/python/probdist/_SquareMatrix.py
|
b493e0baf491a20e1e752ce83acc3082e32f4528
|
[
"MIT"
] |
permissive
|
plewis/phycas
|
610c989d49dce741fc2d2ad048a9d7587eabeb74
|
9f5a4d9b2342dab907d14a46eb91f92ad80a5605
|
refs/heads/master
| 2020-12-25T16:48:31.870762
| 2017-07-15T14:07:37
| 2017-07-15T14:07:37
| 21,300,616
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,596
|
py
|
from _ProbDistExt import *
class SquareMatrix(SquareMatrixBase):
    """
    Encapsulates a square matrix of floating point values (underlying C++
    implementation stores these as doubles).  Most methods are thin
    delegations to the Boost.Python base class SquareMatrixBase.
    """

    def __init__(self, dimension, value):
        """Create a dimension x dimension matrix with ``value`` in every cell."""
        SquareMatrixBase.__init__(self, dimension, value)

    def duplicate(self):
        """Return a copy of this matrix."""
        return SquareMatrixBase.duplicate(self)

    def identity(self):
        """Convert this matrix in place to an identity matrix (1s on the
        main diagonal, 0s everywhere else); the dimension is unchanged."""
        SquareMatrixBase.identity(self)

    def trace(self):
        """Return the sum of the elements on the main diagonal."""
        return SquareMatrixBase.trace(self)

    def inverse(self):
        """Return a new SquareMatrix that is the inverse of this matrix."""
        return SquareMatrixBase.inverse(self)

    def pow(self, p):
        """Return a new SquareMatrix equal to this matrix raised to the
        (positive) power ``p``."""
        return SquareMatrixBase.pow(self, p)

    def getDimension(self):
        """Return the number of rows (== number of columns)."""
        return SquareMatrixBase.getDimension(self)

    def getElement(self, i, j):
        """Return the (i, j)th element."""
        return SquareMatrixBase.getElement(self, i, j)

    def setElement(self, i, j, v):
        """Set the (i, j)th element to ``v``."""
        SquareMatrixBase.setElement(self, i, j, v)

    def getMatrix(self):
        """Return the matrix as a two-dimensional (row-major) list."""
        dim = self.getDimension()
        flat = SquareMatrixBase.getMatrix(self)
        # The base class hands back a flattened row-major sequence;
        # re-slice it into dim rows of dim values.  Index arithmetic is
        # used (not slicing) so we only assume __getitem__, exactly as
        # the original's manual counter loop did.
        return [[flat[row * dim + col] for col in range(dim)]
                for row in range(dim)]

    def setMatrixFromFlattenedList(self, dim, v):
        """Fill the matrix from the flat list/tuple ``v``, which must have
        length dim*dim (row-major order)."""
        SquareMatrixBase.setMatrix(self, dim, v)

    def setMatrix(self, m):
        """Fill the matrix from the two-dimensional list/tuple ``m``."""
        dim = len(m[0])
        # Flatten row-major, replacing the original's nested append loop.
        flat = [value for row in m for value in row]
        SquareMatrixBase.setMatrix(self, dim, flat)

    def __repr__(self):
        """Represent the matrix as a string (delegates to the base class)."""
        return SquareMatrixBase.__repr__(self)

    def leftMultiplyMatrix(self, matrixOnLeft):
        """Return matrixOnLeft * self as a new SquareMatrix."""
        return SquareMatrixBase.leftMultiplyMatrix(self, matrixOnLeft)

    def rightMultiplyMatrix(self, matrixOnRight):
        """Return self * matrixOnRight as a new SquareMatrix."""
        return SquareMatrixBase.rightMultiplyMatrix(self, matrixOnRight)

    def leftMultiplyVector(self, vectorOnLeft):
        """Return (transposed) vectorOnLeft * self."""
        return SquareMatrixBase.leftMultiplyVector(self, vectorOnLeft)

    def rightMultiplyVector(self, vectorOnRight):
        """Return self * vectorOnRight."""
        return SquareMatrixBase.rightMultiplyVector(self, vectorOnRight)

    def logAbsDet(self):
        """Return the natural log of the absolute value of the determinant."""
        return SquareMatrixBase.logAbsDet(self)

    def CholeskyDecomposition(self):
        """Return the Cholesky decomposition as a lower-triangular matrix,
        or None if the matrix is not symmetric positive definite."""
        return SquareMatrixBase.CholeskyDecomposition(self)
|
[
"paul.lewis@uconn.edu"
] |
paul.lewis@uconn.edu
|
bd8ef05ba2857891d6e8b09adf4efd715afddbbd
|
e7e42ae069a5d3165fbe6acc86f4a0cd4b709194
|
/djsite/settings.py
|
f7ba93283b05bd36a83ff1b736bef4bf2638f220
|
[] |
no_license
|
Kearenus/testblog
|
c1c59732f999af64c079447dc02982d0f36d361d
|
75f8bd2d4faefd13ee97f319a0ef7f19e5e5843a
|
refs/heads/master
| 2022-12-18T01:56:35.635762
| 2020-09-14T13:01:49
| 2020-09-14T13:01:49
| 295,406,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,198
|
py
|
"""
Django settings for djsite project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y1fgb)n!1gy4u0e8i#=r4(prvtj0apbvb#(xz72@*zw+1#^3i8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test1dj',
'USER' : 'postgres',
'PASSWORD' : '123456',
'HOST' : 'localhost',
'PORT' : '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ru-rus'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"Kearenus@gmail.com"
] |
Kearenus@gmail.com
|
774e1042c1b495b81805a042253a3386768be94b
|
6b5d6690678f05a71837b85016db3da52359a2f6
|
/depot_tools/recipe_modules/gclient/api.py
|
1b705d5b35d28efa7ce15cc73361cdfda960e24e
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
bopopescu/MQUIC
|
eda5477bacc68f30656488e3cef243af6f7460e6
|
703e944ec981366cfd2528943b1def2c72b7e49d
|
refs/heads/master
| 2022-11-22T07:41:11.374401
| 2016-04-08T22:27:32
| 2016-04-08T22:27:32
| 282,352,335
| 0
| 0
|
MIT
| 2020-07-25T02:05:49
| 2020-07-25T02:05:49
| null |
UTF-8
|
Python
| false
| false
| 12,269
|
py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine import recipe_api
class RevisionResolver(object):
  """Interface for strategies that pick a sync revision from build properties.

  Subclasses must override resolve().
  """

  def resolve(self, properties):  # pragma: no cover
    raise NotImplementedError()
class RevisionFallbackChain(RevisionResolver):
  """Specify that a given project's sync revision follows the fallback chain."""

  # Properties consulted in priority order; the first truthy value wins.
  _FALLBACK_PROPERTIES = ('parent_got_revision', 'orig_revision', 'revision')

  def __init__(self, default=None):
    self._default = default

  def resolve(self, properties):
    """Resolve the revision via the revision fallback chain.

    Walks the chain of build properties and returns the first one that is
    set (truthy); if none are set, returns the default supplied at
    construction time.
    """
    for prop_name in self._FALLBACK_PROPERTIES:
      value = properties.get(prop_name)
      if value:
        return value
    return self._default
class ProjectRevisionResolver(RevisionResolver):
  """Revision resolver that takes into account the project."""

  def __init__(self, project, parent_got_revision=None):
    self.project = project
    # Name of the property carrying the parent build's revision.
    self.parent_got_revision = parent_got_revision or 'parent_got_revision'

  # TODO(phajdan.jr): Move to proper repo and add coverage.
  def resolve(self, properties):  # pragma: no cover
    """Resolve the revision if project matches, otherwise default to HEAD."""
    parent_revision = properties.get(self.parent_got_revision)
    if properties.get('project') == self.project:
      return parent_revision or properties.get('revision') or 'HEAD'
    return parent_revision or 'HEAD'
def jsonish_to_python(spec, is_top=False):
  """Renders a JSON-ish python structure as python source text.

  With is_top=True, spec is treated as a suite: each sorted key becomes a
  top-level `name = value` assignment. Otherwise dicts/lists recurse and
  strings/scalars are rendered with repr().
  NOTE: uses the Python 2 `basestring` builtin, matching the rest of this
  (Python 2) module.
  """
  if is_top:  # We're the 'top' level, so treat this dict as a suite.
    return '\n'.join(
        '%s = %s' % (key, jsonish_to_python(spec[key])) for key in sorted(spec))
  if isinstance(spec, dict):
    body = ', '.join(
        "%s: %s" % (repr(str(key)), jsonish_to_python(spec[key]))
        for key in sorted(spec))
    return '{' + body + '}'
  if isinstance(spec, list):
    return '[' + ', '.join(jsonish_to_python(elem) for elem in spec) + ']'
  if isinstance(spec, basestring):
    return repr(str(spec))
  return repr(spec)
class GclientApi(recipe_api.RecipeApi):
  """Recipe module wrapping the `gclient` tool (checkout, sync, revert,
  runhooks) as recipe steps."""

  # Singleton object to indicate to checkout() that we should run a revert if
  # we detect that we're on the tryserver.
  RevertOnTryserver = object()

  def __init__(self, **kwargs):
    super(GclientApi, self).__init__(**kwargs)
    # None means "not decided yet"; resolved lazily by the use_mirror property.
    self.USE_MIRROR = None
    self._spec_alias = None

  def __call__(self, name, cmd, infra_step=True, **kwargs):
    """Wrapper for easy calling of gclient steps."""
    assert isinstance(cmd, (list, tuple))
    prefix = 'gclient '
    if self.spec_alias:
      prefix = ('[spec: %s] ' % self.spec_alias) + prefix
    # Ensure the directory holding the bundled gclient.py is on the step's PATH.
    kwargs.setdefault('env', {})
    kwargs['env'].setdefault('PATH', '%(PATH)s')
    kwargs['env']['PATH'] = self.m.path.pathsep.join([
        kwargs['env']['PATH'], str(self._module.PACKAGE_DIRECTORY)])
    return self.m.python(prefix + name,
                         self.package_resource('gclient.py'),
                         cmd,
                         infra_step=infra_step,
                         **kwargs)

  @property
  def use_mirror(self):
    """Indicates if gclient will use mirrors in its configuration."""
    if self.USE_MIRROR is None:
      # Defaults to True unless the 'use_mirror' build property says otherwise.
      self.USE_MIRROR = self.m.properties.get('use_mirror', True)
    return self.USE_MIRROR

  @use_mirror.setter
  def use_mirror(self, val):  # pragma: no cover
    self.USE_MIRROR = val

  @property
  def spec_alias(self):
    """Optional name for the current spec for step naming."""
    return self._spec_alias

  @spec_alias.setter
  def spec_alias(self, name):
    self._spec_alias = name

  @spec_alias.deleter
  def spec_alias(self):
    self._spec_alias = None

  def get_config_defaults(self):
    """Default values merged into every gclient config of this module."""
    ret = {
      'USE_MIRROR': self.use_mirror
    }
    ret['CACHE_DIR'] = self.m.path['root'].join('git_cache')
    return ret

  def resolve_revision(self, revision):
    """Turns a RevisionResolver into a concrete revision string.

    Plain strings (or anything without a resolve() method) pass through
    unchanged.
    """
    if hasattr(revision, 'resolve'):
      return revision.resolve(self.m.properties)
    return revision

  def sync(self, cfg, with_branch_heads=False, **kwargs):
    """Runs `gclient sync` for cfg, pinning each solution's revision.

    Publishes got_revision-style build properties from the step's JSON
    output even when the sync step itself fails (see the finally block).
    """
    revisions = []
    for i, s in enumerate(cfg.solutions):
      if s.safesync_url:  # prefer safesync_url in gclient mode
        continue
      if i == 0 and s.revision is None:
        # First solution defaults to the property-based fallback chain.
        s.revision = RevisionFallbackChain()
      if s.revision is not None and s.revision != '':
        fixed_revision = self.resolve_revision(s.revision)
        if fixed_revision:
          revisions.extend(['--revision', '%s@%s' % (s.name, fixed_revision)])
    for name, revision in sorted(cfg.revisions.items()):
      fixed_revision = self.resolve_revision(revision)
      if fixed_revision:
        revisions.extend(['--revision', '%s@%s' % (name, fixed_revision)])

    # Paths whose revisions appear in the simulated step output during tests.
    test_data_paths = set(cfg.got_revision_mapping.keys() +
                          [s.name for s in cfg.solutions])
    step_test_data = lambda: (
      self.test_api.output_json(test_data_paths, cfg.GIT_MODE))
    try:
      if not cfg.GIT_MODE:
        args = ['sync', '--nohooks', '--force', '--verbose']
        if cfg.delete_unversioned_trees:
          args.append('--delete_unversioned_trees')
        if with_branch_heads:
          args.append('--with_branch_heads')
        self('sync', args + revisions + ['--output-json', self.m.json.output()],
             step_test_data=step_test_data,
             **kwargs)
      else:
        # clean() isn't used because the gclient sync flags passed in checkout()
        # do much the same thing, and they're more correct than doing a separate
        # 'gclient revert' because it makes sure the other args are correct when
        # a repo was deleted and needs to be re-cloned (notably
        # --with_branch_heads), whereas 'revert' uses default args for clone
        # operations.
        #
        # TODO(mmoss): To be like current official builders, this step could
        # just delete the whole <slave_name>/build/ directory and start each
        # build from scratch. That might be the least bad solution, at least
        # until we have a reliable gclient method to produce a pristine working
        # dir for git-based builds (e.g. maybe some combination of 'git
        # reset/clean -fx' and removing the 'out' directory).
        j = '-j2' if self.m.platform.is_win else '-j8'
        args = ['sync', '--verbose', '--with_branch_heads', '--nohooks', j,
                '--reset', '--force', '--upstream', '--no-nag-max']
        if cfg.delete_unversioned_trees:
          args.append('--delete_unversioned_trees')
        self('sync', args + revisions +
             ['--output-json', self.m.json.output()],
             step_test_data=step_test_data,
             **kwargs)
    finally:
      # Record the synced revision of each mapped path as a build property,
      # regardless of whether the sync step succeeded.
      result = self.m.step.active_result
      data = result.json.output
      for path, info in data['solutions'].iteritems():
        # gclient json paths always end with a slash
        path = path.rstrip('/')
        if path in cfg.got_revision_mapping:
          propname = cfg.got_revision_mapping[path]
          result.presentation.properties[propname] = info['revision']
    return result

  def inject_parent_got_revision(self, gclient_config=None, override=False):
    """Match gclient config to build revisions obtained from build_properties.

    Args:
      gclient_config (gclient config object) - The config to manipulate. A value
        of None manipulates the module's built-in config (self.c).
      override (bool) - If True, will forcibly set revision and custom_vars
        even if the config already contains values for them.
    """
    cfg = gclient_config or self.c

    for prop, custom_var in cfg.parent_got_revision_mapping.iteritems():
      val = str(self.m.properties.get(prop, ''))
      # TODO(infra): Fix coverage.
      if val:  # pragma: no cover
        # Special case for 'src', inject into solutions[0]
        if custom_var is None:
          # This is not covered because we are deprecating this feature and
          # it is no longer used by the public recipes.
          if cfg.solutions[0].revision is None or override:  # pragma: no cover
            cfg.solutions[0].revision = val
        else:
          if custom_var not in cfg.solutions[0].custom_vars or override:
            cfg.solutions[0].custom_vars[custom_var] = val

  def checkout(self, gclient_config=None, revert=RevertOnTryserver,
               inject_parent_got_revision=True, with_branch_heads=False,
               **kwargs):
    """Return a step generator function for gclient checkouts."""
    cfg = gclient_config or self.c
    assert cfg.complete()
    # RevertOnTryserver sentinel: revert only when running on the tryserver.
    if revert is self.RevertOnTryserver:
      revert = self.m.tryserver.is_tryserver
    if inject_parent_got_revision:
      self.inject_parent_got_revision(cfg, override=True)

    # Render the config object as a python-source spec for `gclient config`.
    spec_string = jsonish_to_python(cfg.as_jsonish(), True)

    self('setup', ['config', '--spec', spec_string], **kwargs)

    sync_step = None
    try:
      if not cfg.GIT_MODE:
        try:
          if revert:
            self.revert(**kwargs)
        finally:
          # Sync runs even if the revert step failed.
          sync_step = self.sync(cfg, with_branch_heads=with_branch_heads,
                                **kwargs)
      else:
        sync_step = self.sync(cfg, with_branch_heads=with_branch_heads,
                              **kwargs)
        # Set dummy git identity in every checked-out repo so that later
        # git operations that require an identity don't fail.
        cfg_cmds = [
          ('user.name', 'local_bot'),
          ('user.email', 'local_bot@example.com'),
        ]
        for var, val in cfg_cmds:
          name = 'recurse (git config %s)' % var
          self(name, ['recurse', 'git', 'config', var, val], **kwargs)
    finally:
      # Expose the first solution's checkout dir as the 'checkout' path,
      # even if sync failed.
      cwd = kwargs.get('cwd', self.m.path['slave_build'])
      if 'checkout' not in self.m.path:
        self.m.path['checkout'] = cwd.join(
          *cfg.solutions[0].name.split(self.m.path.sep))

    return sync_step

  def revert(self, **kwargs):
    """Return a gclient_safe_revert step."""
    # Not directly calling gclient, so don't use self().
    alias = self.spec_alias
    prefix = '%sgclient ' % (('[spec: %s] ' % alias) if alias else '')

    return self.m.python(prefix + 'revert',
        self.m.path['build'].join('scripts', 'slave', 'gclient_safe_revert.py'),
        ['.', self.m.path['depot_tools'].join('gclient',
                                              platform_ext={'win': '.bat'})],
        infra_step=True,
        **kwargs
    )

  def runhooks(self, args=None, name='runhooks', **kwargs):
    """Runs `gclient runhooks` (not an infra step: hook failures are the
    build's fault)."""
    args = args or []
    assert isinstance(args, (list, tuple))
    return self(
        name, ['runhooks'] + list(args), infra_step=False, **kwargs)

  @property
  def is_blink_mode(self):
    """ Indicates wether the caller is to use the Blink config rather than the
    Chromium config. This may happen for one of two reasons:
    1. The builder is configured to always use TOT Blink. (factory property
       top_of_tree_blink=True)
    2. A try job comes in that applies to the Blink tree. (patch_project is
       blink)
    """
    return (
      self.m.properties.get('top_of_tree_blink') or
      self.m.properties.get('patch_project') == 'blink')

  def break_locks(self):
    """Remove all index.lock files. If a previous run of git crashed, bot was
    reset, etc... we might end up with leftover index.lock files.
    """
    # The embedded script is Python 2 (print statement) and runs as a
    # separate step; it must stay byte-for-byte as-is.
    self.m.python.inline(
      'cleanup index.lock',
      """
      import os, sys

      build_path = sys.argv[1]
      if os.path.exists(build_path):
        for (path, dir, files) in os.walk(build_path):
          for cur_file in files:
            if cur_file.endswith('index.lock'):
              path_to_file = os.path.join(path, cur_file)
              print 'deleting %s' % path_to_file
              os.remove(path_to_file)
      """,
      args=[self.m.path['slave_build']],
      infra_step=True,
    )
|
[
"alyssar@google.com"
] |
alyssar@google.com
|
e0e86e42242d9e8b93db20b6f8b31985d4cae909
|
f38e78214992de722a6ec2012e844bce7b3c59ed
|
/lib/clckwrkbdgr/oldrogue/__main__.py
|
5d5846a28afc44aecd837e2b664887d748df9f23
|
[
"MIT"
] |
permissive
|
clckwrkbdgr/dotfiles
|
20fb86f54d93ae4936c334898c3d7b1b3820fb06
|
a7e880e189bfa4793f30ff928b049e4a182a38cd
|
refs/heads/master
| 2023-08-31T13:13:47.533868
| 2023-08-30T18:32:00
| 2023-08-30T18:32:00
| 20,396,084
| 2
| 2
|
MIT
| 2022-10-01T16:35:31
| 2014-06-02T07:26:38
|
Python
|
UTF-8
|
Python
| false
| false
| 27,391
|
py
|
import os, sys
import curses, curses.ascii
import json, itertools, copy, functools
import logging
import inspect
from operator import itemgetter
from collections import namedtuple
import six
if six.PY2:
import itertools
filter = itertools.ifilter
import vintage
from clckwrkbdgr import xdg
from clckwrkbdgr.utils import get_type_by_name
from clckwrkbdgr.math import Point, Matrix
from clckwrkbdgr.fs import SerializedEntity
import clckwrkbdgr.math
from clckwrkbdgr.collections import dotdict, AutoRegistry
import clckwrkbdgr.collections
import clckwrkbdgr.text
from clckwrkbdgr import tui
import clckwrkbdgr.logging
trace = logging.getLogger('rogue')
from clckwrkbdgr.events import Events, MessageEvent
from . import game
from .game import Version, Item, Consumable, Wearable, Monster, Room, Tunnel, GridRoomMap, GridRoomMap as Map, Furniture, LevelPassage, GodMode, Dungeon, Event
from . import pcg
class MakeEntity:
    """ Creates builders for bare-properties-based classes to create subclass in one line. """
    def __init__(self, base_classes, *properties):
        """ Properties are either list of strings, or a single strings with space-separated identifiers. """
        if clckwrkbdgr.utils.is_collection(base_classes):
            self.base_classes = base_classes
        else:
            self.base_classes = (base_classes,)
        self.properties = properties
        if len(self.properties) == 1 and ' ' in self.properties[0]:
            # Single space-separated string: split into individual names.
            self.properties = self.properties[0].split()
    def __call__(self, class_name, *values):
        """ Creates class object and puts it into global namespace.
        Values should match properties given at init.
        """
        assert len(self.properties) == len(values), len(values)
        class_body = dict(zip(self.properties, values))
        entity_class = type(class_name, self.base_classes, class_body)
        globals()[class_name] = entity_class
        return entity_class
class EntityClassDistribution:
    """ Weighted bag of entity classes.

    The weight is either a constant or a callable of one parameter
    (e.g. dungeon depth). Classes are registered via the `<<` operator.
    """
    def __init__(self, prob):
        self.prob = prob
        self.classes = []
    def __lshift__(self, entity_class):
        """ Registers entity_class under this distribution's weight. """
        self.classes.append(entity_class)
    def __iter__(self):
        """ Iterates over registered classes in registration order. """
        return iter(self.classes)
    def get_distribution(self, param):
        """ Returns list of (weight, class) pairs.

        Callable weights are evaluated with the given param.
        """
        weight = self.prob(param) if callable(self.prob) else self.prob
        return [(weight, entity_class) for entity_class in self.classes]
class StairsUp(LevelPassage):
    # Upward passage between dungeon levels.
    _sprite = '<'
    _name = 'stairs up'
    _id = 'enter'  # paired with the connected passage's id ('exit') by the generator
    _can_go_up = True
class DungeonGates(LevelPassage):
    # The exit on level 0; passable upward only when carrying the McGuffin.
    _sprite = '<'
    _name = 'exit from the dungeon'
    _id = 'enter'
    _can_go_up = True
    def use(self, who):
        """ Leave the dungeon.

        Raises GameCompleted when `who` carries the McGuffin;
        otherwise the gates stay locked (Furniture.Locked).
        """
        if not who.has_item(McGuffin):
            raise Furniture.Locked(McGuffin)
        raise GameCompleted()
class StairsDown(LevelPassage):
    # Downward passage between dungeon levels.
    _sprite = '>'
    _name = 'stairs down'
    _id = 'exit'  # paired with the connected passage's id ('enter') by the generator
    _can_go_down = True
class McGuffin(Item):
    # The quest item: placed on the deepest level and required to exit
    # through the dungeon gates (see DungeonGates.use).
    _sprite = "*"
    _name = "mcguffin"
class HealingPotion(Item, Consumable):
    _sprite = "!"
    _name = "potion"
    def consume_by(self, who):
        """ Heals the drinker by 10 HP, announces it, and reports the
        potion as consumed (returns True). """
        who.heal(10)
        announcement = DrinksHealingPotion(Who=who.name.title())
        Events().trigger(announcement)
        return True
# Weapon definitions: sprite, name, _attack value (presumably damage
# dealt — confirm against the combat code in the game module).
make_weapon = MakeEntity(Item, '_sprite _name _attack')
make_weapon('Dagger', '(', 'dagger', 1)
make_weapon('Sword', '(', 'sword', 2)
make_weapon('Axe', '(', 'axe', 4)
# Armor definitions: sprite, name, _protection value.
make_armor = MakeEntity((Item, Wearable), '_sprite _name _protection')
make_armor('Rags', "[", "rags", 1)
make_armor('Leather', "[", "leather", 2)
make_armor('ChainMail', "[", "chain mail", 3)
make_armor('PlateArmor', "[", "plate armor", 4)
class Rogue(Monster):
    # The player character.
    _hostile_to = [Monster]  # hostile to every monster type
    _sprite = "@"
    _name = "rogue"
    _max_hp = 25
    _attack = 1
    _max_inventory = 26  # presumably one slot per latin letter — confirm in inventory UI
class RealMonster(Monster):
    # Common base for all non-player monsters; they all target the rogue.
    _hostile_to = [Rogue]
# Loot tables: lists of (weight, item class) pairs; None means "no drop".
# Each table's weights sum to 100.
animal_drops = [
    (70, None),
    (20, HealingPotion),
    (5, Dagger),
    (5, Rags),
]
monster_drops = [
    (78, None),
    (3, HealingPotion),
    (3, Dagger),
    (3, Sword),
    (3, Axe),
    (3, Rags),
    (3, Leather),
    (3, ChainMail),
    (1, PlateArmor),
]
thug_drops = [
    (10, None),
    (20, HealingPotion),
    (30, Dagger),
    (10, Sword),
    (30, Leather),
]
warrior_drops = [
    (40, None),
    (30, HealingPotion),
    (10, Dagger),
    (5, Sword),
    (10, Leather),
    (5, ChainMail),
]
super_warrior_drops = [
    (80, None),
    (5, HealingPotion),
    (5, Axe),
    (10, Leather),
]
# Difficulty tiers: each tier's spawn weight is a function of dungeon depth.
# Easy monsters have constant weight; normal/hard tiers only appear once
# the depth passes their threshold.
easy_monsters = EntityClassDistribution(1)
norm_monsters = EntityClassDistribution(lambda depth: max(0, (depth-2)))
hard_monsters = EntityClassDistribution(lambda depth: max(0, (depth-7)//2))
# Monster definitions: sprite, name, max hp, attack, loot table.
# Multiplied loot tables (e.g. warrior_drops*2) repeat every entry, which
# doubles the drop weights relative to other tables.
make_monster = MakeEntity((RealMonster), '_sprite _name _max_hp _attack _drops')
easy_monsters << make_monster('Ant', 'a', 'ant', 5, 1, animal_drops)
easy_monsters << make_monster('Bat', 'b', 'bat', 5, 1, animal_drops)
easy_monsters << make_monster('Cockroach', 'c', 'cockroach', 5, 1, animal_drops)
easy_monsters << make_monster('Dog', 'd', 'dog', 7, 1, animal_drops)
norm_monsters << make_monster('Elf', 'e', 'elf', 10, 2, warrior_drops)
easy_monsters << make_monster('Frog', 'f', 'frog', 5, 1, animal_drops)
norm_monsters << make_monster('Goblin', "g", "goblin", 10, 2, warrior_drops*2)
norm_monsters << make_monster('Harpy', 'h', 'harpy', 10, 2, monster_drops)
norm_monsters << make_monster('Imp', 'i', 'imp', 10, 3, monster_drops)
easy_monsters << make_monster('Jelly', 'j', 'jelly', 5, 2, animal_drops)
norm_monsters << make_monster('Kobold', 'k', 'kobold', 10, 2, warrior_drops)
easy_monsters << make_monster('Lizard', 'l', 'lizard', 5, 1, animal_drops)
easy_monsters << make_monster('Mummy', 'm', 'mummy', 10, 2, monster_drops)
norm_monsters << make_monster('Narc', 'n', 'narc', 10, 2, thug_drops)
norm_monsters << make_monster('Orc', 'o', 'orc', 15, 3, warrior_drops*2)
easy_monsters << make_monster('Pigrat', 'p', 'pigrat', 10, 2, animal_drops)
easy_monsters << make_monster('Quokka', 'q', 'quokka', 5, 1, animal_drops)
easy_monsters << make_monster('Rat', "r", "rat", 5, 1, animal_drops)
norm_monsters << make_monster('Skeleton', 's', 'skeleton', 20, 2, monster_drops)
norm_monsters << make_monster('Thug', 't', 'thug', 15, 3, thug_drops*2)
norm_monsters << make_monster('Unicorn', 'u', 'unicorn', 15, 3, monster_drops)
norm_monsters << make_monster('Vampire', 'v', 'vampire', 20, 2, monster_drops)
easy_monsters << make_monster('Worm', 'w', 'worm', 5, 2, animal_drops)
hard_monsters << make_monster('Exterminator', 'x', 'exterminator', 20, 3, super_warrior_drops)
norm_monsters << make_monster('Yak', 'y', 'yak', 10, 2, animal_drops)
easy_monsters << make_monster('Zombie', 'z', 'zombie', 5, 2, thug_drops)
hard_monsters << make_monster('Angel', 'A', 'angel', 30, 5, super_warrior_drops)
norm_monsters << make_monster('Beholder', 'B', 'beholder', 20, 2, warrior_drops)
hard_monsters << make_monster('Cyborg', 'C', 'cyborg', 20, 5, super_warrior_drops*3)
hard_monsters << make_monster('Dragon', 'D', 'dragon', 40, 5, monster_drops*3)
norm_monsters << make_monster('Elemental', 'E', 'elemental', 10, 2, [])
hard_monsters << make_monster('Floater', 'F', 'floater', 40, 1, animal_drops)
hard_monsters << make_monster('Gargoyle', 'G', 'gargoyle', 30, 3, monster_drops)
hard_monsters << make_monster('Hydra', 'H', 'hydra', 30, 2, monster_drops)
norm_monsters << make_monster('Ichthyander', 'I', 'ichthyander', 20, 2, thug_drops)
hard_monsters << make_monster('Juggernaut', 'J', 'juggernaut', 40, 4, monster_drops)
hard_monsters << make_monster('Kraken', 'K', 'kraken', 30, 3, monster_drops)
norm_monsters << make_monster('Lich', 'L', 'lich', 20, 2, monster_drops)
norm_monsters << make_monster('Minotaur', 'M', 'minotaur', 20, 2, warrior_drops*2)
norm_monsters << make_monster('Necromancer', 'N', 'necromancer', 20, 2, warrior_drops)
hard_monsters << make_monster('Ogre', 'O', 'ogre', 30, 5, super_warrior_drops)
hard_monsters << make_monster('Phoenix', 'P', 'phoenix', 20, 3, monster_drops)
norm_monsters << make_monster('QueenBee', 'Q', 'queen bee', 20, 2, animal_drops)
hard_monsters << make_monster('Revenant', 'R', 'revenant', 20, 3, super_warrior_drops)
norm_monsters << make_monster('Snake', 'S', 'snake', 10, 2, animal_drops)
hard_monsters << make_monster('Troll', "T", "troll", 25, 5, super_warrior_drops)
norm_monsters << make_monster('Unseen', 'U', 'unseen', 10, 2, thug_drops)
norm_monsters << make_monster('Viper', 'V', 'viper', 10, 2, animal_drops)
hard_monsters << make_monster('Wizard', 'W', 'wizard', 40, 5, thug_drops*2)
hard_monsters << make_monster('Xenomorph', 'X', 'xenomorph', 30, 3, animal_drops)
norm_monsters << make_monster('Yeti', 'Y', 'yeti', 10, 2, animal_drops)
norm_monsters << make_monster('Zealot', 'Z', 'zealot', 10, 2, thug_drops)
# In-game message templates. Each MessageEvent subclass carries a format
# string; fields are filled from keyword arguments at creation time and
# rendered on the message line (see MessageView).
class GodModeSwitched(MessageEvent): _message = "God {name} -> {state}"
class NeedMcGuffin(MessageEvent): _message = "You cannot escape the dungeon without mcguffin!"
class GoingUp(MessageEvent): _message = "Going up..."
class GoingDown(MessageEvent): _message = "Going down..."
class CannotGoBelow(MessageEvent): _message = "No place down below."
class CannotDig(MessageEvent): _message = "Cannot dig through the ground."
class CannotReachCeiling(MessageEvent): _message = "Cannot reach the ceiling."
class NoSuchItem(MessageEvent): _message = "No such item '{char}'."
class InventoryFull(MessageEvent): _message = "Inventory is full! Cannot pick up {item}"
class GrabbedItem(MessageEvent): _message = "Grabbed {item}."
class NothingToPickUp(MessageEvent): _message = "There is nothing here to pick up."
class InventoryEmpty(MessageEvent): _message = "Inventory is empty."
class ItemDropped(MessageEvent): _message = "Dropped {item}."
class DropsItem(MessageEvent): _message = "{Who} drops {item}."
class CannotConsume(MessageEvent): _message = "Cannot consume {item}."
class ItemConsumed(MessageEvent): _message = "Consumed {item}."
class DrinksHealingPotion(MessageEvent): _message = "{Who} heals itself."
class NothingToUnwield(MessageEvent): _message = "Nothing is wielded already."
class Unwielding(MessageEvent): _message = "Unwielding {item}."
class Wielding(MessageEvent): _message = "Wielding {item}."
class CannotWear(MessageEvent): _message = "Cannot wear {item}."
class NothingToTakeOff(MessageEvent): _message = "Nothing is worn already."
class TakingOff(MessageEvent): _message = "Taking off {item}."
class Wearing(MessageEvent): _message = "Wearing {item}."
class Attacking(MessageEvent): _message = "{Who} hit {whom} for {damage} hp."
class IsDead(MessageEvent): _message = "{Who} is dead."
class BumpsIntoWall(MessageEvent): _message = "{Who} bumps into wall."
class BumpsIntoOther(MessageEvent): _message = "{Who} bumps into {whom}."
class WelcomeBack(MessageEvent): _message = "Welcome back, {who}!"
# Registers the event with its payload field name — presumably needed for
# the game's Event history machinery; confirm against the game.Event class.
Event.register('WelcomeBack', 'who')
class RogueDungeonGenerator(pcg.Generator):
    """ Generates the dungeon: MAX_LEVELS levels of classic-Rogue 3x3 room grids. """
    # Deepest level holds the McGuffin; level 0 has the dungeon gates.
    MAX_LEVELS = 26
    def build_level(self, level_id):
        """ Builds and returns a GridRoomMap for the given level id (0-based depth).

        Raises KeyError for ids outside [0; MAX_LEVELS).
        Item and monster distributions scale with depth; the bottom level
        gets the McGuffin in place of the exit stairs.
        """
        if level_id < 0 or level_id >= self.MAX_LEVELS:
            raise KeyError("Invalid level ID: {0} (supports only [0; {1}))".format(level_id, self.MAX_LEVELS))
        depth = level_id
        is_bottom = depth >= (self.MAX_LEVELS - 1)
        result = self.original_rogue_dungeon(
            map_size=(78, 21),
            grid_size=(3, 3),
            room_class=Room, tunnel_class=Tunnel,
            # Better gear becomes available (and more likely) with depth.
            item_distribution = [
                (50, HealingPotion),
                (depth, Dagger),
                (depth // 2, Sword),
                (max(0, (depth-5) // 3), Axe),
                (depth, Rags),
                (depth // 2, Leather),
                (max(0, (depth-5) // 3), ChainMail),
                (max(0, (depth-10) // 3), PlateArmor),
            ],
            item_amount=(2, 4),
            monster_distribution = list(itertools.chain(
                easy_monsters.get_distribution(depth),
                norm_monsters.get_distribution(depth),
                hard_monsters.get_distribution(depth),
            )),
            monster_amount=5,
            prev_level_id=level_id - 1 if level_id > 0 else None,
            next_level_id=level_id + 1 if not is_bottom else None,
            # Level 0 uses the locked dungeon gates instead of stairs up.
            enter_object_type=StairsUp if level_id > 0 else DungeonGates,
            exit_object_type=StairsDown,
            enter_connected_id='exit',
            exit_connected_id='enter',
            item_instead_of_exit=McGuffin if is_bottom else None,
        )
        result.level_id = level_id
        return GridRoomMap(**vars(result))
class ExitWithoutSave(tui.app.AppExit):
    """ Raised to quit the game discarding progress (passes False to AppExit). """
    def __init__(self):
        super(ExitWithoutSave, self).__init__(False)

class SaveAndExit(tui.app.AppExit):
    """ Raised to quit the game keeping the save (passes True to AppExit). """
    def __init__(self):
        super(SaveAndExit, self).__init__(True)

class GameCompleted(Exception):
    """ Raised when the rogue escapes the dungeon with the McGuffin. """
    pass
def to_main_screen(mode):
    """ Builds a fresh main-game screen (status line + message overlay)
    sharing the given mode's dungeon data. """
    status_bar = StatusLine(MainGame, mode.data)
    return MessageView(status_bar, mode.data)
class MessageView(tui.widgets.MessageLineOverlay):
    """ Message-line overlay that drains pending game events into messages. """
    def get_new_messages(self):
        # Convert accumulated game history into posted events, then drain
        # the global Events queue; each drained event becomes a message.
        process_game_events(self.data, self.data.history)
        del self.data.history[:]
        events = Events()
        while events.listen():
            trace.debug("Message posted: {0}: {1}".format(repr(events.current), str(events.current)))
            yield events.current
    def force_ellipsis(self):
        # Once the rogue is dead, always pause on the message line.
        return not self.data.rogue.is_alive()
# Alias for the framework's labeled status-line section type.
StatusSection = tui.widgets.StatusLine.LabeledSection

class StatusLine(tui.widgets.StatusLine):
    """ Bottom status bar: level, HP, inventory summary, equipment, floor tile. """
    CORNER = "[?]"  # hint that '?' opens the help screen
    SECTIONS = [
        # Displayed level is 1-based; current_level_id is 0-based depth.
        StatusSection('Lvl', 2, lambda dungeon: 1+dungeon.current_level_id),
        StatusSection("HP", 6, lambda dungeon: "{0}/{1}".format(dungeon.rogue.hp, dungeon.rogue.max_hp)),
        # Up to two items are shown as sprites; more than two as a count.
        StatusSection("Items", 2, lambda dungeon:(
            None if not dungeon.rogue.inventory else (
                ''.join(item.sprite for item in dungeon.rogue.inventory)
                if len(dungeon.rogue.inventory) <= 2
                else len(dungeon.rogue.inventory)
            ))),
        StatusSection("Wld", 7, lambda dungeon: dungeon.rogue.wielding.name if dungeon.rogue.wielding else None),
        StatusSection("Wear", 7, lambda dungeon: dungeon.rogue.wearing.name if dungeon.rogue.wearing else None),
        # Sprite of the item (or, failing that, the object) under the rogue.
        StatusSection("Here", 1, lambda dungeon: getattr(next(dungeon.current_level.items_at(dungeon.rogue.pos), next(dungeon.current_level.objects_at(dungeon.rogue.pos), None)), 'sprite', None)),
    ]

# Key -> handler registry populated by the @Controls(...) decorators below.
Controls = AutoRegistry()
class MainGame(tui.app.MVC):
_full_redraw = True
def _view(self, window):
stdscr, dungeon = window, self.data
trace.debug(list(dungeon.current_level.rooms.keys()))
for room in dungeon.current_level.rooms.values():
if not dungeon.is_remembered(room):
continue
stdscr.addstr(1 + room.top, room.left, "+")
stdscr.addstr(1 + room.bottom, room.left, "+")
stdscr.addstr(1 + room.top, room.right, "+")
stdscr.addstr(1 + room.bottom, room.right, "+")
for x in range(room.left+1, room.right):
stdscr.addstr(1 + room.top, x, "-")
stdscr.addstr(1 + room.bottom, x, "-")
for y in range(room.top+1, room.bottom):
stdscr.addstr(1 + y, room.left, "|")
stdscr.addstr(1 + y, room.right, "|")
if dungeon.is_visible(room):
for y in range(room.top+1, room.bottom):
for x in range(room.left+1, room.right):
stdscr.addstr(1 + y, x, ".")
else:
for y in range(room.top+1, room.bottom):
for x in range(room.left+1, room.right):
stdscr.addstr(1 + y, x, " ")
for tunnel in dungeon.current_level.tunnels:
for cell in tunnel.iter_points():
if dungeon.is_visible(tunnel, cell):
stdscr.addstr(1 + cell.y, cell.x, "#")
if dungeon.is_visible(tunnel, tunnel.start):
stdscr.addstr(1 + tunnel.start.y, tunnel.start.x, "+")
if dungeon.is_visible(tunnel, tunnel.stop):
stdscr.addstr(1 + tunnel.stop.y, tunnel.stop.x, "+")
for pos, obj in dungeon.current_level.objects:
if dungeon.is_remembered(pos) or dungeon.is_visible(pos):
stdscr.addstr(1 + pos.y, pos.x, obj.sprite)
for pos, item in dungeon.current_level.items:
if dungeon.is_remembered(pos) or dungeon.is_visible(pos):
stdscr.addstr(1 + pos.y, pos.x, item.sprite)
for monster in dungeon.current_level.monsters:
if dungeon.is_visible(monster.pos):
stdscr.addstr(1 + monster.pos.y, monster.pos.x, monster.sprite)
stdscr.addstr(1 + dungeon.rogue.pos.y, dungeon.rogue.pos.x, dungeon.rogue.sprite)
def _control(self, ch):
self.step_is_over = False
try:
new_mode = Controls[str(ch)](self)
if new_mode:
return new_mode
if not self.step_is_over:
return
return self.process_others()
except KeyError:
trace.debug("Unknown key: {0}".format(ch))
pass
@Controls('Q')
def quit(self):
""" Abandon game. """
return SuicideAttempt(to_main_screen(self), self.data)
@Controls('S')
def save_and_exit(self):
""" Save & exit. """
raise SaveAndExit()
@Controls('~')
def god_mode(self):
return GodModeAction
@Controls('?')
def show_help(self):
""" Show help message. """
return HelpScreen
@Controls('>')
def descend(self):
""" Go down. """
dungeon = self.data
stairs_here = next(filter(lambda obj: isinstance(obj, LevelPassage) and obj.can_go_down, dungeon.current_level.objects_at(dungeon.rogue.pos)), None)
if stairs_here:
dungeon.use_stairs(stairs_here)
Events().trigger(GoingDown())
return to_main_screen(self)
else:
Events().trigger(CannotDig())
@Controls('<')
def ascend(self):
""" Go up. """
dungeon = self.data
stairs_here = next(filter(lambda obj: isinstance(obj, LevelPassage) and obj.can_go_up, dungeon.current_level.objects_at(dungeon.rogue.pos)), None)
if stairs_here:
try:
dungeon.use_stairs(stairs_here)
Events().trigger(GoingUp())
return to_main_screen(self)
except Furniture.Locked:
Events().trigger(NeedMcGuffin())
except GameCompleted:
return Greetings
else:
Events().trigger(CannotReachCeiling())
@Controls('g')
def grab(self):
""" Grab item. """
dungeon = self.data
item_here = next( (index for index, (pos, item) in enumerate(reversed(dungeon.current_level.items)) if pos == dungeon.rogue.pos), None)
trace.debug("Items: {0}".format(dungeon.current_level.items))
trace.debug("Rogue: {0}".format(dungeon.rogue.pos))
trace.debug("Items here: {0}".format([(index, pos, item) for index, (pos, item) in enumerate(reversed(dungeon.current_level.items)) if pos == dungeon.rogue.pos]))
trace.debug("Item here: {0}".format(item_here))
if item_here is not None:
item_here = len(dungeon.current_level.items) - 1 - item_here # Index is from reversed list.
trace.debug("Unreversed item here: {0}".format(item_here))
_, item = dungeon.current_level.items[item_here]
self.data.history += dungeon.current_level.grab_item(dungeon.rogue, item)
self.step_is_over = True
else:
Events().trigger(NothingToPickUp())
@Controls('d')
def drop(self):
""" Drop item. """
dungeon = self.data
if not dungeon.rogue.inventory:
Events().trigger(InventoryEmpty())
else:
return QuickDropItem(to_main_screen(self), self.data)
@Controls('e')
def eat(self):
""" Consume item. """
dungeon = self.data
if not dungeon.rogue.inventory:
Events().trigger(InventoryEmpty())
else:
return QuickConsumeItem(to_main_screen(self), self.data)
@Controls('w')
def wield(self):
""" Wield item. """
dungeon = self.data
if not dungeon.rogue.inventory:
Events().trigger(InventoryEmpty())
else:
return QuickWieldItem(to_main_screen(self), self.data)
@Controls('U')
def unwield(self):
""" Unwield item. """
dungeon = self.data
if not dungeon.rogue.wielding:
Events().trigger(NothingToUnwield())
else:
self.data.history += dungeon.rogue.wield(None)
@Controls('W')
def wear(self):
""" Wear item. """
dungeon = self.data
if not dungeon.rogue.inventory:
Events().trigger(InventoryEmpty())
else:
return QuickWearItem(to_main_screen(self), self.data)
@Controls('T')
def take_off(self):
""" Take item off. """
dungeon = self.data
if not dungeon.rogue.wearing:
Events().trigger(NothingToTakeOff())
else:
self.data.history += dungeon.rogue.wear(None)
	@Controls('i')
	def inventory(self):
		""" Toggle inventory. """
		# Switch the UI to the full-inventory menu mode.
		return Inventory
	@Controls('.')
	def wait(self):
		""" Wait. """
		# Do nothing; just end the current step.
		self.step_is_over = True
	@Controls('h')
	def move_west(self):
		""" Move around. """
		# Vi-style key: one step west.
		self.move_by(Point(-1, 0))
	@Controls('j')
	def move_south(self):
		""" Move around. """
		# Vi-style key: one step south (y grows downwards).
		self.move_by(Point( 0, +1))
	@Controls('k')
	def move_north(self):
		""" Move around. """
		# Vi-style key: one step north.
		self.move_by(Point( 0, -1))
	@Controls('l')
	def move_east(self):
		""" Move around. """
		# Vi-style key: one step east.
		self.move_by(Point(+1, 0))
	@Controls('y')
	def move_north_west(self):
		""" Move around. """
		# Diagonal step: north-west.
		self.move_by(Point(-1, -1))
	@Controls('u')
	def move_north_east(self):
		""" Move around. """
		# Diagonal step: north-east.
		self.move_by(Point(+1, -1))
	@Controls('b')
	def move_south_west(self):
		""" Move around. """
		# Diagonal step: south-west.
		self.move_by(Point(-1, +1))
	@Controls('n')
	def move_south_east(self):
		""" Move around. """
		# Diagonal step: south-east.
		self.move_by(Point(+1, +1))
	def move_by(self, shift):
		""" Move the rogue by the given relative shift and end the step.

		Any events produced by the move (bumps, attacks, ...) are appended
		to the dungeon history; the destination cell is marked as visited.
		"""
		dungeon = self.data
		self.data.history += dungeon.move_monster(dungeon.rogue, dungeon.rogue.pos + shift)
		dungeon.current_level.visit(dungeon.rogue.pos)
		self.step_is_over = True
def process_others(self):
dungeon = self.data
for monster in dungeon.current_level.monsters:
if not dungeon.current_room:
continue
sees_rogue = dungeon.current_room.contains(monster.pos)
if not sees_rogue:
continue
shift = Point(
clckwrkbdgr.math.sign(dungeon.rogue.pos.x - monster.pos.x),
clckwrkbdgr.math.sign(dungeon.rogue.pos.y - monster.pos.y),
)
new_pos = monster.pos + shift
self.data.history += dungeon.move_monster(monster, new_pos, with_tunnels=False)
if not dungeon.rogue.is_alive():
return MessageView(Grave, self.data)
def process_game_events(dungeon, events):
	""" Translate low-level dungeon events into user-visible UI events.

	Each Event.* instance from the game model triggers a corresponding
	message event on the global Events() bus. `dungeon` is only needed to
	suppress wall-bump messages for the rogue itself.
	"""
	for event in events:
		if isinstance(event, Event.BumpIntoTerrain):
			# The rogue bumping into walls is silent; only other monsters
			# produce a message.
			if event.who != dungeon.rogue:
				Events().trigger(BumpsIntoWall(Who=event.who.name.title()))
		elif isinstance(event, Event.BumpIntoMonster):
			Events().trigger(BumpsIntoOther(Who=event.who.name.title(), whom=event.whom.name))
		elif isinstance(event, Event.AttackMonster):
			Events().trigger(Attacking(Who=event.who.name.title(), whom=event.whom.name, damage=event.damage))
		elif isinstance(event, Event.MonsterDied):
			Events().trigger(IsDead(Who=event.who.name.title()))
		elif isinstance(event, Event.MonsterDroppedItem):
			Events().trigger(DropsItem(Who=event.who.name.title(), item=event.item.name))
		elif isinstance(event, Event.MonsterConsumedItem):
			Events().trigger(ItemConsumed(item=event.item.name))
		elif isinstance(event, Event.Unwielding):
			Events().trigger(Unwielding(item=event.item.name))
		elif isinstance(event, Event.TakingOff):
			Events().trigger(TakingOff(item=event.item.name))
		elif isinstance(event, Event.Wielding):
			Events().trigger(Wielding(item=event.item.name))
		elif isinstance(event, Event.Wearing):
			Events().trigger(Wearing(item=event.item.name))
		elif isinstance(event, Event.NotWearable):
			Events().trigger(CannotWear(item=event.item.name))
		elif isinstance(event, Event.NotConsumable):
			Events().trigger(CannotConsume(item=event.item.name))
		elif isinstance(event, Event.WelcomeBack):
			trace.debug(event)
			Events().trigger(WelcomeBack(who=event.who.name))
		elif isinstance(event, Event.InventoryFull):
			Events().trigger(InventoryFull(item=event.item.name))
		elif isinstance(event, Event.GrabbedItem):
			Events().trigger(GrabbedItem(who=event.who.name, item=event.item.name))
class GodModeAction(tui.widgets.Menu):
	""" Menu of debug ("god mode") toggles. """
	KEYS_TO_CLOSE = [curses.ascii.ESC, ord('~')]
	def items(self):
		vision_state = 'ON' if self.data.god.vision else 'off'
		return [
				tui.widgets.Menu.Item('v', 'see all: {0}'.format(vision_state), 'vision'),
				]
	def on_close(self):
		return to_main_screen(self)
	def on_item(self, item):
		# item.data holds the attribute name on the god-mode settings object.
		old_state = getattr(self.data.god, item.data)
		setattr(self.data.god, item.data, not old_state)
		Events().trigger(GodModeSwitched(name=item.text, state='off' if old_state else 'ON'))
		return to_main_screen(self)
class ConsumeItem:
	""" Mixin: prompt text and action for consuming an inventory item. """
	def prompt(self):
		return "Which item to consume?"
	def item_action(self, index):
		# `index` is a position in the rogue's inventory list.
		rogue = self.data.rogue
		self.data.history += rogue.consume(rogue.inventory[index])
class DropItem:
	""" Mixin: prompt text and action for dropping an inventory item. """
	def prompt(self):
		return "Which item to drop?"
	def item_action(self, index):
		# `index` is a position in the rogue's inventory list.
		dungeon = self.data
		item = dungeon.rogue.inventory[index]
		dungeon.history += dungeon.current_level.drop_item(dungeon.rogue, item)
class WieldItem:
	""" Mixin: prompt text and action for wielding an inventory item. """
	def prompt(self):
		return "Which item to wield?"
	def item_action(self, index):
		# `index` is a position in the rogue's inventory list.
		rogue = self.data.rogue
		self.data.history += rogue.wield(rogue.inventory[index])
class WearItem:
	""" Mixin: prompt text and action for wearing an inventory item. """
	def prompt(self):
		return "Which item to wear?"
	def item_action(self, index):
		# `index` is a position in the rogue's inventory list.
		rogue = self.data.rogue
		self.data.history += rogue.wear(rogue.inventory[index])
class QuickItemSelection(tui.widgets.Prompt):
	""" Single-keypress item picker: 'a'..'<last item>' select an inventory
	slot, '*' switches to the full inventory menu (extended_mode).
	Meant to be mixed with a *Item class that supplies item_action(). """
	def extended_mode(self):
		# Subclasses return the full-inventory menu mode class here.
		raise NotImplementedError
	def choices(self):
		# One letter per inventory slot, plus '*' for the extended menu.
		return [chr(ord('a') + i) for i in range(len(self.data.rogue.inventory))] + ['*']
	def on_choice(self, key):
		# NOTE(review): `key` compares equal to the character '*' but exposes
		# the raw key code via `.value` — this relies on the tui Key type.
		if key == '*':
			return self.extended_mode()
		index = key.value - ord('a')
		self.item_action(index)
		return self.actual_mode
class QuickConsumeItem(ConsumeItem, QuickItemSelection):
	# Quick one-key consume prompt; '*' expands to the full inventory menu.
	def extended_mode(self):
		return ConsumeFromInventory
class QuickDropItem(DropItem, QuickItemSelection):
	# Quick one-key drop prompt; '*' expands to the full inventory menu.
	def extended_mode(self):
		return DropFromInventory
class QuickWearItem(WearItem, QuickItemSelection):
	# Quick one-key wear prompt; '*' expands to the full inventory menu.
	def extended_mode(self):
		return WearFromInventory
class QuickWieldItem(WieldItem, QuickItemSelection):
	# Quick one-key wield prompt; '*' expands to the full inventory menu.
	def extended_mode(self):
		return WieldFromInventory
class Inventory(tui.widgets.Menu):
	""" Full inventory menu. On its own it is read-only; when mixed with a
	*Item class (which defines item_action) selecting an entry performs
	that action — see the *FromInventory subclasses. """
	COLUMNS = 2
	KEYS_TO_CLOSE = ['i', curses.ascii.ESC]
	def on_close(self):
		return to_main_screen(self)
	def prompt(self):
		if not self.data.rogue.inventory:
			return "(empty)"
		return ""
	def items(self):
		# One menu entry per inventory slot, keyed 'a', 'b', ...;
		# the key code doubles as the Item payload (`data`).
		for index, item in enumerate(self.data.rogue.inventory):
			line = "{0} {1}".format(item.sprite, item.name)
			if self.data.rogue.wielding == item:
				line += " (wielding)"
			if self.data.rogue.wearing == item:
				line += " (wearing)"
			key = ord('a') + index
			yield tui.widgets.Menu.Item(key, line, key)
	def on_item(self, item):
		# Dispatch only when a mixin provided item_action; the plain
		# Inventory menu ignores selection.
		if hasattr(self, 'item_action'):
			self.item_action(item.data - ord('a'))
			return to_main_screen(self)
		return None
class ConsumeFromInventory(ConsumeItem, Inventory):
	# Full-inventory menu variant of the consume action.
	pass
class DropFromInventory(DropItem, Inventory):
	# Full-inventory menu variant of the drop action.
	pass
class WieldFromInventory(WieldItem, Inventory):
	# Full-inventory menu variant of the wield action.
	pass
class WearFromInventory(WearItem, Inventory):
	# Full-inventory menu variant of the wear action.
	pass
class HelpScreen(tui.widgets.TextScreen):
	""" Auto-generated key reference: one line per distinct action docstring,
	listing every key bound to it (keys sharing a docstring are merged,
	e.g. all movement keys). Built once at class-creation time. """
	_full_redraw = True
	# Sort (docstring, key) pairs by docstring so groupby merges all keys
	# with the same help text into one "keys - text" line.
	LINES = ["{0} - {1}".format(''.join(map(itemgetter(1), keys)), text)
			for text, keys in itertools.groupby(sorted([
				(inspect.getdoc(value), key)
				for key, value
				in Controls.items()
				if value.__doc__
				]), key=itemgetter(0))]
	def on_close(self):
		return to_main_screen(self)
class SuicideAttempt(tui.widgets.Confirmation):
	""" Confirmation dialog for quitting without saving. """
	MESSAGE = "Do you really want to quit without saving?"
	def on_yes(self):
		# Aborts the game loop; main() discards the savefile on this path.
		raise ExitWithoutSave()
class Grave(tui.widgets.TextScreen):
	""" Game-over screen shown when the rogue dies. """
	LINES = [
			"You failed to reach mcguffin!"
			]
	RETURN_VALUE = ExitWithoutSave
class Greetings(tui.widgets.TextScreen):
	""" Victory screen shown when the mcguffin is brought back. """
	LINES = [
			"Mcguffin is successfully retrieved!"
			]
	RETURN_VALUE = ExitWithoutSave
class Game(tui.app.App):
	# Thin alias over the tui application runner; no custom behavior yet.
	pass
def main(stdscr):
	""" Curses entry point: load or create the dungeon, run the game loop,
	and persist the dungeon on normal exit (the SerializedEntity context
	manager saves on exit; reset() without arguments discards the save). """
	curses.curs_set(0)
	with SerializedEntity(xdg.save_data_path('dotrogue')/'rogue.sav', Version._top(), entity_name='dungeon', unlink=True, readable=True) as savefile:
		if savefile.entity:
			# Resume a saved game; the generator is not serialized, so
			# re-attach a fresh one.
			dungeon = savefile.entity
			dungeon.generator = RogueDungeonGenerator()
			dungeon.history.append(Event.WelcomeBack(dungeon.rogue))
		else:
			# New game: first level plus a starting weapon.
			dungeon = Dungeon(RogueDungeonGenerator(), Rogue)
			dungeon.go_to_level(0)
			dungeon.rogue.inventory.append(Dagger())
			savefile.reset(dungeon)
		game = Game(stdscr)
		return_code = game.run(to_main_screen(dotdict(data=dungeon)))
		# False means "exit without saving" (death, victory, or suicide).
		if return_code is False:
			savefile.reset()
import click
@click.command()
@click.option('--debug', is_flag=True)
def cli(debug=False):
	""" Command-line entry point: set up logging and run the curses game. """
	clckwrkbdgr.logging.init('rogue',
			debug=debug,
			filename=xdg.save_state_path('dotrogue')/'rogue.log',
			stream=None,
			)
	curses.wrapper(main)
if __name__ == '__main__':  # script entry point
	cli()
|
[
"umi0451@gmail.com"
] |
umi0451@gmail.com
|
7e6ae3866209330b045a79d660c0ab9423b0337b
|
b559fb774f770a1d7bf594e58bad582e5bc5a145
|
/partlist/migrations/0001_initial.py
|
6de925cdb4bfa4d404ade61195f927ee8f256027
|
[] |
no_license
|
sktometometo/Test_django_samplesite
|
2da194e91e66c4482a0eaf19d337e4ef41aad273
|
6077d4c80fa8129f259b10554285a637711af3e6
|
refs/heads/master
| 2022-11-16T02:11:12.165776
| 2020-07-22T07:01:14
| 2020-07-22T07:01:14
| 279,798,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
# Generated by Django 3.0.8 on 2020-07-15 05:17
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the partlist app: a "Part" master-data table and a
    # "Transaction" stock-change ledger referencing Part and the user model.
    # Generated by Django — do not edit the operations by hand.
    initial = True
    dependencies = [
        # The project may swap AUTH_USER_MODEL, so depend on whatever it is.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Part',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('part_name', models.CharField(max_length=200, verbose_name='部品名')),
                ('part_number', models.CharField(max_length=20, unique=True, verbose_name='部品番号')),
                ('part_amount', models.FloatField(default=0, verbose_name='数量')),
                ('part_unit', models.CharField(max_length=200, verbose_name='単位')),
                ('part_place', models.CharField(max_length=200, verbose_name='保管場所')),
                ('part_supplier', models.URLField(blank=True, verbose_name='調達先')),
                ('part_remark', models.TextField(blank=True, verbose_name='備考')),
            ],
            options={
                'db_table': 'partlist',
            },
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Defaults to the save-time clock (callable, evaluated per row).
                ('transaction_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='日付')),
                ('transaction_diff', models.FloatField(default=0, verbose_name='変更量')),
                ('transaction_remark', models.TextField(blank=True, verbose_name='備考')),
                # PROTECT: a Part or user with transactions cannot be deleted.
                ('transaction_part', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='partlist.Part', verbose_name='部品名')),
                ('transaction_user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='担当者')),
            ],
            options={
                'db_table': 'transactions',
            },
        ),
    ]
|
[
"sktometometo@gmail.com"
] |
sktometometo@gmail.com
|
615bb0ef4d02e7266ab2d4ff06b73adeb290dc2f
|
445b16e1754234ed9afea078872545f2fee32b1e
|
/chasha.py
|
fde8781a6cccbc9ab1252f4f133b0f6d6d2e93dd
|
[] |
no_license
|
dlwngh1113/2DGP
|
ee7013ea0a22b87f15098d0c1f0f20c1e846024b
|
07566128bdee8af8027ae6298a96be381fdc73a3
|
refs/heads/master
| 2020-08-03T16:27:26.336450
| 2019-12-10T13:10:45
| 2019-12-10T13:10:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
from pico2d import *
import random
import game_framework
from BehaviorTree import BehaviorTree, SelectorNode, SequenceNode, LeafNode
PIXEL_PER_METER = (10.0 / 0.3) # 10 pixel 30 cm
RUN_SPEED_KMPH = 25.0 # Km / Hour
RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)  # meters per minute
RUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)  # meters per second
RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)  # pixels per second
# Action (animation) speed: 4 sprite frames per 0.5-second action.
TIME_PER_ACTION = 0.5
ACTION_PER_TIME = 1.0 / TIME_PER_ACTION
FRAMES_PER_ACTION = 4
class Chasha:
    """ Wandering 'chasha' monster driven by a one-node behavior tree. """
    # Sprite sheet shared by all instances; loaded lazily on first init.
    # NOTE(review): the lazy load assigns to self.image, creating an
    # *instance* attribute, so each instance loads its own copy — confirm
    # whether class-level caching was intended.
    image = None
    def __init__(self, level=None, x=None, y=None):
        # Defaults: level 3 monster at (250, 600).
        if self.image is None:
            self.image = load_image('image_resources\\chasha.png')
        self.font = load_font('gothic.ttf', 12)
        if x is None and y is None:
            x, y = 250, 600
        self.x, self.y = x, y
        if level is None:
            level = 3
        self.level = level
        # Base sprite frame is 33x32 px; size and stats scale with level.
        self.charWidth = self.level * 33
        self.charHeight = self.level * 32
        self.money = self.level * 1087
        self.atk = level * 43
        self.timer = 1.0  # countdown until the wander direction is re-rolled
        self.speed = 0
        self.dir = random.random() * 2 * math.pi  # heading in radians
        # HP scales with both own level and the player's attack power.
        self.life = self.level * int(game_framework.player.atk * 5.5)
        self.frame = 0
        self.build_behavior_tree()
    def calculate_current_position(self):
        # Advance the animation frame and position; clamp to the play field.
        self.frame = (self.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % FRAMES_PER_ACTION
        self.x += self.speed * math.cos(self.dir) * game_framework.frame_time
        self.y += self.speed * math.sin(self.dir) * game_framework.frame_time
        self.x = clamp(33, self.x, 500 - 33)
        self.y = clamp(32, self.y, 800 - 64)
    def draw(self):
        # Headings in (0, pi) move upward (sin > 0): use sheet row 0;
        # otherwise use row 4. Life is drawn next to the sprite in red.
        if 0 < self.dir < math.pi:
            self.image.clip_draw(int(self.frame) * 33, 0,
                                 33, 32, self.x, self.y, self.charWidth, self.charHeight)
        else:
            self.image.clip_draw(int(self.frame) * 33, 4 * 32,
                                 33, 32, self.x, self.y, self.charWidth, self.charHeight)
        self.font.draw(self.x + self.charWidth / 2, self.y + self.charHeight / 2, str(self.life), (255,0,0))
    def update(self):
        self.bt.run()  # one behavior-tree tick per frame
        pass
    def add_event(self, event):
        pass
    def handle_event(self, event):
        pass
    def build_behavior_tree(self):
        # Single-node tree: the chasha only wanders.
        wander_node = LeafNode("WanderNode", self.wander)
        self.bt = BehaviorTree(wander_node)
        pass
    def wander(self):
        # fill here
        # Move at full run speed; re-roll a random heading about once a
        # second (timer decreases at half frame-time rate).
        self.speed = RUN_SPEED_PPS
        self.calculate_current_position()
        self.timer -= game_framework.frame_time / 2
        if self.timer < 0:
            self.timer = 1.0
            self.dir = random.random() * 2 * math.pi
        self.x = clamp(33, self.x, 500 - 33)
        self.y = clamp(32, self.y, 800 - 64)
        return BehaviorTree.SUCCESS
        pass
    def get_bb(self):
        # Bounding box as (left, bottom, right, top); the right edge is
        # pulled in by 10 px — presumably to soften collisions; confirm.
        return self.x - self.charWidth / 2, self.y - self.charHeight / 2, self.x + self.charWidth / 2 - 10, self.y + self.charHeight / 2
|
[
"dlwngh1113@naver.com"
] |
dlwngh1113@naver.com
|
f3d42a10d50a212195491831b823aee6a236d05b
|
f18ee8805c738b2cd22634ea728a6a35c0153eee
|
/result_generator/result_feature_db/index.py
|
dfab4c112be45ea8fed8554ee4fb67d1f1cc85ed
|
[
"MIT"
] |
permissive
|
shijack/feature_extract
|
e9a115085e1c82dd9a2782e1464e90f18f273885
|
2c45750ea42a30a1f0b5cbe305edc4c8ab0461d7
|
refs/heads/master
| 2020-04-04T19:15:26.941570
| 2018-11-05T10:34:49
| 2018-11-05T10:34:49
| 156,198,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,388
|
py
|
# -*- coding: utf-8 -*-
# Author: shijack
import sys
import time
sys.path.append('../../')
import os
from nets import resnet_v2
from net_model.extract_cnn_vgg16 import VGG16_MODIFIED
import h5py
import numpy as np
import tensorflow as tf
from keras.preprocessing import image
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from util import utils
from net_model.extract_cnn_densenet_keras import DenseNETMAX
from z_extend_rmac.rmac import rmac
from z_extend_rmac.get_regions import rmac_regions, get_size_vgg_feat_map
def feature_generator_densenet(file_img, file_feature_output):
    """ Extract DenseNet (max-pooled) features for every image listed in
    file_img (one "path [extra]" entry per line, space-separated) and write
    them to an HDF5 file: 'dataset_1' = feature vectors, 'dataset_2' =
    image paths. """
    tmp_img_list = []
    img_list = []
    with open(file_img, 'r') as f:
        tmp_img_list = f.readlines()
    for item_img in tmp_img_list:
        # Keep only the first (path) column of each line.
        img_list.append(item_img.split(' ')[0])
    print "--------------------------------------------------"
    print "         feature extraction starts"
    print "--------------------------------------------------"
    feats = []
    names = []
    start_time = time.time()
    model = DenseNETMAX()
    for i, img_path in enumerate(img_list):
        norm_feat = model.extract_feat(img_path)
        # dct_feat = np.multiply(np.array(DCT_binaray(img_path)),np.full((1,256),0.01))
        # dct_feat = get_dct_feature(img_path)
        # dct_feat = DCT_binaray(img_path)
        # final_feat = np.append(dct_feat,norm_feat)
        img_name = img_path
        # norm_feat = np.hstack((norm_feat,np.zeros([32,],dtype=np.float32)))
        feats.append(norm_feat)
        names.append(img_name)
        print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
    end_time = time.time()
    print ("final_feature extract time:", (end_time - start_time))
    feats = np.array(feats)
    # directory for storing extracted features
    output = file_feature_output
    print "--------------------------------------------------"
    print "      writing feature extraction results ..."
    print "--------------------------------------------------"
    # Two parallel datasets: features and the corresponding image paths.
    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=names)
    h5f.close()
def feature_generator_rmac_vgg16(dir_img, file_feature_output, is_split_dir=False):
'''
按照文件夹目录,每个目录生成一个文件夹所有图片特征的集合.bow文件,format:每行一个图片的特征。
:param dir_img:
:param file_feature_output:
:param is_split_dir:
:return:
'''
path = dir_img
print "--------------------------------------------------"
print " feature extraction starts"
print "--------------------------------------------------"
if is_split_dir:
model = rmac.rmac(20)
for child_dirs in utils.get_dirs_child(path):
img_list = utils.get_all_files_suffix(child_dirs)
start_time = time.time()
feats = []
names = []
for i, img_path in enumerate(img_list):
img = image.load_img(img_path)
# Resize
scale = utils.IMG_SIZE / max(img.size)
new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
# print('Original size: %s, Resized image: %s' % (str(img.size), str(new_size)))
img = img.resize(new_size)
# Mean substraction
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = utils.preprocess_image(x)
# Load RMAC model
Wmap, Hmap = get_size_vgg_feat_map(x.shape[2], x.shape[1])
regions = rmac_regions(Wmap, Hmap, 3)
# Compute RMAC vector
# print('Extracting RMAC from image...')
# print (len(regions))
norm_feat = model.predict([x, np.expand_dims(regions, axis=0)])
norm_feat = norm_feat.reshape((-1,))
img_name = os.path.split(img_path)[1]
final_feat = np.hstack((norm_feat.reshape((-1,)), np.zeros([288, ], dtype=np.float32)))
feats.append(final_feat)
names.append(img_name)
print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
feats = np.array(feats)
print "--------------------------------------------------"
print " writing feature extraction results ..."
print "--------------------------------------------------"
feats_6 = feats.astype('float32')
np.savetxt(child_dirs + "/" + child_dirs.split("/")[-1] + '.bow', feats_6, fmt='%f')
end_time = time.time()
print ('the total time cnsumed is %d\n', (end_time - start_time))
else:
feats = []
names = []
start_time = time.time()
model = rmac.rmac(20)
img_list = utils.get_all_files_suffix(dir_img)
for i, img_path in enumerate(img_list):
img = image.load_img(img_path)
# Resize
scale = utils.IMG_SIZE / max(img.size)
new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
# print('Original size: %s, Resized image: %s' % (str(img.size), str(new_size)))
img = img.resize(new_size)
# Mean substraction
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = utils.preprocess_image(x)
# Load RMAC model
Wmap, Hmap = get_size_vgg_feat_map(x.shape[2], x.shape[1])
regions = rmac_regions(Wmap, Hmap, 3)
# Compute RMAC vector
# print('Extracting RMAC from image...')
# print (len(regions))
norm_feat = model.predict([x, np.expand_dims(regions, axis=0)])
norm_feat = norm_feat.reshape((-1,))
img_name = os.path.split(img_path)[1]
final_feat = np.hstack((norm_feat.reshape((-1,)), np.zeros([288, ], dtype=np.float32)))
feats.append(final_feat)
names.append(img_name)
print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
end_time = time.time()
print ("final_feature extract time:", (end_time - start_time))
feats = np.array(feats)
print "--------------------------------------------------"
print " writing feature extraction results ..."
print "--------------------------------------------------"
# directory for storing extracted features
output = file_feature_output
h5f = h5py.File(output, 'w')
h5f.create_dataset('dataset_1', data=feats)
h5f.create_dataset('dataset_2', data=names)
h5f.close()
def feature_generator_vae(file_img, file_meta_graph, file_ckpt, file_feature_output):
    """ Extract latent features with a saved VAE TensorFlow graph.
    file_meta_graph/file_ckpt locate the saved model; file_img lists the
    images (path in the first space-separated column). Results go to an
    HDF5 file: 'dataset_1' = features, 'dataset_2' = image paths. """
    print os.path.abspath(file_meta_graph)
    print file_ckpt
    tmp_img_list = []
    img_list = []
    with open(file_img, 'r') as f:
        tmp_img_list = f.readlines()
    for item_img in tmp_img_list:
        img_list.append(item_img.split(' ')[0])
    print "--------------------------------------------------"
    print "         feature extraction starts"
    print "--------------------------------------------------"
    feats = []
    names = []
    start_time = time.time()
    with tf.Session() as sess:
        # Restore the graph, then look up the input placeholder and the
        # latent-feature tensor by their saved names.
        saver = tf.train.import_meta_graph(file_meta_graph)
        saver.restore(sess, file_ckpt)
        graph = tf.get_default_graph()
        x_input = graph.get_tensor_by_name('encoder/input_img:0')
        latent_feature = graph.get_tensor_by_name('variance/latent_feature:0')
        for i, img_path in enumerate(img_list):
            img = utils.img_process(img_path)
            norm_feat = sess.run(latent_feature, feed_dict={x_input: img})
            img_name = img_path
            # norm_feat = np.hstack((norm_feat,np.zeros([160,],dtype=np.float32)))
            feats.append(norm_feat.flatten())
            names.append(img_name)
            print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
    end_time = time.time()
    print ("final_feature extract time:", (end_time - start_time))
    feats = np.array(feats)
    print "--------------------------------------------------"
    print "      writing feature extraction results ..."
    print "--------------------------------------------------"
    # directory for storing extracted features
    output = file_feature_output
    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=names)
    h5f.close()
def feature_generator_basenet(file_img, checkpoints_dir, file_feature_output):
    """ Extract features with a pretrained slim ResNet-v2-101 checkpoint
    for every image listed in file_img (path in the first space-separated
    column). Results go to an HDF5 file: 'dataset_1' = features,
    'dataset_2' = image paths. """
    tmp_img_list = []
    img_list = []
    with open(file_img, 'r') as f:
        tmp_img_list = f.readlines()
    for item_img in tmp_img_list:
        img_list.append(item_img.split(' ')[0])
    print "--------------------------------------------------"
    print "         feature extraction starts"
    print "--------------------------------------------------"
    feats = []
    names = []
    from tensorflow.contrib import slim
    x_input = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='input_img')
    # Alternative encoders tried during experiments:
    # latent_mean, latent_stddev = encoder(x_input, train_logical=True, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_vgg16(x_input, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_vgg19(x_input, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_inceptionv1(x_input, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_inceptionv4(x_input, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_inception_resnetv2(x_input, latent_dim=LATENT_DIM)
    # latent_mean, latent_stddev = encoder_resnetv2_152(x_input, latent_dim=LATENT_DIM)  # too many parameters, training is very slow
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        # num_classes=None: the slim API then returns the pooled pre-logits
        # output, which is used directly as the feature vector.
        logits, _ = resnet_v2.resnet_v2_101(x_input, num_classes=None, is_training=False)
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'resnet_v2_101.ckpt'),
        slim.get_model_variables('resnet_v2_101'))
    start_time = time.time()
    with tf.Session() as sess:
        init_fn(sess)
        latent_feature = logits
        for i, img_path in enumerate(img_list):
            img = utils.img_process_vgg_tf(img_path)
            norm_feat = sess.run(latent_feature, feed_dict={x_input: img})
            img_name = img_path
            # norm_feat = np.hstack((norm_feat,np.zeros([160,],dtype=np.float32)))
            feats.append(norm_feat.flatten())
            names.append(img_name)
            print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
    end_time = time.time()
    print ("final_feature extract time:", (end_time - start_time))
    feats = np.array(feats)
    print "--------------------------------------------------"
    print "      writing feature extraction results ..."
    print "--------------------------------------------------"
    # directory for storing extracted features
    output = file_feature_output
    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=names)
    h5f.close()
def feature_generator_basenet_vgg(file_img, file_feature_output):
    """ Extract features with the modified VGG16 model for every image
    listed in file_img (path in the first space-separated column).
    Results go to an HDF5 file: 'dataset_1' = features, 'dataset_2' =
    image paths. """
    tmp_img_list = []
    img_list = []
    with open(file_img, 'r') as f:
        tmp_img_list = f.readlines()
    for item_img in tmp_img_list:
        img_list.append(item_img.split(' ')[0])
    print "--------------------------------------------------"
    print "         feature extraction starts"
    print "--------------------------------------------------"
    feats = []
    names = []
    # model = DenseNETMAX()
    model = VGG16_MODIFIED()
    start_time = time.time()
    for i, img_path in enumerate(img_list):
        norm_feat = model.extract_feat(img_path)
        # dct_feat = DCT_binaray(img_path)
        # final_feat = np.append(dct_feat, norm_feat)
        img_name = img_path
        # norm_feat = np.hstack((norm_feat,np.zeros([160,],dtype=np.float32)))
        feats.append(norm_feat.flatten())
        names.append(img_name)
        print "extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list))
    end_time = time.time()
    print ("final_feature extract time:", (end_time - start_time))
    feats = np.array(feats)
    print "--------------------------------------------------"
    print "      writing feature extraction results ..."
    print "--------------------------------------------------"
    # directory for storing extracted features
    h5f = h5py.File(file_feature_output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=names)
    h5f.close()
if __name__ == "__main__":
    # Output feature databases and the image source directory.
    args = {'index_basenet': './result_generator/features/feature_densenet169_trans_imgs_basenet.h5',
            'index': '../features/feature_vae_resnetv2_101_trans_imgs_136000_basenet.h5',
            'database': '/data/datasets/trans_imgs'}
    # feature_generator_densenet(dir_img=args["database"], file_feature_output=args["index"])
    # feature_generator_rmac_vgg16(dir_img=args["database"], file_feature_output=args["index"])
    # file_ckpt = '/shihuijie/project/densenet/model_new/model_vae_resnetv2_101/vae-136000'
    # feature_generator_vae(file_img='/shihuijie/project/vae/data/image_list.txt',
    #                       file_meta_graph=file_ckpt + '.meta',
    #                       file_ckpt=file_ckpt,
    #                       file_feature_output=args["index"])
    # Only the ResNet-v2-101 base-network extractor is currently active.
    feature_generator_basenet(file_img='/shihuijie/project/vae/data/image_list.txt',
                              checkpoints_dir='/shihuijie/project/vae/checkpoints/resnet_v2_101/',
                              file_feature_output=args["index_basenet"])
    # feature_generator_basenet_vgg(file_img='/shihuijie/project/vae/data/image_list.txt',
    #                               file_feature_output=args["index_basenet"])
    # feature_generator_densenet(file_img='/shihuijie/project/vae-system/data/image_list.txt',
    #                            file_feature_output=args["index_basenet"])
|
[
"690141808@qq.com"
] |
690141808@qq.com
|
aad47db0f8136de55098fc0c8e2f82093cadc0c3
|
e72db255e41332c113f929eb63815b2169038209
|
/Chapter09/transforming/group_by.py
|
bf0a08814a324d02b5c2857fef7b5816a5ccd792
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-On-Reactive-Programming-with-Python
|
b196b971fe49a36da9f979790b8c31c98a659031
|
757d45e2023032c6074e26ad252530f3c89978bf
|
refs/heads/master
| 2023-02-07T01:03:37.648175
| 2023-02-05T18:21:17
| 2023-02-05T18:21:38
| 128,761,473
| 75
| 19
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import reactivex as rx
import reactivex.operators as ops
from reactivex.subject import Subject
def wrap_items(i):
    """Map every element of observable *i* to a string tagged with *i* itself."""
    def tag(element):
        return 'obs {}: {}'.format(i, element)
    return i.pipe(ops.map(tag))
# Source observable of six integers.
numbers = rx.from_([1, 2, 3, 4, 5, 6])
numbers.pipe(
    # Split into two grouped observables keyed by parity (even / odd).
    ops.group_by(lambda i: i % 2 == 0),
    # Flatten the groups back into one stream, tagging each item with the
    # repr of the grouped observable it came from.
    ops.flat_map(wrap_items),
).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed")
)
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
f13227ef1502fcff48b95b267daabd3a25bb2f9c
|
ea0ab657b0b6e543ac8e6b70224cf91dceb2230b
|
/favorite_book/wsgi.py
|
f67506ffbe62a89a1dad33dcf9894b53b858700e
|
[] |
no_license
|
BigGeF/Favorite_Books_python
|
7f91c62593c4318a6a818a16b0ad3ef642fa1d2f
|
84db640027e7bc00a5c240915302f1c048dffdef
|
refs/heads/master
| 2020-09-16T16:28:28.031180
| 2019-11-25T00:06:41
| 2019-11-25T00:06:41
| 223,829,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
"""
WSGI config for favorite_book project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "favorite_book.settings")
application = get_wsgi_application()
|
[
"52731933+BigGeF@users.noreply.github.com"
] |
52731933+BigGeF@users.noreply.github.com
|
889fa342d79eff981d7d49709fa04b39322447ff
|
7f1e0158e70b69bfa353661bfb2eabda9ee5c56c
|
/tests/models/validators/v2_2_1/jsd_ed5cbafc332a5efa97547736ba8b6044.py
|
680209d36b4b94a32fcd18341ad37eacd1d6e7b9
|
[
"MIT"
] |
permissive
|
Jerbuck/dnacentersdk
|
97fb11844410ec7ab49aec35a30979d6288a87fd
|
ef2adde6113e7a6acd28a287007eb470fa39d31f
|
refs/heads/master
| 2023-07-31T13:43:01.108243
| 2021-09-14T17:41:19
| 2021-09-14T17:41:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69,451
|
py
|
# -*- coding: utf-8 -*-
"""Cisco DNA Center retrievesPreviousPathtrace data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorEd5Cbafc332A5Efa97547736Ba8B6044(object):
"""retrievesPreviousPathtrace request schema definition."""
def __init__(self):
super(JSONSchemaValidatorEd5Cbafc332A5Efa97547736Ba8B6044, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"detailedStatus": {
"properties": {
"aclTraceCalculation": {
"type": "string"
},
"aclTraceCalculationFailureReason": {
"type": "string"
}
},
"type": "object"
},
"lastUpdate": {
"type": "string"
},
"networkElements": {
"items": {
"properties": {
"accuracyList": {
"items": {
"properties": {
"percent": {
"type": "integer"
},
"reason": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"detailedStatus": {
"properties": {
"aclTraceCalculation": {
"type": "string"
},
"aclTraceCalculationFailureReason": {
"type": "string"
}
},
"type": "object"
},
"deviceStatistics": {
"properties": {
"cpuStatistics": {
"properties": {
"fiveMinUsageInPercentage": {
"type": "number"
},
"fiveSecsUsageInPercentage": {
"type": "number"
},
"oneMinUsageInPercentage": {
"type": "number"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"memoryStatistics": {
"properties": {
"memoryUsage": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
},
"totalMemory": {
"type": "integer"
}
},
"type": "object"
}
},
"type": "object"
},
"deviceStatsCollection": {
"type": "string"
},
"deviceStatsCollectionFailureReason": {
"type": "string"
},
"egressPhysicalInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"egressVirtualInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"flexConnect": {
"properties": {
"authentication": {
"enum": [
"LOCAL",
"CENTRAL"
],
"type": "string"
},
"dataSwitching": {
"enum": [
"LOCAL",
"CENTRAL"
],
"type": "string"
},
"egressAclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"ingressAclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"wirelessLanControllerId": {
"type": "string"
},
"wirelessLanControllerName": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"ingressPhysicalInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"ingressVirtualInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"ip": {
"type": "string"
},
"linkInformationSource": {
"type": "string"
},
"name": {
"type": "string"
},
"perfMonCollection": {
"type": "string"
},
"perfMonCollectionFailureReason": {
"type": "string"
},
"perfMonStatistics": {
"items": {
"properties": {
"byteRate": {
"type": "integer"
},
"destIpAddress": {
"type": "string"
},
"destPort": {
"type": "string"
},
"inputInterface": {
"type": "string"
},
"ipv4DSCP": {
"type": "string"
},
"ipv4TTL": {
"type": "integer"
},
"outputInterface": {
"type": "string"
},
"packetBytes": {
"type": "integer"
},
"packetCount": {
"type": "integer"
},
"packetLoss": {
"type": "integer"
},
"packetLossPercentage": {
"type": "number"
},
"protocol": {
"type": "string"
},
"refreshedAt": {
"type": "integer"
},
"rtpJitterMax": {
"type": "integer"
},
"rtpJitterMean": {
"type": "integer"
},
"rtpJitterMin": {
"type": "integer"
},
"sourceIpAddress": {
"type": "string"
},
"sourcePort": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"role": {
"type": "string"
},
"ssid": {
"type": "string"
},
"tunnels": {
"items": {
"type": "string"
},
"type": "array"
},
"type": {
"type": "string"
},
"wlanId": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"networkElementsInfo": {
"items": {
"properties": {
"accuracyList": {
"items": {
"properties": {
"percent": {
"type": "integer"
},
"reason": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"detailedStatus": {
"properties": {
"aclTraceCalculation": {
"type": "string"
},
"aclTraceCalculationFailureReason": {
"type": "string"
}
},
"type": "object"
},
"deviceStatistics": {
"properties": {
"cpuStatistics": {
"properties": {
"fiveMinUsageInPercentage": {
"type": "number"
},
"fiveSecsUsageInPercentage": {
"type": "number"
},
"oneMinUsageInPercentage": {
"type": "number"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"memoryStatistics": {
"properties": {
"memoryUsage": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
},
"totalMemory": {
"type": "integer"
}
},
"type": "object"
}
},
"type": "object"
},
"deviceStatsCollection": {
"type": "string"
},
"deviceStatsCollectionFailureReason": {
"type": "string"
},
"egressInterface": {
"properties": {
"physicalInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"virtualInterface": {
"items": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"flexConnect": {
"properties": {
"authentication": {
"enum": [
"LOCAL",
"CENTRAL"
],
"type": "string"
},
"dataSwitching": {
"enum": [
"LOCAL",
"CENTRAL"
],
"type": "string"
},
"egressAclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"ingressAclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"wirelessLanControllerId": {
"type": "string"
},
"wirelessLanControllerName": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"ingressInterface": {
"properties": {
"physicalInterface": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"virtualInterface": {
"items": {
"properties": {
"aclAnalysis": {
"properties": {
"aclName": {
"type": "string"
},
"matchingAces": {
"items": {
"properties": {
"ace": {
"type": "string"
},
"matchingPorts": {
"items": {
"properties": {
"ports": {
"items": {
"properties": {
"destPorts": {
"items": {
"type": "string"
},
"type": "array"
},
"sourcePorts": {
"items": {
"type": "string"
},
"type": "array"
}
},
"type": "object"
},
"type": "array"
},
"protocol": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"result": {
"type": "string"
}
},
"type": "object"
},
"id": {
"type": "string"
},
"interfaceStatistics": {
"properties": {
"adminStatus": {
"type": "string"
},
"inputPackets": {
"type": "integer"
},
"inputQueueCount": {
"type": "integer"
},
"inputQueueDrops": {
"type": "integer"
},
"inputQueueFlushes": {
"type": "integer"
},
"inputQueueMaxDepth": {
"type": "integer"
},
"inputRatebps": {
"type": "integer"
},
"operationalStatus": {
"type": "string"
},
"outputDrop": {
"type": "integer"
},
"outputPackets": {
"type": "integer"
},
"outputQueueCount": {
"type": "integer"
},
"outputQueueDepth": {
"type": "integer"
},
"outputRatebps": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"interfaceStatsCollection": {
"type": "string"
},
"interfaceStatsCollectionFailureReason": {
"type": "string"
},
"name": {
"type": "string"
},
"pathOverlayInfo": {
"items": {
"properties": {
"controlPlane": {
"type": "string"
},
"dataPacketEncapsulation": {
"type": "string"
},
"destIp": {
"type": "string"
},
"destPort": {
"type": "string"
},
"protocol": {
"type": "string"
},
"sourceIp": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"vxlanInfo": {
"properties": {
"dscp": {
"type": "string"
},
"vnid": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"type": "array"
},
"qosStatistics": {
"items": {
"properties": {
"classMapName": {
"type": "string"
},
"dropRate": {
"type": "integer"
},
"numBytes": {
"type": "integer"
},
"numPackets": {
"type": "integer"
},
"offeredRate": {
"type": "integer"
},
"queueBandwidthbps": {
"type": "string"
},
"queueDepth": {
"type": "integer"
},
"queueNoBufferDrops": {
"type": "integer"
},
"queueTotalDrops": {
"type": "integer"
},
"refreshedAt": {
"type": "integer"
}
},
"type": "object"
},
"type": "array"
},
"qosStatsCollection": {
"type": "string"
},
"qosStatsCollectionFailureReason": {
"type": "string"
},
"usedVlan": {
"type": "string"
},
"vrfName": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
},
"ip": {
"type": "string"
},
"linkInformationSource": {
"type": "string"
},
"name": {
"type": "string"
},
"perfMonCollection": {
"type": "string"
},
"perfMonCollectionFailureReason": {
"type": "string"
},
"perfMonitorStatistics": {
"items": {
"properties": {
"byteRate": {
"type": "integer"
},
"destIpAddress": {
"type": "string"
},
"destPort": {
"type": "string"
},
"inputInterface": {
"type": "string"
},
"ipv4DSCP": {
"type": "string"
},
"ipv4TTL": {
"type": "integer"
},
"outputInterface": {
"type": "string"
},
"packetBytes": {
"type": "integer"
},
"packetCount": {
"type": "integer"
},
"packetLoss": {
"type": "integer"
},
"packetLossPercentage": {
"type": "number"
},
"protocol": {
"type": "string"
},
"refreshedAt": {
"type": "integer"
},
"rtpJitterMax": {
"type": "integer"
},
"rtpJitterMean": {
"type": "integer"
},
"rtpJitterMin": {
"type": "integer"
},
"sourceIpAddress": {
"type": "string"
},
"sourcePort": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"role": {
"type": "string"
},
"ssid": {
"type": "string"
},
"tunnels": {
"items": {
"type": "string"
},
"type": "array"
},
"type": {
"type": "string"
},
"wlanId": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"properties": {
"items": {
"type": "string"
},
"type": "array"
},
"request": {
"properties": {
"controlPath": {
"type": "boolean"
},
"createTime": {
"type": "integer"
},
"destIP": {
"type": "string"
},
"destPort": {
"type": "string"
},
"failureReason": {
"type": "string"
},
"id": {
"type": "string"
},
"inclusions": {
"items": {
"type": "string"
},
"type": "array"
},
"lastUpdateTime": {
"type": "integer"
},
"periodicRefresh": {
"type": "boolean"
},
"protocol": {
"type": "string"
},
"sourceIP": {
"type": "string"
},
"sourcePort": {
"type": "string"
},
"status": {
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
    """Check *request* against the retrievesPreviousPathtrace schema.

    Args:
        request: The request body (typically a dict) to validate
            against the fastjsonschema validator compiled in ``__init__``.

    Raises:
        MalformedRequest: If the request body does not conform to the
            schema; the underlying fastjsonschema message is embedded
            in the exception text.
    """
    try:
        self._validator(request)
    except fastjsonschema.exceptions.JsonSchemaException as exc:
        # Re-wrap the library error in the SDK's own exception type so
        # callers only need to catch MalformedRequest.
        message = '{} is invalid. Reason: {}'.format(request, exc.message)
        raise MalformedRequest(message)
|
[
"wastorga@altus.co.cr"
] |
wastorga@altus.co.cr
|
9518b3790b2967f59fe55686f22287926c8fd7fe
|
599709e7687a78f92b268315590d6ad750ce97d6
|
/src_py/l2func.py
|
490832659c4a2d9e01839b63a45ed3f2d32af2da
|
[] |
no_license
|
ReiMatsuzaki/cbasis2
|
b99d096150d87f9301ed0e34f7be5f0203e4a81e
|
86f21146fab6fc6f750d02fb2200ea94616ca896
|
refs/heads/master
| 2021-01-19T23:15:32.864686
| 2017-04-27T07:29:26
| 2017-04-27T07:29:26
| 88,953,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from l2func_bind import *
from linspace import *
from set_l2func import *
from hatom import *
from basis_set import *
from l2func_io import *
from lindep import *
from d_basis import *
|
[
"matsuzaki.rei@sepia.chem.keio.ac.jp"
] |
matsuzaki.rei@sepia.chem.keio.ac.jp
|
57ea1c2505e1c09b048701ba91772ab40663dfce
|
c234f93c1812d8c5cf07b6f91574d8b0818989ae
|
/restoran/restconf/main.py
|
1dcb9111bf692418a2c32f74294e814054beb199
|
[] |
no_license
|
Alymbekov/RESTORAN
|
6a8cd6117eee40be82dee737ccbddc51f34fbf8e
|
fdd82aaa80ad70bf1a9645bd3e5d00675948ebe7
|
refs/heads/master
| 2020-05-06T12:33:31.206625
| 2019-05-20T14:17:07
| 2019-05-20T14:17:07
| 180,128,403
| 2
| 0
| null | 2019-05-20T14:17:08
| 2019-04-08T10:46:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
# Django REST framework + JWT configuration for the restoran project.
# These dicts are intended to be merged into the main Django settings module.
import datetime
from django.conf import settings
# Core DRF behaviour: JWT authentication (with session fallback),
# read-only access for anonymous users, custom pagination, and
# search/ordering query filters.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        # 'rest_framework.authentication.BasicAuthentication',
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticatedOrReadOnly',
    ),
    'DEFAULT_PAGINATION_CLASS': 'restoran.restconf.pagination.CustomPagination',
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.SearchFilter',
        'rest_framework.filters.OrderingFilter',
    ),
    # Query-string parameter names, e.g. ?q=pizza&ordering=-id
    'SEARCH_PARAM': 'q',
    'ORDERING_PARAM': 'ordering'
}
# django-rest-framework-jwt configuration: the stock encode/decode/payload
# handlers, a project-specific response payload handler, and a 7-day
# refresh window for issued tokens.
JWT_AUTH = {
    'JWT_ENCODE_HANDLER':
    'rest_framework_jwt.utils.jwt_encode_handler',
    'JWT_DECODE_HANDLER':
    'rest_framework_jwt.utils.jwt_decode_handler',
    'JWT_PAYLOAD_HANDLER':
    'rest_framework_jwt.utils.jwt_payload_handler',
    'JWT_PAYLOAD_GET_USER_ID_HANDLER':
    'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
    'JWT_RESPONSE_PAYLOAD_HANDLER':
    # 'rest_framework_jwt.utils.jwt_response_payload_handler',
    # Project override: adds user data to the token response.
    'users.utils.jwt_response_payload_handler',
    'JWT_ALLOW_REFRESH': True,
    'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
    # Clients must send "Authorization: JWT <token>".
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
    # No cookie-based JWT transport; header only.
    'JWT_AUTH_COOKIE': None,
}
|
[
"maxim.makarov.1997@mail.ru"
] |
maxim.makarov.1997@mail.ru
|
41eedf17f955552608d5964a14ccb3227ffbbd8c
|
99287c727e2249336d6c27025920df620d7b124c
|
/streams/consumers.py
|
7b8d6a6ac941a9bc6fda109bc50daa6acf6a6215
|
[] |
no_license
|
dadoeyad/event-stream
|
30809bdfec1958754dc10050bf4330c8e37a9a03
|
6c6aa6536fbd8e57b4dfbc6b519f8da2d418ae64
|
refs/heads/master
| 2021-01-12T09:54:28.396750
| 2016-12-17T15:45:00
| 2016-12-17T15:45:00
| 76,292,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
import logging
from django.conf import settings
from channels import Group
from channels.sessions import channel_session
from .tweets import tweets
from slack import slack
log = logging.getLogger(__name__)
@channel_session
def ws_connect(message):
    """Handle a WebSocket connect on /streams/<label> and start that feed.

    Adds the reply channel to the channels Group named after the label and
    starts the matching producer ('tweets' or 'slack'); unknown labels are
    only logged.
    """
    try:
        prefix, label = message['path'].decode('ascii').strip('/').split('/')
        if prefix != 'streams':
            log.debug('invalid ws path=%s', message['path'])
            return
    except ValueError:
        # Path did not split into exactly <prefix>/<label>.
        log.debug('invalid ws path=%s', message['path'])
        return
    log.debug('stream connect to label=%s', label)
    Group(label).add(message.reply_channel)
    # Remember the label so ws_disconnect can discard from the same group.
    message.channel_session['label'] = label
    if label == 'tweets':
        tweets.listener.set_group()
        # NOTE(review): `async` became a reserved keyword in Python 3.7, so
        # this line is a SyntaxError there (tweepy later renamed the kwarg).
        # Confirm the target Python/tweepy versions.
        tweets.filter(track=settings.LISTENER_WORDS, async=True)
    elif label == 'slack':
        slack.set_group()
        slack.start()
    else:
        log.warning('unknown label=%s', label)
        return
@channel_session
def ws_disconnect(message):
    """Handle a WebSocket disconnect: leave the Group and stop the feed.

    Mirrors ws_connect: re-parses the path, removes the reply channel from
    the label's Group, and tells the matching producer to disconnect.
    """
    try:
        prefix, label = message['path'].decode('ascii').strip('/').split('/')
        if prefix != 'streams':
            log.debug('invalid ws path=%s', message['path'])
            return
    except ValueError:
        # Path did not split into exactly <prefix>/<label>.
        log.debug('invalid ws path=%s', message['path'])
        return
    log.debug('stream disconnect to label=%s', label)
    Group(label).discard(message.reply_channel)
    if label == 'tweets':
        tweets.disconnect()
    elif label == 'slack':
        slack.disconnect()
    else:
        log.warning('unknown label=%s', label)
        return
|
[
"dado_eyad@Eyads-MacBook-Pro.local"
] |
dado_eyad@Eyads-MacBook-Pro.local
|
e3474b32d77a685b3c23b5eca37bf340c0143dd5
|
44ee7102af2f141a51fb1086b0bb9f97fa214859
|
/p20.py
|
38517fbd6b150cdf1a3ec74a78aecc7d603e91b7
|
[] |
no_license
|
ramyaramy/ramya
|
3ced7c19e543559d33525a09ba5e8453d4a6c2f1
|
5cc45ea822c1238299ad796cad5d0918c8cc5564
|
refs/heads/master
| 2021-05-10T12:09:00.570336
| 2018-02-21T17:11:11
| 2018-02-21T17:11:11
| 118,432,451
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
def main():
    """Read one line and print it Caesar-shifted (each code point + 3).

    Bug fix: the original called ``chr(n)`` where ``n`` was never defined
    (NameError); the shifted code point ``x`` was clearly intended. Also
    stops shadowing the builtins ``str`` and ``list``.
    """
    text = input()
    # Shift every character's Unicode code point up by 3 (no wrap-around).
    shifted = [chr(ord(ch) + 3) for ch in text]
    print(''.join(shifted))
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
ramyaramy.noreply@github.com
|
b062a23c7a8e0e59d13d4a6a55b09da524ed6b52
|
6262c5dc440d9b2e595d0a6d567d5e78d45b5ca6
|
/Coursera/compute_pay_func.py
|
8063ecd9b1ba66172bb1f637ae296309746018c2
|
[] |
no_license
|
hasija-bhawna/Python
|
d19da940078afe44c40dbe1f1b35942e758f5681
|
eb207f7147c75ec3593b4365c6c2604e5bfef860
|
refs/heads/master
| 2022-11-18T08:13:16.340242
| 2020-07-14T00:48:06
| 2020-07-14T00:48:06
| 279,179,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Compute pay with a function: read hours worked and hourly rate from the user.
h = input('Enter the number of hours:')
hours = float(h)
r = input('Enter the rate per hour:')
rate = float(r)
def computepay(hours, rate):
    """Return gross pay: straight time up to 40 hours, 1.5x rate beyond."""
    if hours > 40:
        overtime_pay = (hours - 40) * (1.5 * rate)
        return 40 * rate + overtime_pay
    return hours * rate
# Compute and report the pay for the values read above.
Pay = computepay(hours,rate)
print('The pay is:', Pay)
|
[
"hasija.bhawna@gmail.com"
] |
hasija.bhawna@gmail.com
|
d8a1362bd606a719b92a372f935715deb8496ba1
|
7e04c5851097c9f18a56b758022c16a8be3853d0
|
/conTable.py
|
c3309d23df63f261a5b17602c32d3fccd4eb34da
|
[] |
no_license
|
fareise/segment-cluster
|
5faaa9e54ae96401539ff6968f82b10286c01907
|
e86d3359eac888b574de028e19d74ef5f849511a
|
refs/heads/master
| 2021-01-23T07:44:14.478443
| 2017-04-05T07:33:01
| 2017-04-05T07:33:01
| 86,437,759
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
def makeTable(files):
    """Build a per-segment factor table from a list of flight-data CSV files.

    Each file (read relative to the module-level ``wd`` directory) is filtered
    to rows with 100 < _ALTITUDE < 1000, the side-stick channels are segmented
    via ``segmentOne``, and one row per detected segment is appended to the
    result together with wind/height statistics.

    :param files: iterable of CSV file names under ``wd``
    :return: tuple ``(tableFactor, skip)`` -- the assembled DataFrame and the
        number of files that raised an error and were skipped.
    """
    skip = 0
    rollSstick = ['_ROLL_CAPT_SSTICK', '_ROLL_CAPT_SSTICK-1', '_ROLL_CAPT_SSTICK-2', '_ROLL_CAPT_SSTICK-3']
    sstick = ['_SSTICK_CAPT', '_SSTICK_CAPT-1','_SSTICK_CAPT-2','_SSTICK_CAPT-3']
    tableFactor = pd.DataFrame(columns=['FILNAME','LAST','OPERNUM','RANGENUM','VAR','GAP','NI'])
    for i, fileName in enumerate(files):
        try:
            df = pd.read_csv(wd+fileName)
            df = df.fillna(method='pad')
            df = df.loc[(df['_ALTITUDE'] < 1000) & (df['_ALTITUDE'] >100), :]
            rollSum = pd.Series(df[rollSstick].values.ravel())
            sstickSum = pd.Series(df[sstick].values.ravel())
            factors = segmentOne(rollSum, sstickSum)
            # Additional per-segment parameters (height and wind statistics).
            height = df['_ALT_RADIO'][(factors['start']+factors['end'])/2]
            wind = df['_WIND_SPD'][factors['start']:factors['end']]
            windir = df['_WINDIR'][factors['start']:factors['end']]
            windMean = sum(wind)/(factors['end']-factors['start'])
            windirMean = sum(windir)/(factors['end']-factors['start'])
            windVar = calVar(wind)
            windirVar = calVar(windir)
            for m in range(0, len(factors["last"])):
                newFactor = pd.DataFrame({
                    "FILNAME": fileName,
                    "LAST": factors["last"][m],
                    "OPERNUM": factors["operNum"][m],
                    "RANGENUM": factors["rangeNum"][m],
                    "VAR": factors["var"][m],
                    "GAP": factors["gap"][m],
                    "NI": factors["ni"][m],
                    "HEIGHT": height,
                    "WINDMEAN": windMean,
                    "WINDVAR": windVar,
                    # Bug fix: was the raw series ``windir``; the computed
                    # variance ``windirVar`` was assigned but never used.
                    "WINDIRVAR": windirVar}, index=[i])
                tableFactor = tableFactor.append(newFactor)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; any per-file failure is counted and skipped.
            skip = skip + 1
            continue
    return tableFactor, skip
|
[
"noreply@github.com"
] |
fareise.noreply@github.com
|
09791a4e30b09ef8ec9f800ca7aca461ff8193fd
|
0df42ff286efb98f3b62a0bfa6c59548114cd3bb
|
/train_coarse_type_lstm_glove.py
|
d221d47017e9dc9a1e4537975b228cf87977b12f
|
[] |
no_license
|
SearchGuru/DNN4QueryType
|
738882bbd402075edda355b7eb135c0dd60ed7bf
|
2940131767e910b772bb9ed7350afb5d121d62f0
|
refs/heads/master
| 2021-01-10T15:36:06.223671
| 2016-04-02T23:06:20
| 2016-04-02T23:06:20
| 55,320,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,799
|
py
|
# Train an LSTM query-type classifier on GloVe-initialised embeddings.
# NOTE(review): this uses the pre-1.0 Keras API (nb_epoch, show_accuracy,
# keras.layers.core/embeddings/recurrent module paths) -- confirm the pinned
# Keras version before running on a modern stack.
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from six.moves import cPickle
import deepctxt_util
from deepctxt_util import DCTokenizer
# Hyper-parameters.
maxlen = 25 # cut texts after this number of words (among top max_features most common words)
batch_size = 100
epoch = 3
# Tokenizer backed by pre-trained GloVe vectors (100-d, 6B-token corpus).
tokenizer = DCTokenizer()
print('Loading tokenizer')
tokenizer.load('./glove.6B.100d.txt')
#tokenizer.load('./glove.42B.300d.txt')
print('Done')
max_features = tokenizer.n_symbols
vocab_dim = tokenizer.vocab_dim
# Load raw (text, label) pairs for train and test splits.
print('Loading data... (Train)')
(X1, y_train) = deepctxt_util.load_raw_data_x_y(path='./raw_data/Train_CoarseType.tsv')
print('Done')
print('Loading data... (Test)')
(X2, y_test) = deepctxt_util.load_raw_data_x_y(path='./raw_data/Test_CoarseType.tsv')
print('Done')
# Convert text to integer token-id sequences, truncated at maxlen.
print('Converting data... (Train)')
X_train = tokenizer.texts_to_sequences(X1, maxlen)
print('Done')
print('Converting data... (Test)')
X_test = tokenizer.texts_to_sequences(X2, maxlen)
print('Done')
print(len(X_train), 'y_train sequences')
print(len(X_test), 'y_test sequences')
# One-hot encode the labels; assumes labels are 0..max contiguous ints.
nb_classes = np.max(y_train)+1
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
# Embedding (GloVe-initialised) -> LSTM(128) -> Dropout -> softmax classifier.
print('Build model...')
model = Sequential()
model.add(Embedding(input_dim=max_features, output_dim=vocab_dim, input_length=maxlen, weights=[tokenizer.embedding_weights]))
model.add(LSTM(128)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy', optimizer='adam')
print("Train...")
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=epoch,
          validation_data=(X_test, Y_test), show_accuracy=True)
score, acc = model.evaluate(X_test, Y_test,
                            batch_size=100,
                            show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
# Persist architecture (JSON) and weights (HDF5) separately.
json_model_string = model.to_json()
with open("./coarse_type_model_lstm_glove_"+str(batch_size)+"b.json", "w") as f:
    f.write(json_model_string)
model.save_weights("./coarse_type_model_lstm_glove_" + str(batch_size) + "b.h5")
|
[
"cguihong@hotmail.com"
] |
cguihong@hotmail.com
|
67744552c910cdc284977b1a4a0740afe3f8ebb5
|
853c6a09af16fd4dd8a53efa8bde631e63315b59
|
/This is a coding test with python/page 367.py
|
8e040c71a88756a236725473eab281f8e2cb1cef
|
[] |
no_license
|
Areum0921/Abox
|
92840897b53e9bbab35c0e0aae5a017ab19a0500
|
f4739c0c0835054afeca82484769e71fb8de47c8
|
refs/heads/master
| 2021-12-13T11:16:33.583366
| 2021-10-10T08:09:50
| 2021-10-10T08:09:50
| 176,221,995
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
# Count occurrences of x in the sorted array a using the bisect module:
# bisect_right(x) - bisect_left(x) is the number of elements equal to x;
# prints -1 when x does not occur.
import bisect
answer = -1
N,x = map(int,input().split(" "))
a = list(map(int,input().split(" ")))
left_a = bisect.bisect_left(a,x) # index of the first element == x
right_a = bisect.bisect_right(a,x) # index one past the last element == x
print(left_a,right_a)
if right_a - left_a>0:
    answer = right_a - left_a
print(answer)
|
[
"a90907@gmail.com"
] |
a90907@gmail.com
|
65edb385111ef5fdbfc756935b882b4509d75d7d
|
5ba734d46818acd8f29eb61a5ff2502bc1f69424
|
/Code/utils.py
|
6e56358ebd15f429070905343588ebb05cf5a96d
|
[] |
no_license
|
sprihap/Learning-generative-principles-of-a-symbol-system
|
b4ae2bfaa67bbf3739c2d02a11ff359f651c669d
|
7fedfcf0684874d197f439ad97d61339fc6bf54a
|
refs/heads/master
| 2023-05-28T01:14:20.219417
| 2021-06-17T19:26:21
| 2021-06-17T19:26:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,862
|
py
|
"""
This script is from:
https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning/blob/master/utils.py
"""
import os
import numpy as np
import h5py
import json
import torch
from scipy.misc import imread, imresize
from tqdm import tqdm
from collections import Counter
from random import seed, choice, sample
def create_input_files(dataset, karpathy_json_path, image_folder, captions_per_image, min_word_freq, output_folder,
                       max_len=100):
    """
    Creates input files for training, validation, and test data.
    :param dataset: name of dataset, one of 'coco', 'flickr8k', 'flickr30k'
    :param karpathy_json_path: path of Karpathy JSON file with splits and captions
    :param image_folder: folder with downloaded images
    :param captions_per_image: number of captions to sample per image
    :param min_word_freq: words occuring less frequently than this threshold are binned as <unk>s
    :param output_folder: folder to save files
    :param max_len: don't sample captions longer than this length
    """
    assert dataset in {'coco', 'flickr8k', 'flickr30k'}
    # Read Karpathy JSON
    with open(karpathy_json_path, 'r') as j:
        data = json.load(j)
    # Read image paths and captions for each image
    train_image_paths = []
    train_image_captions = []
    val_image_paths = []
    val_image_captions = []
    test_image_paths = []
    test_image_captions = []
    word_freq = Counter()
    for img in data['images']:
        captions = []
        for c in img['sentences']:
            # Update word frequency
            word_freq.update(c['tokens'])
            if len(c['tokens']) <= max_len:
                captions.append(c['tokens'])
        if len(captions) == 0:
            continue
        path = os.path.join(image_folder, img['filepath'], img['filename']) if dataset == 'coco' else os.path.join(
            image_folder, img['filename'])
        if img['split'] in {'train', 'restval'}:
            train_image_paths.append(path)
            train_image_captions.append(captions)
        elif img['split'] in {'val'}:
            val_image_paths.append(path)
            val_image_captions.append(captions)
        elif img['split'] in {'test'}:
            test_image_paths.append(path)
            test_image_captions.append(captions)
    # Sanity check
    assert len(train_image_paths) == len(train_image_captions)
    assert len(val_image_paths) == len(val_image_captions)
    assert len(test_image_paths) == len(test_image_captions)
    # Create word map
    #words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
    # All the number words should be included even if they are not in training set
    # (the frequency-based vocabulary above is deliberately replaced by this
    # fixed number-word vocabulary; min_word_freq only affects file naming).
    words = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
             'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
             'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen',
             'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy',
             'eighty', 'ninety', 'hundred', 'thousand']
    word_map = {k: v + 1 for v, k in enumerate(words)}
    word_map['<unk>'] = len(word_map) + 1
    word_map['<start>'] = len(word_map) + 1
    word_map['<end>'] = len(word_map) + 1
    word_map['<pad>'] = 0
    # Create a base/root name for all output files
    base_filename = dataset + '_' + str(captions_per_image) + '_cap_per_img_' + str(min_word_freq) + '_min_word_freq'
    # Save word map to a JSON
    with open(os.path.join(output_folder, 'WORDMAP_' + base_filename + '.json'), 'w') as j:
        json.dump(word_map, j)
    # Sample captions for each image, save images to HDF5 file, and captions and their lengths to JSON files
    seed(123)
    for impaths, imcaps, split in [(train_image_paths, train_image_captions, 'TRAIN'),
                                   (val_image_paths, val_image_captions, 'VAL'),
                                   (test_image_paths, test_image_captions, 'TEST')]:
        with h5py.File(os.path.join(output_folder, split + '_IMAGES_' + base_filename + '.hdf5'), 'a') as h:
            # Make a note of the number of captions we are sampling per image
            h.attrs['captions_per_image'] = captions_per_image
            # Create dataset inside HDF5 file to store images
            images = h.create_dataset('images', (len(impaths), 3, 256, 256), dtype='uint8')
            print("\nReading %s images and captions, storing to file...\n" % split)
            enc_captions = []
            caplens = []
            for i, path in enumerate(tqdm(impaths)):
                # Sample captions
                if len(imcaps[i]) < captions_per_image:
                    captions = imcaps[i] + [choice(imcaps[i]) for _ in range(captions_per_image - len(imcaps[i]))]
                else:
                    captions = sample(imcaps[i], k=captions_per_image)
                # Sanity check
                assert len(captions) == captions_per_image
                # Read images
                # NOTE(review): scipy.misc.imread/imresize were removed in
                # SciPy >= 1.2 -- this function requires an old SciPy (+PIL).
                img = imread(impaths[i])
                if len(img.shape) == 2:
                    # Grayscale image: replicate the single channel to RGB.
                    img = img[:, :, np.newaxis]
                    img = np.concatenate([img, img, img], axis=2)
                img = imresize(img, (256, 256))
                img = img.transpose(2, 0, 1)
                assert img.shape == (3, 256, 256)
                assert np.max(img) <= 255
                # Save image to HDF5 file
                images[i] = img
                for j, c in enumerate(captions):
                    # Encode captions
                    enc_c = [word_map['<start>']] + [word_map.get(word, word_map['<unk>']) for word in c] + [
                        word_map['<end>']] + [word_map['<pad>']] * (max_len - len(c))
                    # Find caption lengths
                    c_len = len(c) + 2
                    enc_captions.append(enc_c)
                    caplens.append(c_len)
            # Sanity check
            assert images.shape[0] * captions_per_image == len(enc_captions) == len(caplens)
            # Save encoded captions and their lengths to JSON files
            with open(os.path.join(output_folder, split + '_CAPTIONS_' + base_filename + '.json'), 'w') as j:
                json.dump(enc_captions, j)
            with open(os.path.join(output_folder, split + '_CAPLENS_' + base_filename + '.json'), 'w') as j:
                json.dump(caplens, j)
def init_embedding(embeddings):
    """Fill *embeddings* in place with values drawn from U(-b, b).

    The bound b = sqrt(3 / embedding_dim) gives the entries a variance of
    1 / embedding_dim.

    :param embeddings: 2-D embedding tensor (vocab_size x embedding_dim)
    """
    dim = embeddings.size(1)
    bound = (3.0 / dim) ** 0.5
    torch.nn.init.uniform_(embeddings, -bound, bound)
def load_embeddings(emb_file, word_map):
    """
    Creates an embedding tensor for the specified word map, for loading into the model.
    :param emb_file: file containing embeddings (stored in GloVe format)
    :param word_map: word map
    :return: embeddings in the same order as the words in the word map, dimension of embeddings
    """
    # Find embedding dimension
    # (GloVe format: "<word> <v1> <v2> ..." -- one token plus emb_dim floats).
    with open(emb_file, 'r') as f:
        emb_dim = len(f.readline().split(' ')) - 1
    vocab = set(word_map.keys())
    # Create tensor to hold embeddings, initialize
    # Words absent from emb_file keep this random uniform initialisation.
    # NOTE(review): assumes word_map indices lie in [0, len(vocab)) -- confirm
    # against how word_map is built (e.g. create_input_files above).
    embeddings = torch.FloatTensor(len(vocab), emb_dim)
    init_embedding(embeddings)
    # Read embedding file
    print("\nLoading embeddings...")
    for line in open(emb_file, 'r'):
        line = line.split(' ')
        emb_word = line[0]
        embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))
        # Ignore word if not in train_vocab
        if emb_word not in vocab:
            continue
        embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)
    return embeddings, emb_dim
def clip_gradient(optimizer, grad_clip):
    """Clamp every gradient held by *optimizer* to [-grad_clip, grad_clip].

    Operates in place on param.grad; parameters without a gradient are left
    untouched.

    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: clip value
    """
    all_params = (p for group in optimizer.param_groups for p in group['params'])
    for p in all_params:
        if p.grad is not None:
            p.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(data_name, epoch, epochs_since_improvement, encoder, decoder, encoder_optimizer, decoder_optimizer,
                    bleu4, is_best):
    """
    Saves model checkpoint.
    :param data_name: base name of processed dataset
    :param epoch: epoch number
    :param epochs_since_improvement: number of epochs since last improvement in BLEU-4 score
    :param encoder: encoder model
    :param decoder: decoder model
    :param encoder_optimizer: optimizer to update encoder's weights, if fine-tuning
    :param decoder_optimizer: optimizer to update decoder's weights
    :param bleu4: validation BLEU-4 score for this epoch
    :param is_best: is this checkpoint the best so far?
    """
    # NOTE(review): whole model/optimizer objects are pickled (not
    # state_dict()s), so loading requires the same class definitions on the
    # import path.
    state = {'epoch': epoch,
             'epochs_since_improvement': epochs_since_improvement,
             'bleu-4': bleu4,
             'encoder': encoder,
             'decoder': decoder,
             'encoder_optimizer': encoder_optimizer,
             'decoder_optimizer': decoder_optimizer}
    # Written to the current working directory.
    filename = 'checkpoint_' + data_name + '.pth.tar'
    torch.save(state, filename)
    # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
    if is_best:
        torch.save(state, 'BEST_' + filename)
class AverageMeter(object):
    """Track the most recent value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, weighted as *n* observations, and refresh the mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, shrink_factor):
    """Multiply every parameter group's learning rate by *shrink_factor*.

    :param optimizer: optimizer whose learning rate must be shrunk.
    :param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
    """
    print("\nDECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] *= shrink_factor
    # Report the rate of the first group (all groups are scaled identically).
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def accuracy(scores, targets, k):
    """Compute top-k accuracy, as a percentage, from scores and true labels.

    :param scores: (batch, n_classes) score tensor from the model
    :param targets: (batch,) tensor of true class indices
    :param k: k in top-k accuracy
    :return: top-k accuracy in [0, 100]
    """
    n = targets.size(0)
    top_idx = scores.topk(k, dim=1, largest=True, sorted=True).indices
    # A row is a hit when the true label appears among its top-k indices.
    hits = top_idx.eq(targets.view(-1, 1).expand_as(top_idx))
    n_correct = hits.view(-1).float().sum().item()
    return n_correct * (100.0 / n)
|
[
"ziyxiang@electrode.sice.indiana.edu"
] |
ziyxiang@electrode.sice.indiana.edu
|
970ac043cb0d2a47e40e680f4553f16167d33ddf
|
02d0714edfef5a2d3630d7659c553c157e291e52
|
/tempest/api/volume/admin/test_volumes_actions.py
|
6c32321fc70831776f88b575d81d10b782f2264c
|
[
"Apache-2.0"
] |
permissive
|
atulbangar09/tempest
|
f07dced592481a7ec71a9c7469b7d50d30cdc171
|
9f5644ce2784cd882e86ac89236f8f8f828d7c43
|
refs/heads/master
| 2023-02-06T10:26:40.112917
| 2020-01-21T16:17:16
| 2020-01-21T16:17:16
| 234,520,093
| 0
| 0
|
Apache-2.0
| 2020-01-17T09:52:53
| 2020-01-17T09:52:52
| null |
UTF-8
|
Python
| false
| false
| 4,119
|
py
|
# Copyright 2013 Huawei Technologies Co.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common.utils import data_utils as utils
from tempest import test
class VolumesActionsV2Test(base.BaseVolumeAdminTest):
    """Admin-only volume action tests (v2 API): reset-status and force-delete."""

    @classmethod
    def setup_clients(cls):
        super(VolumesActionsV2Test, cls).setup_clients()
        cls.client = cls.volumes_client

    @classmethod
    def resource_setup(cls):
        super(VolumesActionsV2Test, cls).resource_setup()
        # Create a test shared volume for tests
        vol_name = utils.rand_name(cls.__name__ + '-Volume')
        cls.name_field = cls.special_fields['name_field']
        params = {cls.name_field: vol_name}
        cls.volume = cls.client.create_volume(**params)['volume']
        cls.client.wait_for_volume_status(cls.volume['id'], 'available')

    @classmethod
    def resource_cleanup(cls):
        # Delete the test volume
        cls.client.delete_volume(cls.volume['id'])
        cls.client.wait_for_resource_deletion(cls.volume['id'])
        super(VolumesActionsV2Test, cls).resource_cleanup()

    def _reset_volume_status(self, volume_id, status):
        # Reset the volume status (admin-only API)
        body = self.admin_volume_client.reset_volume_status(volume_id,
                                                            status)
        return body

    def tearDown(self):
        # Set volume's status to available after test
        self._reset_volume_status(self.volume['id'], 'available')
        super(VolumesActionsV2Test, self).tearDown()

    def _create_temp_volume(self):
        # Create a temp volume for force delete tests
        vol_name = utils.rand_name('Volume')
        params = {self.name_field: vol_name}
        temp_volume = self.client.create_volume(**params)['volume']
        self.client.wait_for_volume_status(temp_volume['id'], 'available')
        return temp_volume

    def _create_reset_and_force_delete_temp_volume(self, status=None):
        # Create volume, reset volume status, and force delete temp volume
        temp_volume = self._create_temp_volume()
        if status:
            self._reset_volume_status(temp_volume['id'], status)
        self.admin_volume_client.force_delete_volume(temp_volume['id'])
        self.client.wait_for_resource_deletion(temp_volume['id'])

    @test.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
    def test_volume_reset_status(self):
        # test volume reset status : available->error->available
        self._reset_volume_status(self.volume['id'], 'error')
        volume_get = self.admin_volume_client.show_volume(
            self.volume['id'])['volume']
        self.assertEqual('error', volume_get['status'])

    @test.idempotent_id('21737d5a-92f2-46d7-b009-a0cc0ee7a570')
    def test_volume_force_delete_when_volume_is_creating(self):
        # test force delete when status of volume is creating
        self._create_reset_and_force_delete_temp_volume('creating')

    @test.idempotent_id('db8d607a-aa2e-4beb-b51d-d4005c232011')
    def test_volume_force_delete_when_volume_is_attaching(self):
        # test force delete when status of volume is attaching
        self._create_reset_and_force_delete_temp_volume('attaching')

    @test.idempotent_id('3e33a8a8-afd4-4d64-a86b-c27a185c5a4a')
    def test_volume_force_delete_when_volume_is_error(self):
        # test force delete when status of volume is error
        self._create_reset_and_force_delete_temp_volume('error')
class VolumesActionsV1Test(VolumesActionsV2Test):
    # Re-run the same test suite against the v1 volume API.
    _api_version = 1
|
[
"jignasha.vithalani@triliodata.com"
] |
jignasha.vithalani@triliodata.com
|
bd6f314634efe8002e6d96e077d9e37cf04f2978
|
58d024706f26350706e52670daa135b37b84a30f
|
/srcb/.idea/gen_loan_acct.py
|
d9aaaab83fb631d2fb9b7be72e12b4db83cec084
|
[] |
no_license
|
ccf738/ccfCodes
|
295238991a497f9a6f38b89bf76f8f39a3a965b1
|
3ececda49eb17bdb5f8ffa30e019bd307a73f86e
|
refs/heads/master
| 2021-01-01T06:44:38.630535
| 2015-05-11T14:04:49
| 2015-05-11T14:04:49
| 8,829,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'qctest'
import os
import shutil
import stat
import excel
import time
import win32com.client
import sqlite3
# Module-level counters shared by the set_* helpers below.
year = 2002  # starting year used to build loan sequence numbers
acct_seq = 0  # account-number sequence counter
num_seq = 0  # per-year loan sequence counter (resets at 10000)
id_seq = 0  # ID-number sequence counter
def set_acct_no(file_name, branch_no, row_no):
    """Write the next account number ("68" + branch + "709" + 7-digit
    zero-padded sequence) into column 2 of the given row."""
    global acct_seq
    acct_seq += 1
    acct_no = "68" + str(branch_no) + "709" + str(acct_seq).rjust(7,'0')
    file_name.set_value(row_no, 2, acct_no)
def set_acct_seq(file_name, row_no):
    """Write the next loan sequence number (<year> + "9" + 4-digit counter)
    into column 4; the counter rolls over at 10000, advancing the year."""
    global year, num_seq
    num_seq += 1
    if num_seq == 10000:
        num_seq = 1
        year += 1
    loan_seq = str(year) + "9" + str(num_seq).rjust(4, '0')
    file_name.set_value(row_no, 4, loan_seq)
def set_id_no(file_name, branch_no,row_no, id_type, name_len):
    """Fill the ID columns of a row: column 20 gets '01', columns 18/19 get
    an ID type and generated ID number ("11"/"zhdk..." when the name is at
    most 4 characters, otherwise "44"/"zhkr...").

    NOTE(review): the incoming ``id_type`` argument is always overwritten
    below and never read -- confirm whether it was meant to be honoured.
    """
    global id_seq
    id_seq += 1
    file_name.set_value(row_no, 20, '01')
    if name_len <= 4:
        id_type = "11"
        id_no = "zhdk" + str(branch_no) + str(id_seq).rjust(6,'0')
    else:
        id_type = "44"
        id_no = "zhkr" + str(id_seq).rjust(6, '0')
    file_name.set_value(row_no, 19, id_no)
    file_name.set_value(row_no, 18, id_type)
# Batch-fix the replacement-loan workbook: walk rows bottom-up and fill in
# account numbers, loan sequence numbers and (where missing) ID numbers.
# NOTE: Python 2 code (`unicode`, `xrange`).
rep_loan_file = excel.Excel(unicode(r'D:\陈超峰\陈超峰\数据\20141203接收的20141124补录\1124补录发何东杰\置换贷款汇总表.xlsx', "utf-8"), "zhdk")
for i in xrange(rep_loan_file.used_range(), 2, -1):
    row = rep_loan_file.get_row_data(i)
    if row[2] is None:
        # Skip rows without a name in column 2.
        continue
    try:
        branch_no = int(row[0])
    except:
        # Column 0 not numeric: fall back to a default branch code.
        branch_no = '85611001'
    id_type = row[18]
    set_acct_no(rep_loan_file,branch_no,i)
    set_acct_seq(rep_loan_file,i)
    if row[18] is None or row[19] is None:
        set_id_no(rep_loan_file,branch_no,i,id_type, len(row[2]))
rep_loan_file.quit()
|
[
"ccf738@sina.com"
] |
ccf738@sina.com
|
b2af9ea8f603ad82066c9c195b15839ee775b30e
|
02c9c208d20a04b50b9a88483ca87dd0922fb106
|
/vision/faceDetection.py
|
b57050df2c1b6926133ef263d54490bac1a03f5f
|
[] |
no_license
|
shubhamagarwal92/deepLearning
|
ca80aa6f310d0fff0c3f3172bcbeb156336ef949
|
a9a73544a5b5e5d4a10cfcaa8ff4d43fb251a27c
|
refs/heads/master
| 2020-04-18T12:25:28.431481
| 2016-09-29T16:30:22
| 2016-09-29T16:30:22
| 67,206,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# Face detection in images using haar classifier from opencv
# Download first haarcascade_frontalface_alt.xml to get the code working
import os
import cv2
from PIL import Image
cascadeClassifier = '/path-to-dir/haarcascade_frontalface_alt.xml'
rootDir = "/path-to-dir/"
classDirPath = rootDir + "binaryClasses/"
faceFileDir = rootDir + 'faces/'
classDirNames = next(os.walk(classDirPath))[1]
# Load the Haar cascade ONCE: constructing a CascadeClassifier parses the XML
# model file, so re-creating it for every image (as the original did) wasted
# time without changing the detection results.
faceCascade = cv2.CascadeClassifier(cascadeClassifier)
for classDir in classDirNames:
    imageDirPath = classDirPath+classDir+'/'
    imageNames = next(os.walk(imageDirPath))[2]
    classFaceFilePath = faceFileDir + classDir+'/'
    print(classDir)
    for imageName in imageNames:
        imageFile = imageDirPath+imageName
        img = cv2.imread(imageFile)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.3, 5)
        # Keep only images with exactly one detected face; crop it and save
        # under faces/<class>/<original-name>.
        if(len(faces)==1):
            for (x,y,w,h) in faces:
                cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
                sub_face = img[y:y+h, x:x+w]
                face_file_name = classFaceFilePath + imageName
                cv2.imwrite(face_file_name, sub_face)
|
[
"shubhamagarwal92@gmail.com"
] |
shubhamagarwal92@gmail.com
|
cda2db1015b7a58998812c5701846d0dbfa533a5
|
1caa642313c4b6a8350dca501a2d6da80a14f2a1
|
/Acessar_site.py
|
cb1dbc9410a9350dcef91009c9ce0f3400f0717e
|
[
"MIT"
] |
permissive
|
LuanPetruitis/minis_programas_python
|
f78648269251cdc0d7ec77fd727e6e08b47ce340
|
5fbc4c3fbe832303511e612f320d31e2b91f1ef0
|
refs/heads/master
| 2021-05-24T13:33:55.373089
| 2021-01-29T19:44:37
| 2021-01-29T19:44:37
| 253,585,118
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
# Simple availability check: try to fetch the page and report the outcome.
import urllib
import urllib.request
try:
    site = urllib.request.urlopen('http://www.pudim.com.br')
except urllib.error.URLError:
    # Network failure or site down ("The site is not working.").
    print('O site não está funcionando.')
else:
    # Request succeeded ("I was able to access the site.").
    print('Consegui acessar o site.')
|
[
"luanpetruitis@hotmail.com"
] |
luanpetruitis@hotmail.com
|
afd21c4d65fe2397b52703dbb5e7844fd2dd620e
|
be62bda9e4984a057109db70848d8b6e5586beed
|
/watch/views.py
|
f9e24083309c19dce31ae632d20cb0287628209e
|
[] |
no_license
|
grim-GO/worldIT1
|
649c393f6fd059d893259373929042654cb9137e
|
a8e9c8641ed7f77695740d042c87dfd547fe0ab1
|
refs/heads/master
| 2021-01-09T00:57:35.202900
| 2020-02-25T16:11:59
| 2020-02-25T16:11:59
| 242,195,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from django.shortcuts import render
def watch(request):
    """Render the static watch page template."""
    return render(request, 'watch.html')
|
[
"56546892+grim-GO@users.noreply.github.com"
] |
56546892+grim-GO@users.noreply.github.com
|
e39ba4e517ce77e9d879a36cff7cf0d9f91ea2b7
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/ABC/194_c.py
|
efb956133dd25d430f186e3691a7980d59834987
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Read N and N integers; print the sum over all pairs i<j of (A_i - A_j)^2.

    Equal values contribute zero, so the sum is taken over pairs of distinct
    values weighted by their multiplicities.
    """
    import collections

    n = int(input())
    data = list(map(int, input().split()))
    counts = collections.Counter(data[:n])
    distinct = sorted(counts)
    total = 0
    for pos, a in enumerate(distinct):
        for b in distinct[pos + 1:]:
            total += (a - b) ** 2 * counts[a] * counts[b]
    print(total)
class TestClass(unittest.TestCase):
    """Sample-based tests for resolve(), driven through stdin/stdout."""

    def assertIO(self, input, output):
        # Swap in StringIO streams, run resolve(), and compare its stdout
        # (minus the trailing newline) with the expected output.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_input_1(self):
        print("test_input_1")
        input = """3
2 8 4"""
        output = """56"""
        self.assertIO(input, output)

    def test_input_2(self):
        print("test_input_2")
        input = """5
-5 8 9 -4 -3"""
        output = """950"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
|
[
"kanai@wide.ad.jp"
] |
kanai@wide.ad.jp
|
72d9f888432bd18afeb0e389537741ec6f5a4396
|
e457ef64e939acc769d3b4609184f1603fdd875a
|
/tests/test_fingerprint.py
|
f8cc93db8d90891d4a8e47ec0acb33cc6ed2ba00
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
fastavro/fastavro
|
dbad8c55fabc9f22b16273ee1a926f22c840c694
|
40dfd526076446cc7f7eef97e40da216b910d047
|
refs/heads/master
| 2023-09-01T04:16:13.510802
| 2023-08-25T10:19:13
| 2023-08-25T11:05:36
| 3,845,895
| 430
| 105
|
MIT
| 2023-09-14T20:14:34
| 2012-03-27T16:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 33,787
|
py
|
import pytest
from fastavro.schema import (
FINGERPRINT_ALGORITHMS,
fingerprint,
to_parsing_canonical_form,
)
# These algorithm names (and fastavro's lowercase aliases) must always be
# advertised in FINGERPRINT_ALGORITHMS.
@pytest.mark.parametrize(
    "fingerprint", ["CRC-64-AVRO", "SHA-256", "MD5", "sha256", "md5"]
)
def test_required_fingerprints(fingerprint):
    """Each required fingerprint algorithm name is supported."""
    assert fingerprint in FINGERPRINT_ALGORITHMS
def test_unknown_algorithm():
    """fingerprint() rejects algorithm names it does not advertise."""
    unknown_algorithm = "UNKNOWN"
    assert unknown_algorithm not in FINGERPRINT_ALGORITHMS
    with pytest.raises(ValueError, match="Unknown schema fingerprint algorithm"):
        fingerprint("string", unknown_algorithm)
@pytest.mark.parametrize(
"original_schema,algorithm,expected_fingerprint",
[
("int", "CRC-64-AVRO", "8f5c393f1ad57572"),
("int", "md5", "ef524ea1b91e73173d938ade36c1db32"),
(
"int",
"sha256",
"3f2b87a9fe7cc9b13835598c3981cd45e3e355309e5090aa0933d7becb6fba45",
),
(
{"type": "int"},
"CRC-64-AVRO",
"8f5c393f1ad57572",
),
(
{"type": "int"},
"md5",
"ef524ea1b91e73173d938ade36c1db32",
),
(
{"type": "int"},
"sha256",
"3f2b87a9fe7cc9b13835598c3981cd45e3e355309e5090aa0933d7becb6fba45",
),
(
"float",
"CRC-64-AVRO",
"90d7a83ecb027c4d",
),
(
"float",
"md5",
"50a6b9db85da367a6d2df400a41758a6",
),
(
"float",
"sha256",
"1e71f9ec051d663f56b0d8e1fc84d71aa56ccfe9fa93aa20d10547a7abeb5cc0",
),
(
{"type": "float"},
"CRC-64-AVRO",
"90d7a83ecb027c4d",
),
(
{"type": "float"},
"md5",
"50a6b9db85da367a6d2df400a41758a6",
),
(
{"type": "float"},
"sha256",
"1e71f9ec051d663f56b0d8e1fc84d71aa56ccfe9fa93aa20d10547a7abeb5cc0",
),
(
"long",
"CRC-64-AVRO",
"b71df49344e154d0",
),
(
"long",
"md5",
"e1dd9a1ef98b451b53690370b393966b",
),
(
"long",
"sha256",
"c32c497df6730c97fa07362aa5023f37d49a027ec452360778114cf427965add",
),
(
{"type": "long"},
"CRC-64-AVRO",
"b71df49344e154d0",
),
(
{"type": "long"},
"md5",
"e1dd9a1ef98b451b53690370b393966b",
),
(
{"type": "long"},
"sha256",
"c32c497df6730c97fa07362aa5023f37d49a027ec452360778114cf427965add",
),
(
"double",
"CRC-64-AVRO",
"7e95ab32c035758e",
),
(
"double",
"md5",
"bfc71a62f38b99d6a93690deeb4b3af6",
),
(
"double",
"sha256",
"730a9a8c611681d7eef442e03c16c70d13bca3eb8b977bb403eaff52176af254",
),
(
{"type": "double"},
"CRC-64-AVRO",
"7e95ab32c035758e",
),
(
{"type": "double"},
"md5",
"bfc71a62f38b99d6a93690deeb4b3af6",
),
(
{"type": "double"},
"sha256",
"730a9a8c611681d7eef442e03c16c70d13bca3eb8b977bb403eaff52176af254",
),
(
"bytes",
"CRC-64-AVRO",
"651920c3da16c04f",
),
(
"bytes",
"md5",
"b462f06cb909be57c85008867784cde6",
),
(
"bytes",
"sha256",
"9ae507a9dd39ee5b7c7e285da2c0846521c8ae8d80feeae5504e0c981d53f5fa",
),
(
{"type": "bytes"},
"CRC-64-AVRO",
"651920c3da16c04f",
),
(
{"type": "bytes"},
"md5",
"b462f06cb909be57c85008867784cde6",
),
(
{"type": "bytes"},
"sha256",
"9ae507a9dd39ee5b7c7e285da2c0846521c8ae8d80feeae5504e0c981d53f5fa",
),
(
"string",
"CRC-64-AVRO",
"c70345637248018f",
),
(
"string",
"md5",
"095d71cf12556b9d5e330ad575b3df5d",
),
(
"string",
"sha256",
"e9e5c1c9e4f6277339d1bcde0733a59bd42f8731f449da6dc13010a916930d48",
),
(
{"type": "string"},
"CRC-64-AVRO",
"c70345637248018f",
),
(
{"type": "string"},
"md5",
"095d71cf12556b9d5e330ad575b3df5d",
),
(
{"type": "string"},
"sha256",
"e9e5c1c9e4f6277339d1bcde0733a59bd42f8731f449da6dc13010a916930d48",
),
(
"boolean",
"CRC-64-AVRO",
"64f7d4a478fc429f",
),
(
"boolean",
"md5",
"01f692b30d4a1c8a3e600b1440637f8f",
),
(
"boolean",
"sha256",
"a5b031ab62bc416d720c0410d802ea46b910c4fbe85c50a946ccc658b74e677e",
),
(
{"type": "boolean"},
"CRC-64-AVRO",
"64f7d4a478fc429f",
),
(
{"type": "boolean"},
"md5",
"01f692b30d4a1c8a3e600b1440637f8f",
),
(
{"type": "boolean"},
"sha256",
"a5b031ab62bc416d720c0410d802ea46b910c4fbe85c50a946ccc658b74e677e",
),
(
"null",
"CRC-64-AVRO",
"8a8f25cce724dd63",
),
(
"null",
"md5",
"9b41ef67651c18488a8b08bb67c75699",
),
(
"null",
"sha256",
"f072cbec3bf8841871d4284230c5e983dc211a56837aed862487148f947d1a1f",
),
(
{"type": "null"},
"CRC-64-AVRO",
"8a8f25cce724dd63",
),
(
{"type": "null"},
"md5",
"9b41ef67651c18488a8b08bb67c75699",
),
(
{"type": "null"},
"sha256",
"f072cbec3bf8841871d4284230c5e983dc211a56837aed862487148f947d1a1f",
),
(
{"type": "fixed", "name": "Test", "size": 1},
"CRC-64-AVRO",
"6869897b4049355b",
),
(
{"type": "fixed", "name": "Test", "size": 1},
"md5",
"db01bc515fcfcd2d4be82ed385288261",
),
(
{"type": "fixed", "name": "Test", "size": 1},
"sha256",
"f527116a6f44455697e935afc31dc60ad0f95caf35e1d9c9db62edb3ffeb9170",
),
(
{
"type": "fixed",
"name": "MyFixed",
"namespace": "org.apache.hadoop.avro",
"size": 1,
},
"CRC-64-AVRO",
"fadbd138e85bdf45",
),
(
{
"type": "fixed",
"name": "MyFixed",
"namespace": "org.apache.hadoop.avro",
"size": 1,
},
"md5",
"d74b3726484422711c465d49e857b1ba",
),
(
{
"type": "fixed",
"name": "MyFixed",
"namespace": "org.apache.hadoop.avro",
"size": 1,
},
"sha256",
"28e493a44771cecc5deca4bd938cdc3d5a24cfe1f3760bc938fa1057df6334fc",
),
(
{"type": "enum", "name": "Test", "symbols": ["A", "B"]},
"CRC-64-AVRO",
"03a2f2c2e27f7a16",
),
(
{"type": "enum", "name": "Test", "symbols": ["A", "B"]},
"md5",
"d883f2a9b16ed085fcc5e4ca6c8f6ed1",
),
(
{"type": "enum", "name": "Test", "symbols": ["A", "B"]},
"sha256",
"9b51286144f87ce5aebdc61ca834379effa5a41ce6ac0938630ff246297caca8",
),
(
{"type": "array", "items": "long"},
"CRC-64-AVRO",
"715e2ea28bc91654",
),
(
{"type": "array", "items": "long"},
"md5",
"c1c387e8d6a58f0df749b698991b1f43",
),
(
{"type": "array", "items": "long"},
"sha256",
"f78e954167feb23dcb1ce01e8463cebf3408e0a4259e16f24bd38f6d0f1d578b",
),
(
{
"type": "array",
"items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"CRC-64-AVRO",
"10d9ade1fa3a0387",
),
(
{
"type": "array",
"items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"md5",
"cfc7b861c7cfef082a6ef082948893fa",
),
(
{
"type": "array",
"items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"sha256",
"0d8edd49d7f7e9553668f133577bc99f842852b55d9f84f1f7511e4961aa685c",
),
(
{"type": "map", "values": "long"},
"CRC-64-AVRO",
"6f74f4e409b1334e",
),
(
{"type": "map", "values": "long"},
"md5",
"32b3f1a3177a0e73017920f00448b56e",
),
(
{"type": "map", "values": "long"},
"sha256",
"b8fad07d458971a07692206b8a7cf626c86c62fe6bcff7c1b11bc7295de34853",
),
(
{
"type": "map",
"values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"CRC-64-AVRO",
"df2ab0626f6b812d",
),
(
{
"type": "map",
"values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"md5",
"c588da6ba99701c41e73fd30d23f994e",
),
(
{
"type": "map",
"values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
"sha256",
"3886747ed1669a8af476b549e97b34222afb2fed5f18bb27c6f367ea0351a576",
),
(
["string", "null", "long"],
"CRC-64-AVRO",
"65a5be410d687566",
),
(
["string", "null", "long"],
"md5",
"b11cf95f0a55dd55f9ee515a37bf937a",
),
(
["string", "null", "long"],
"sha256",
"ed8d254116441bb35e237ad0563cf5432b8c975334bd222c1ee84609435d95bb",
),
(
{
"type": "record",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"CRC-64-AVRO",
"ed94e5f5e6eb588e",
),
(
{
"type": "record",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"md5",
"69531a03db788afe353244cd049b1e6d",
),
(
{
"type": "record",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"sha256",
"9670f15a8f96d23e92830d00b8bd57275e02e3e173ffef7c253c170b6beabeb8",
),
(
{
"type": "error",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"CRC-64-AVRO",
"ed94e5f5e6eb588e",
),
(
{
"type": "error",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"md5",
"69531a03db788afe353244cd049b1e6d",
),
(
{
"type": "error",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
"sha256",
"9670f15a8f96d23e92830d00b8bd57275e02e3e173ffef7c253c170b6beabeb8",
),
(
{
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{"name": "children", "type": {"type": "array", "items": "Node"}},
],
},
"CRC-64-AVRO",
"52cba544c3e756b7",
),
(
{
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{"name": "children", "type": {"type": "array", "items": "Node"}},
],
},
"md5",
"99625b0cc02050363e89ef66b0f406c9",
),
(
{
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{"name": "children", "type": {"type": "array", "items": "Node"}},
],
},
"sha256",
"65d80dc8c95c98a9671d92cf0415edfabfee2cb058df2138606656cd6ae4dc59",
),
(
{
"type": "record",
"name": "Lisp",
"fields": [
{
"name": "value",
"type": [
"null",
"string",
{
"type": "record",
"name": "Cons",
"fields": [
{"name": "car", "type": "Lisp"},
{"name": "cdr", "type": "Lisp"},
],
},
],
},
],
},
"CRC-64-AVRO",
"68d91a23eda0b306",
),
(
{
"type": "record",
"name": "Lisp",
"fields": [
{
"name": "value",
"type": [
"null",
"string",
{
"type": "record",
"name": "Cons",
"fields": [
{"name": "car", "type": "Lisp"},
{"name": "cdr", "type": "Lisp"},
],
},
],
},
],
},
"md5",
"9e1d0d15b52789fcb8e3a88b53059d5f",
),
(
{
"type": "record",
"name": "Lisp",
"fields": [
{
"name": "value",
"type": [
"null",
"string",
{
"type": "record",
"name": "Cons",
"fields": [
{"name": "car", "type": "Lisp"},
{"name": "cdr", "type": "Lisp"},
],
},
],
},
],
},
"sha256",
"e5ce4f4a15ce19fa1047cfe16a3b0e13a755db40f00f23284fdd376fc1c7dd21",
),
(
{
"type": "record",
"name": "HandshakeRequest",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"CRC-64-AVRO",
"b96ad79e5a7c5757",
),
(
{
"type": "record",
"name": "HandshakeRequest",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"md5",
"4c822af2e17eecd92422827eede97f5b",
),
(
{
"type": "record",
"name": "HandshakeRequest",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"sha256",
"2b2f7a9b22991fe0df9134cb6b5ff7355343e797aaea337e0150e20f3a35800e",
),
(
{
"type": "record",
"name": "HandshakeResponse",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "match",
"type": {
"type": "enum",
"name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"],
},
},
{"name": "serverProtocol", "type": ["null", "string"]},
{
"name": "serverHash",
"type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}],
},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"CRC-64-AVRO",
"00feee01de4ea50e",
),
(
{
"type": "record",
"name": "HandshakeResponse",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "match",
"type": {
"type": "enum",
"name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"],
},
},
{"name": "serverProtocol", "type": ["null", "string"]},
{
"name": "serverHash",
"type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}],
},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"md5",
"afe529d01132daab7f4e2a6663e7a2f5",
),
(
{
"type": "record",
"name": "HandshakeResponse",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "match",
"type": {
"type": "enum",
"name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"],
},
},
{"name": "serverProtocol", "type": ["null", "string"]},
{
"name": "serverHash",
"type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}],
},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
"sha256",
"a303cbbfe13958f880605d70c521a4b7be34d9265ac5a848f25916a67b11d889",
),
(
{
"type": "record",
"name": "Interop",
"namespace": "org.apache.avro",
"fields": [
{"name": "intField", "type": "int"},
{"name": "longField", "type": "long"},
{"name": "stringField", "type": "string"},
{"name": "boolField", "type": "boolean"},
{"name": "floatField", "type": "float"},
{"name": "doubleField", "type": "double"},
{"name": "bytesField", "type": "bytes"},
{"name": "nullField", "type": "null"},
{
"name": "arrayField",
"type": {"type": "array", "items": "double"},
},
{
"name": "mapField",
"type": {
"type": "map",
"values": {
"name": "Foo",
"type": "record",
"fields": [{"name": "label", "type": "string"}],
},
},
},
{
"name": "unionField",
"type": [
"boolean",
"double",
{"type": "array", "items": "bytes"},
],
},
{
"name": "enumField",
"type": {
"type": "enum",
"name": "Kind",
"symbols": ["A", "B", "C"],
},
},
{
"name": "fixedField",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{
"name": "recordField",
"type": {
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{
"name": "children",
"type": {"type": "array", "items": "Node"},
},
],
},
},
],
},
"CRC-64-AVRO",
"e82c0a93a6a0b5a4",
),
(
{
"type": "record",
"name": "Interop",
"namespace": "org.apache.avro",
"fields": [
{"name": "intField", "type": "int"},
{"name": "longField", "type": "long"},
{"name": "stringField", "type": "string"},
{"name": "boolField", "type": "boolean"},
{"name": "floatField", "type": "float"},
{"name": "doubleField", "type": "double"},
{"name": "bytesField", "type": "bytes"},
{"name": "nullField", "type": "null"},
{
"name": "arrayField",
"type": {"type": "array", "items": "double"},
},
{
"name": "mapField",
"type": {
"type": "map",
"values": {
"name": "Foo",
"type": "record",
"fields": [{"name": "label", "type": "string"}],
},
},
},
{
"name": "unionField",
"type": [
"boolean",
"double",
{"type": "array", "items": "bytes"},
],
},
{
"name": "enumField",
"type": {
"type": "enum",
"name": "Kind",
"symbols": ["A", "B", "C"],
},
},
{
"name": "fixedField",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{
"name": "recordField",
"type": {
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{
"name": "children",
"type": {"type": "array", "items": "Node"},
},
],
},
},
],
},
"md5",
"994fea1a1be7ff8603cbe40c3bc7e4ca",
),
(
{
"type": "record",
"name": "Interop",
"namespace": "org.apache.avro",
"fields": [
{"name": "intField", "type": "int"},
{"name": "longField", "type": "long"},
{"name": "stringField", "type": "string"},
{"name": "boolField", "type": "boolean"},
{"name": "floatField", "type": "float"},
{"name": "doubleField", "type": "double"},
{"name": "bytesField", "type": "bytes"},
{"name": "nullField", "type": "null"},
{
"name": "arrayField",
"type": {"type": "array", "items": "double"},
},
{
"name": "mapField",
"type": {
"type": "map",
"values": {
"name": "Foo",
"type": "record",
"fields": [{"name": "label", "type": "string"}],
},
},
},
{
"name": "unionField",
"type": [
"boolean",
"double",
{"type": "array", "items": "bytes"},
],
},
{
"name": "enumField",
"type": {
"type": "enum",
"name": "Kind",
"symbols": ["A", "B", "C"],
},
},
{
"name": "fixedField",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{
"name": "recordField",
"type": {
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{
"name": "children",
"type": {"type": "array", "items": "Node"},
},
],
},
},
],
},
"sha256",
"cccfd6e3f917cf53b0f90c206342e6703b0d905071f724a1c1f85b731c74058d",
),
(
{
"type": "record",
"name": "ipAddr",
"fields": [
{
"name": "addr",
"type": [
{"name": "IPv6", "type": "fixed", "size": 16},
{"name": "IPv4", "type": "fixed", "size": 4},
],
}
],
},
"CRC-64-AVRO",
"8d961b4e298a1844",
),
(
{
"type": "record",
"name": "ipAddr",
"fields": [
{
"name": "addr",
"type": [
{"name": "IPv6", "type": "fixed", "size": 16},
{"name": "IPv4", "type": "fixed", "size": 4},
],
}
],
},
"md5",
"45d85c69b353a99b93d7c4f2fcf0c30d",
),
(
{
"type": "record",
"name": "ipAddr",
"fields": [
{
"name": "addr",
"type": [
{"name": "IPv6", "type": "fixed", "size": 16},
{"name": "IPv4", "type": "fixed", "size": 4},
],
}
],
},
"sha256",
"6f6fc8f685a4f07d99734946565d63108806d55a8620febea047cf52cb0ac181",
),
(
{
"type": "record",
"name": "TestDoc",
"doc": "Doc string",
"fields": [{"name": "name", "type": "string", "doc": "Doc String"}],
},
"CRC-64-AVRO",
"0e6660f02bcdc109",
),
(
{
"type": "record",
"name": "TestDoc",
"doc": "Doc string",
"fields": [{"name": "name", "type": "string", "doc": "Doc String"}],
},
"md5",
"f2da75f5131f5ab80629538287b8beb2",
),
(
{
"type": "record",
"name": "TestDoc",
"doc": "Doc string",
"fields": [{"name": "name", "type": "string", "doc": "Doc String"}],
},
"sha256",
"0b3644f7aa5ca2fc4bad93ca2d3609c12aa9dbda9c15e68b34c120beff08e7b9",
),
(
{
"type": "enum",
"name": "Test",
"symbols": ["A", "B"],
"doc": "Doc String",
},
"CRC-64-AVRO",
"03a2f2c2e27f7a16",
),
(
{
"type": "enum",
"name": "Test",
"symbols": ["A", "B"],
"doc": "Doc String",
},
"md5",
"d883f2a9b16ed085fcc5e4ca6c8f6ed1",
),
(
{
"type": "enum",
"name": "Test",
"symbols": ["A", "B"],
"doc": "Doc String",
},
"sha256",
"9b51286144f87ce5aebdc61ca834379effa5a41ce6ac0938630ff246297caca8",
),
(
{"type": "int"},
"MD5", # JAVA Name
"ef524ea1b91e73173d938ade36c1db32",
),
(
{"type": "int"},
"SHA-256", # JAVA Name
"3f2b87a9fe7cc9b13835598c3981cd45e3e355309e5090aa0933d7becb6fba45",
),
],
)
def test_random_cases(original_schema, algorithm, expected_fingerprint):
# All of these random test cases came from the test cases here:
# https://github.com/apache/avro/blob/0552c674637dd15b8751ed5181387cdbd81480d5/lang/py3/avro/tests/test_normalization.py
canonical_form = to_parsing_canonical_form(original_schema)
assert fingerprint(canonical_form, algorithm) == expected_fingerprint
|
[
"scottabelden+github.sb@gmail.com"
] |
scottabelden+github.sb@gmail.com
|
4e1b78b9c648e807f68585113cc426c3bdc0cfd1
|
fc8fbda7dba622a2242a6783919c36e5e0fd6cc8
|
/final/run.py
|
e031666b5f15c90a865bdcb96ea4a098df9b62b3
|
[] |
no_license
|
canibal/Course6
|
c110fed13338bca9509948b82a1d4dd0e7200e00
|
005d8eb8265f8f735d991c39af19f88d441f303b
|
refs/heads/master
| 2022-05-06T14:09:01.587671
| 2020-04-20T08:29:39
| 2020-04-20T08:29:39
| 255,115,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
#! /usr/bin/env python3
import os
import requests
data_dir = ('supplier-data/descriptions/')
file_list = os.listdir(data_dir)
def post_request(p):
response = requests.post("http://35.223.215.137/fruits/", json=p)
code = response.status_code
body = response.text
print("The request returned code {}.".format(code), body)
def create_dicts(files):
description_d = {}
fd = []
for f in files:
description = os.path.join(data_dir, f)
with open(description, 'r') as file:
key_list = ['name', 'weight', 'description', 'image_name']
val_list = []
fil = file.readlines()
for line in fil:
if 'lbs' in line:
line = line.split(' ')
line = int(line[0])
val_list.append(line)
else:
val_list.append(line.strip())
val_list.append(os.path.splitext(f)[0] + '.jpeg')
print(val_list)
z = zip(key_list, val_list)
f_d = dict(z)
post_request(f_d)
#feedback_d = dict(list(enumerate(fd)))
#return feedback_d
if __name__=="__main__":
create_dicts(file_list)
|
[
"canaan@thetomato.co"
] |
canaan@thetomato.co
|
70f0f5a1e3be3fd611edb474c21c64ad9da1b84b
|
0b358a0d64eb03655c030b36c0ae87880b153951
|
/mmcv-1.4.7/tests/test_load_model_zoo.py
|
35492fa8a0a51952d458374c17770eee716d996a
|
[
"Apache-2.0"
] |
permissive
|
jshilong/DDQ
|
db05ff309d63316c62faa59b28c66d65eef973d1
|
de9331e4579aaafab4d69e3a9a3c6638efc5392c
|
refs/heads/main
| 2023-06-03T15:02:09.949907
| 2023-05-24T03:32:12
| 2023-05-24T03:32:12
| 498,974,099
| 199
| 6
|
Apache-2.0
| 2022-06-02T05:01:53
| 2022-06-02T03:10:25
| null |
UTF-8
|
Python
| false
| false
| 5,478
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import patch
import pytest
import mmcv
from mmcv.runner.checkpoint import (DEFAULT_CACHE_DIR, ENV_MMCV_HOME,
ENV_XDG_CACHE_HOME, _get_mmcv_home,
_load_checkpoint,
get_deprecated_model_names,
get_external_models)
from mmcv.utils import TORCH_VERSION
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_set_mmcv_home():
os.environ.pop(ENV_MMCV_HOME, None)
mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
os.environ[ENV_MMCV_HOME] = mmcv_home
assert _get_mmcv_home() == mmcv_home
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_default_mmcv_home():
os.environ.pop(ENV_MMCV_HOME, None)
os.environ.pop(ENV_XDG_CACHE_HOME, None)
assert _get_mmcv_home() == os.path.expanduser(
os.path.join(DEFAULT_CACHE_DIR, 'mmcv'))
model_urls = get_external_models()
assert model_urls == mmcv.load(
osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_external_models():
os.environ.pop(ENV_MMCV_HOME, None)
mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
os.environ[ENV_MMCV_HOME] = mmcv_home
ext_urls = get_external_models()
assert ext_urls == {
'train': 'https://localhost/train.pth',
'test': 'test.pth',
'val': 'val.pth',
'train_empty': 'train.pth'
}
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_deprecated_models():
os.environ.pop(ENV_MMCV_HOME, None)
mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
os.environ[ENV_MMCV_HOME] = mmcv_home
dep_urls = get_deprecated_model_names()
assert dep_urls == {
'train_old': 'train',
'test_old': 'test',
}
def load_from_http(url, map_location=None):
return 'url:' + url
def load_url(url, map_location=None, model_dir=None):
return load_from_http(url)
def load(filepath, map_location=None):
return 'local:' + filepath
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
@patch('mmcv.runner.checkpoint.load_from_http', load_from_http)
@patch('mmcv.runner.checkpoint.load_url', load_url)
@patch('torch.load', load)
def test_load_external_url():
# test modelzoo://
url = _load_checkpoint('modelzoo://resnet50')
if TORCH_VERSION < '1.9.0':
assert url == ('url:https://download.pytorch.org/models/resnet50-19c8e'
'357.pth')
else:
# filename of checkpoint is renamed in torch1.9.0
assert url == ('url:https://download.pytorch.org/models/resnet50-0676b'
'a61.pth')
# test torchvision://
url = _load_checkpoint('torchvision://resnet50')
if TORCH_VERSION < '1.9.0':
assert url == ('url:https://download.pytorch.org/models/resnet50-19c8e'
'357.pth')
else:
# filename of checkpoint is renamed in torch1.9.0
assert url == ('url:https://download.pytorch.org/models/resnet50-0676b'
'a61.pth')
# test open-mmlab:// with default MMCV_HOME
os.environ.pop(ENV_MMCV_HOME, None)
os.environ.pop(ENV_XDG_CACHE_HOME, None)
url = _load_checkpoint('open-mmlab://train')
assert url == 'url:https://localhost/train.pth'
# test open-mmlab:// with deprecated model name
os.environ.pop(ENV_MMCV_HOME, None)
os.environ.pop(ENV_XDG_CACHE_HOME, None)
with pytest.warns(
Warning,
match='open-mmlab://train_old is deprecated in favor of '
'open-mmlab://train'):
url = _load_checkpoint('open-mmlab://train_old')
assert url == 'url:https://localhost/train.pth'
# test openmmlab:// with deprecated model name
os.environ.pop(ENV_MMCV_HOME, None)
os.environ.pop(ENV_XDG_CACHE_HOME, None)
with pytest.warns(
Warning,
match='openmmlab://train_old is deprecated in favor of '
'openmmlab://train'):
url = _load_checkpoint('openmmlab://train_old')
assert url == 'url:https://localhost/train.pth'
# test open-mmlab:// with user-defined MMCV_HOME
os.environ.pop(ENV_MMCV_HOME, None)
mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home')
os.environ[ENV_MMCV_HOME] = mmcv_home
url = _load_checkpoint('open-mmlab://train')
assert url == 'url:https://localhost/train.pth'
with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
_load_checkpoint('open-mmlab://train_empty')
url = _load_checkpoint('open-mmlab://test')
assert url == f'local:{osp.join(_get_mmcv_home(), "test.pth")}'
url = _load_checkpoint('open-mmlab://val')
assert url == f'local:{osp.join(_get_mmcv_home(), "val.pth")}'
# test http:// https://
url = _load_checkpoint('http://localhost/train.pth')
assert url == 'url:http://localhost/train.pth'
# test local file
with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
_load_checkpoint('train.pth')
url = _load_checkpoint(osp.join(_get_mmcv_home(), 'test.pth'))
assert url == f'local:{osp.join(_get_mmcv_home(), "test.pth")}'
|
[
"2392587229zsl@gmail.com"
] |
2392587229zsl@gmail.com
|
7db54fa4b19419ad6f5a61d90b947002b137d91a
|
42ab657221e7a7f7e3e185f7ea0f04821477e3d9
|
/240/searchMatrix.py
|
1db4b305730129e847603143bd03441397c815d6
|
[] |
no_license
|
Sevendeadlys/leetcode
|
2238f86e3b6c687c48e2d612658730826d4be983
|
e030f32b26daffbb57ca9bc13c2e8d3ea1c1c1eb
|
refs/heads/master
| 2021-01-10T15:35:52.912711
| 2016-02-25T03:14:02
| 2016-02-25T03:14:02
| 48,724,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
class Solution1(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]: return False
m = len(matrix)
n = len(matrix[0])
i = 0
while i < m:
"""
Binary search every list
"""
if target in matrix[i]:
return True
i += 1
return False
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]: return False
m = len(matrix)
n = len(matrix[0])
r = 0
c = n - 1
while r < m and c >= 0:
if target == matrix[r][c]:
return True
elif target > matrix[r][c]:
r += 1
else :
c -= 1
return False
|
[
"yi_nan@615-PC76.careri.com"
] |
yi_nan@615-PC76.careri.com
|
089a617446ed4d8811d8d82c91893eefc52314f3
|
44a071e30cf5ab17e6519e4d3edb2a6cb207ecef
|
/피보나치 함수/main.py
|
34a67a6319b18460ed6173b7ffccdba8366933ce
|
[] |
no_license
|
isp5708/Algorithm_python_bj_2cotae
|
f56e957677b0f2a6756172c21583a0be00c6b2cd
|
56e630dc36b976177fd0c2d0d4396e1a1ba0ad33
|
refs/heads/master
| 2023-01-30T23:07:18.594616
| 2020-12-11T02:31:06
| 2020-12-11T02:31:06
| 320,441,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
t= int(input())
array=[]
for i in range(t):
array.append(int(input()))
n=max(array)
dp0=[0]*(n+1)
dp1=[0]*(n+1)
dp0[0],dp1[0]=1,0
dp0[1],dp1[1]=0,1
for i in range(2,n+1):
dp0[i],dp1[i]=dp0[i-2]+dp0[i-1],dp1[i-2]+dp1[i-1]
for i in range(t):
print(str(dp0[array[i]])+' '+str(dp1[array[i]]))
|
[
"dlwnsdud3737@naver.com"
] |
dlwnsdud3737@naver.com
|
fc32e950371b1885f07097d5fa5b19c2fe75426e
|
4bc24011c65cb5194eb94abfd8d394a6b0dc6a50
|
/packages/OpenWeatherMap/nodes/OpenWeatherMap___BreakTemp0/OpenWeatherMap___BreakTemp0.py
|
be0fa2f76d4d13639e7780853aa71a56c8e552b8
|
[
"MIT"
] |
permissive
|
ManojKumarTiwari/Ryven
|
6c76ebdf89599bb7c9b4ce020f195eea135d9da1
|
2b8ef0bdcf05a458a6cf8791cbc2fda6870932f8
|
refs/heads/master
| 2022-11-12T00:23:45.303378
| 2020-07-08T09:32:10
| 2020-07-08T09:32:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
from custom_src.retain import m
# API METHODS
# self.main_widget <- access to main widget
# self.update_shape() <- recomputes the whole shape and content positions
# Ports
# self.input(index) <- access to input data
# self.set_output_val(self, index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, append=True, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, append=True, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
from pyowm.utils.measurables import kelvin_to_celsius, kelvin_to_fahrenheit
class BreakTemp_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(BreakTemp_NodeInstance, self).__init__(parent_node, flow, configuration)
# self.special_actions['action name'] = self.actionmethod ...
# ...
self.initialized()
# don't call self.update_event() directly, use self.update() instead
def update_event(self, input_called=-1):
temp_dict = self.input(0)
if self.input(1) != 'kelvin':
for key in list(temp_dict.keys()):
item = temp_dict[key]
if item is not None:
if self.input(1) == 'celsius':
temp_dict[key] = kelvin_to_celsius(item)
elif self.input(1) == 'fahrenheit':
temp_dict[key] = kelvin_to_fahrenheit(item)
# temp_dict = kelvin_dict_to(temp_dict, self.input(1)) doesn't work with NoneType values -.- which happen to persist
temp = temp_dict['temp']
temp_kf = temp_dict['temp_kf']
temp_max = temp_dict['temp_max']
temp_min = temp_dict['temp_min']
feels_like = temp_dict['feels_like']
self.set_output_val(0, temp)
self.set_output_val(1, temp_kf)
self.set_output_val(2, temp_min)
self.set_output_val(3, temp_max)
self.set_output_val(4, feels_like)
def get_data(self):
data = {}
# ...
return data
def set_data(self, data):
pass # ...
# optional - important for threading - stop everything here
def removing(self):
pass
|
[
"leon.thomm@gmx.de"
] |
leon.thomm@gmx.de
|
749f5c88ee3cfd6b790f1c722c970946167645ae
|
235267656f98a7583b39f0550d147aca17880c75
|
/machine_learning_basics/layers/dense.py
|
67419dd58659f63f5b3cb623d8d6d625fe764cb0
|
[] |
no_license
|
calvinfeng/machine-learning-notebook
|
5cd2695947cd6058916f39a51431b162bc6a4e32
|
7da789ef34d5e5bcf9033cfbe0ff5df607b2437a
|
refs/heads/master
| 2023-08-16T10:49:28.114020
| 2023-06-16T19:23:43
| 2023-06-16T19:23:43
| 127,679,318
| 38
| 12
| null | 2023-08-14T22:59:02
| 2018-04-01T23:22:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
import numpy as np
class Dense:
def __init__(self):
self.x = None
self.w = None
self.b = None
def __call__(self, x, w, b):
"""Perform forward propagation
Args:
x (np.ndarray): Input
w (np.ndarray): Kernel weights
b (np.ndarray): Biases
Returns:
np.ndarray: Output
"""
self.x = x
self.w = w
self.b = b
return x @ w + b
def gradients(self, grad_out):
"""Perform back propagation and return gradients with respect to upstream loss function.
Args:
grad_out (np.ndarray): Gradient of loss with respect to output.
Returns:
np.ndarray: Gradient of loss with respect to x
np.ndarray: Gradient of loss with respect to w
np.ndarray: Gradient of loss with respect to b
"""
if self.x is None:
raise ValueError("layer must be forward propagated first")
grad_x = grad_out @ self.w.T
grad_w = self.x.T @ grad_out
grad_b = np.sum(grad_out, axis=0)
return grad_x, grad_w, grad_b
|
[
"calvin.j.feng@gmail.com"
] |
calvin.j.feng@gmail.com
|
93d424fffa96884d8805000d5236dce8511322b6
|
0c477e8196d94216bbea8b260579c7e84aba4363
|
/gui/TopBarUI.py
|
2e88f9ab74500348bab56745b4c1c8bbed38afbb
|
[] |
no_license
|
industry4-0/1UP
|
caa00eed15f6fbccbadea62596519291738232d2
|
7903fbca7a478c9618a776bdcc29ee95be5c1f94
|
refs/heads/master
| 2020-09-22T02:10:50.516217
| 2019-12-01T13:57:32
| 2019-12-01T13:57:32
| 225,013,860
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
import datetime
from gui.core.UINode import UINode
from enums.UIState import UIState
from utils.commons import DEFAULT_FONT, FONT_AWESOME_FONT_FILE, ICONS, right_text, make_font
class TopBarUI(UINode):
def __init__(self):
super().__init__(UIState.ActivityList)
self._battery_level = 100
self.fontawesome_font = make_font(FONT_AWESOME_FONT_FILE, 48)
self.keys = list(ICONS.keys())
self.index = 0
self.iterations = 0
def _getTime(self):
return datetime.datetime.now().strftime("%d/%m/%y %H:%M %S")
def render(self, engine):
engine.text((0, 0), self._getTime(), font=DEFAULT_FONT, fill="white")
right_text(engine, 0, 128, 0, text="{}%".format(self._battery_level))
self.iterations += 1
if (self.iterations > 20):
self.iterations = 0
self.index += 1
if (self.index >= len(self.keys)):
self.index = 0
engine.text((40, 40), ICONS[self.keys[self.index]], font=self.fontawesome_font, fill="yellow")
|
[
"noreply@github.com"
] |
industry4-0.noreply@github.com
|
dde66bb3f35b3fa9cd30f1ad61a5d6953a0be77b
|
b706b62f91bcf1010d865d0eaa9adecd86dc0b67
|
/login_registration_2.py
|
a7da7cfea642344fbbe2d50b68ef85783f15c6a0
|
[] |
no_license
|
IvanPliska/Book-store-testing
|
d4ed0896fb885df4456b701d5aa8d4788915df5d
|
7d4a96b653b53673c1df574219ea6525e0026ec8
|
refs/heads/master
| 2023-08-30T00:32:34.609825
| 2021-11-16T17:28:15
| 2021-11-16T17:28:15
| 428,748,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
# Smoke test: log in to the practice shop with "remember me" checked, then
# verify that a "Logout" link is present afterwards.
import time
from selenium import webdriver
from selenium.webdriver.support.select import Select

driver = webdriver.Chrome()
driver.maximize_window()
driver.get("http://practice.automationtesting.in/")
time.sleep(3)
# Open the account/login page.
My_Account = driver.find_element_by_link_text("My Account").click()
time.sleep(3)
# Fill in the login form credentials.
Email_address = driver.find_element_by_id("username").send_keys("plis_in@mail.ru")
time.sleep(3)
Password_in = driver.find_element_by_id("password").send_keys("123Qaz456!@#$<>?")
time.sleep(3)
Remember_me = driver.find_element_by_id("rememberme").click()
time.sleep(3)
Register = driver.find_element_by_xpath("//input[@value='Login']").click()
time.sleep(2)
# NOTE(review): find_element_by_link_text raises NoSuchElementException when
# the element is absent, so Logout is never None and the else branch below is
# effectively unreachable -- confirm whether a try/except was intended.
Logout = driver.find_element_by_link_text("Logout")
if Logout is not None:
    print("Присутствует элемент Logout!")  # "Logout element is present!"
else:
    print("Отсутствует элемент Logout!")  # "Logout element is missing!"
time.sleep(2)
driver.quit()
|
[
"plis_in@mail.ru"
] |
plis_in@mail.ru
|
153ab10f878388c0574877515a1f65a1e68dea68
|
abb51df4a0bac65428be0bbe79aabcf7cb3676a6
|
/Invaders.py
|
6659e506ca66a096eab248f78ffc97e461585d2e
|
[] |
no_license
|
Sinaeskandari/Invaders
|
3aee4fc0b9046e755c385add186c305b2e8f6dd0
|
74de652d952a8feccc0ae6d02ea87435241c97de
|
refs/heads/master
| 2020-08-06T19:51:19.622445
| 2019-10-06T08:20:39
| 2019-10-06T08:20:39
| 213,131,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,547
|
py
|
# Sina Eskandari
# Student number = 97521054
# for more information read 'readme.txt'
import pygame
import sys
import random
from pygame.locals import *
# This part is for initializing the pygame
pygame.init()
# Window dimensions in pixels.
windowWidth = 680
windowHeight = 680
window = pygame.display.set_mode((windowWidth, windowHeight))
pygame.display.set_caption('Game')
# Clock used to cap how fast the screen updates (frames per second).
clock = pygame.time.Clock()
FPS = 60
# Font used for the score/health texts.
font_obj = pygame.font.Font('freesansbold.ttf', 20)
class Ship(pygame.sprite.Sprite):
    """The player's space ship, kept near the bottom edge of the window."""

    global windowWidth, windowHeight

    def __init__(self):
        """Load the ship sprite and place it horizontally centred."""
        super().__init__()
        # Sprite artwork; see readme for the download link.
        self.image = pygame.image.load('ship.png')
        self.rect = self.image.get_rect()
        self.shipSize = self.rect.size  # cached (width, height)
        # Gap that keeps the ship above the black strip at the bottom.
        self.distanceFromBottom = 45
        # Initial position: centred horizontally, fixed near the bottom.
        self.X = (windowWidth - self.shipSize[0]) / 2
        self.Y = windowHeight - self.distanceFromBottom
        self.velShip = 5  # horizontal speed in pixels per frame

    def update(self):
        """Move the ship by the held arrow keys, clamped inside the window."""
        if keys[K_RIGHT]:
            # Move right but never past the right window edge.
            self.X = min(self.X + self.velShip, windowWidth - self.shipSize[0])
        if keys[K_LEFT]:
            # Move left but never past the left window edge.
            self.X = max(self.X - self.velShip, 0)
class Invader(pygame.sprite.Sprite):
    """A single enemy soldier ('invader') sprite."""

    def __init__(self):
        """Load the invader image and fetch its bounding rectangle."""
        super().__init__()
        # Sprite artwork; see readme for the download link.
        self.image = pygame.image.load('invader1.png')
        self.rect = self.image.get_rect()
        # Cached (width, height) of the sprite, kept as a shorthand.
        self.size = self.rect.size
class Bullet(pygame.sprite.Sprite):
    """A red 2x10 bullet fired upwards by the player's ship."""

    def __init__(self):
        """Create the bullet as a small solid-red surface."""
        super().__init__()
        self.image = pygame.Surface((2, 10))
        self.image.fill((255, 0, 0))
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the bullet upwards by 5 pixels per frame."""
        self.rect.y = self.rect.y - 5
class Obstacle(pygame.sprite.Sprite):
    """A defensive barrier that shields the ship from enemy bullets."""

    def __init__(self):
        """Load the obstacle image and fetch its bounding rectangle."""
        super().__init__()
        # Sprite artwork; see readme for the download link.
        self.image = pygame.image.load('obstacle1.png')
        self.rect = self.image.get_rect()
class InvBullet(pygame.sprite.Sprite):
    """A blue 2x10 bullet the invaders fire at the player's ship."""

    def __init__(self):
        """Create the bullet as a small solid-blue surface."""
        super().__init__()
        self.image = pygame.Surface((2, 10))
        self.image.fill((0, 0, 255))
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the bullet downwards by 5 pixels per frame."""
        self.rect.y = self.rect.y + 5
class BossFight(pygame.sprite.Sprite):
    """The boss enemy sitting at the top of the screen.

    It shoots at the ship; the player wins once enough invaders are killed.
    """

    def __init__(self):
        """Load the boss image and fetch its bounding rectangle."""
        super().__init__()
        # Sprite artwork; see readme for the download link.
        self.image = pygame.image.load('bossfight.png')
        self.rect = self.image.get_rect()
# Define a object of BossFight class
boss = BossFight()
# Define a object of Ship class
ship = Ship()
# Sprite groups used for drawing and collision detection.
shipGroup = pygame.sprite.Group()          # just the player ship
inv_list = pygame.sprite.Group()           # enemy invaders
all_sprites_list = pygame.sprite.Group()   # everything that gets drawn
bullet_list = pygame.sprite.Group()        # player bullets
obstacle_list = pygame.sprite.Group()      # defensive barriers
inv_bullet_list = pygame.sprite.Group()    # enemy bullets
# Add ship and boss to sprite groups
all_sprites_list.add(ship)
all_sprites_list.add(boss)
shipGroup.add(ship)
# Lay out a 17x10 grid of invaders and register them in the groups.
for i in range(17):
    for j in range(10):
        invader = Invader()
        invader.rect.x = 50 + (35 * i)
        invader.rect.y = 90 + (27 * j)
        inv_list.add(invader)
        all_sprites_list.add(invader)
# Place six obstacles in a row above the ship and register them.
for i in range(6):
    obstacle = Obstacle()
    obstacle.rect.y = 515
    obstacle.rect.x = 40 + (110 * i)
    obstacle_list.add(obstacle)
    all_sprites_list.add(obstacle)
def collide():
    """Destroy enemy bullets that strike an obstacle.

    Obstacles absorb invader fire: any bullet touching a barrier is removed
    from both the draw group and the enemy-bullet group.
    """
    for enemy_bullet in inv_bullet_list:
        # spritecollide with dokill=False returns the obstacles hit, if any.
        if pygame.sprite.spritecollide(enemy_bullet, obstacle_list, False):
            all_sprites_list.remove(enemy_bullet)
            inv_bullet_list.remove(enemy_bullet)
def game_over():
    """Tear down pygame and terminate the process."""
    pygame.quit()
    sys.exit()
# Player state: current score and remaining health.
score = 0
health = 100
while True:
    # Clear the frame to black.
    window.fill((0, 0, 0))
    # Snapshot of the held-down keys; read by Ship.update() so movement
    # repeats while a key is held (event handling would fire only once).
    keys = pygame.key.get_pressed()
    # Handling the events in game
    for event in pygame.event.get():
        if event.type == QUIT:
            game_over()
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                game_over()
        # When space is pressed, spawn a bullet at the ship's nose.
        if event.type == KEYDOWN:
            if event.key == K_SPACE:
                bullet = Bullet()
                bullet.rect.x = ship.rect.x + (ship.shipSize[0]) / 2
                bullet.rect.y = ship.rect.y
                all_sprites_list.add(bullet)
                bullet_list.add(bullet)
    # End the game on loss (health depleted) or win (170 invaders killed).
    if (health <= 0) or (score >= 170):
        game_over()
    # Player bullets vs invaders: spritecollide(..., True) kills the invader.
    for bullet in bullet_list:
        hit_list = pygame.sprite.spritecollide(bullet, inv_list, True)
        # Remove the bullet once it has hit an invader.
        for i in hit_list:
            bullet_list.remove(bullet)
            all_sprites_list.remove(bullet)
        # Bullets that leave the top of the window are discarded.
        if bullet.rect.y < -10:
            bullet_list.remove(bullet)
        # One point per invader killed this frame.
        for i in hit_list:
            score += 1
    # Pin the boss to the top of the screen.
    boss.rect.x = 50
    boss.rect.y = 0
    # Spawn one enemy bullet per frame at a random x along the boss's width.
    inv_bullet = InvBullet()
    inv_bullet.rect.x = random.randrange(boss.rect.x, boss.rect.x + boss.rect.size[0])
    inv_bullet.rect.y = boss.rect.size[1]
    inv_bullet_list.add(inv_bullet)
    all_sprites_list.add(inv_bullet)
    for invbullet in inv_bullet_list:
        # Bullets that leave the bottom of the window are discarded.
        if invbullet.rect.y > 710:
            inv_bullet_list.remove(invbullet)
        # A hit on the ship costs 2 health and consumes the bullet.
        if pygame.sprite.collide_rect(invbullet, ship):
            inv_bullet_list.remove(invbullet)
            all_sprites_list.remove(invbullet)
            health -= 2
    # Render the score (top-left) and health (top-right) texts.
    score_text = font_obj.render('Score=' + str(score), True, (255, 255, 255))
    score_text_rect = score_text.get_rect()
    score_text_rect.x = 0
    score_text_rect.y = 0
    health_text = font_obj.render('Health=' + str(health), True, (255, 255, 255))
    health_text_rect = health_text.get_rect()
    health_text_rect.top = 0
    health_text_rect.right = windowWidth
    window.blit(health_text, health_text_rect)
    window.blit(score_text, score_text_rect)
    # Call 'update' method for all sprites
    all_sprites_list.update()
    # Sync the ship's rect with the position computed in Ship.update().
    ship.rect.x = ship.X
    ship.rect.y = ship.Y
    # drawing all of sprites
    all_sprites_list.draw(window)
    collide()
    clock.tick(FPS)
    pygame.display.update()
|
[
"sinaeskandari007@gmail.com"
] |
sinaeskandari007@gmail.com
|
672c0a23335d9aa8fddf37073f82d135259b0af1
|
e10c08b3480eec73ceab0f4316e567a7cd0e95da
|
/glancesync/glance/sync/clients.py
|
cadc8f4acf63c25e2f1cd27c2d81506c7f9bc455
|
[
"Apache-2.0"
] |
permissive
|
luqitao/tricircle
|
ca612a4d4fc0287af3857091f2bfbf0c90dbe7bd
|
d5acf44b5f097e1fe1e94e220138e11c7fdd25fe
|
refs/heads/master
| 2021-01-16T23:06:35.882882
| 2015-04-16T09:30:57
| 2015-04-16T09:30:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Jia Dong, HuaWei
from oslo.config import cfg
from keystoneclient.v2_0 import client as ksclient
import glance.openstack.common.log as logging
from glanceclient.v2 import client as gclient2
# Module-level logger and global oslo.config handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Clients(object):
    """Lazily-built keystone and glance client handles for glance sync.

    Credentials come from the caller-supplied (auth_token, tenant_id) pair
    when both are present, otherwise from the admin credentials configured
    in the keystone_authtoken section.
    """

    def __init__(self, auth_token=None, tenant_id=None):
        self._keystone = None          # cached keystone client
        self._glance = None            # cached glance client
        self._cxt_token = auth_token   # request-context token, may be None
        self._tenant_id = tenant_id
        self._ks_conf = cfg.CONF.keystone_authtoken

    @property
    def auth_token(self, token=None):
        # NOTE(review): property getters are invoked with no arguments, so
        # 'token' is always None here and the keystone client's token is
        # returned; the parameter looks like dead code -- confirm.
        return token or self.keystone().auth_token

    @property
    def ks_url(self):
        # Keystone v2.0 endpoint assembled from config, with fallbacks.
        protocol = self._ks_conf.auth_protocol or 'http'
        auth_host = self._ks_conf.auth_host or '127.0.0.1'
        auth_port = self._ks_conf.auth_port or '35357'
        return protocol + '://' + auth_host + ':' + str(auth_port) + '/v2.0/'

    def url_for(self, **kwargs):
        # Resolve a single endpoint from the keystone service catalog.
        return self.keystone().service_catalog.url_for(**kwargs)

    def get_urls(self, **kwargs):
        # Resolve all matching endpoints from the keystone service catalog.
        return self.keystone().service_catalog.get_urls(**kwargs)

    def keystone(self):
        """Return a cached keystone client, building it on first use.

        Returns None (after logging the error) if client creation fails.
        """
        if self._keystone:
            return self._keystone
        # Prefer the request context's token/tenant; otherwise fall back to
        # the configured admin credentials.
        if self._cxt_token and self._tenant_id:
            creds = {'token': self._cxt_token,
                     'auth_url': self.ks_url,
                     'project_id': self._tenant_id
                     }
        else:
            creds = {'username': self._ks_conf.admin_user,
                     'password': self._ks_conf.admin_password,
                     'auth_url': self.ks_url,
                     'project_name': self._ks_conf.admin_tenant_name}
        try:
            self._keystone = ksclient.Client(**creds)
        except Exception as e:
            # NOTE(review): '_' is not imported in this module; presumably it
            # is installed as a builtin by the oslo i18n setup -- confirm.
            LOG.error(_('create keystone client error: reason: %s') % (e))
            return None
        return self._keystone

    def glance(self, auth_token=None, url=None):
        """Return a cached glance v2 client, building it on first use.

        Args:
            auth_token: token to use; defaults to the keystone token.
            url: image endpoint; defaults to the catalog's image service URL.
        """
        gclient = gclient2
        if gclient is None:
            return None
        if self._glance:
            return self._glance
        args = {
            'token': auth_token or self.auth_token,
            'endpoint': url or self.url_for(service_type='image')
        }
        self._glance = gclient.Client(**args)
        return self._glance
|
[
"joehuang@huawei.com"
] |
joehuang@huawei.com
|
09eace698786d266b683b6e9ab3515c4ea35c1aa
|
f809a4c10e6f49938a47471714c5c048265f6c01
|
/src/python/py27hash/key.py
|
453720684b1f2a86cde296f3657b2f6953976a0f
|
[
"MIT"
] |
permissive
|
Amli/py27hash
|
57e0c56bcdb43d6d9490e0d5e93afae9ebc7660d
|
02ebc8a97f4eb6c9838288ec875873971097a456
|
refs/heads/master
| 2023-01-04T03:13:08.855753
| 2020-11-03T00:46:42
| 2020-11-03T00:46:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,353
|
py
|
"""
Compatibility methods to support Python 2.7 style key iteration in Python 3.X+
This is designed for compatibility not performance.
"""
import ctypes
# pylint: disable = E0401
from .hash import Hash
class Keys(object):
    """
    Compatibility class to support Python 2.7 style key iteration in Python 3.X+

    Logic ported from the 2.7 Python branch: cpython/Objects/dictobject.c
    Logic ported from the 2.7 Python branch: cpython/Objects/setobject.c
    """

    # Min dict size
    MINSIZE = 8

    # Hash collisions
    PERTURB_SHIFT = 5

    def __init__(self):
        """
        Initializes a keys object.
        """
        self.keylist = []    # keys in insertion order
        self.keysort = None  # cached 2.7-ordered key list; None => rebuild

        # Python 2 dict default size
        self.mask = Keys.MINSIZE - 1

    def __setstate__(self, state):
        """
        Overrides default pickling object to force re-adding all keys and match Python 2.7 deserialization logic.

        Args:
            state: input state
        """
        self.__dict__ = state
        keys = self.keys()

        # Clear keys and re-add to match deserialization logic
        self.__init__()

        for k in keys:
            self.add(k)

    def __iter__(self):
        """
        Default iterator.

        Returns:
            iterator
        """
        return iter(self.keys())

    def keys(self):
        """
        Returns keys ordered using Python 2.7's iteration algorithm.

        Method: static PyDictEntry *lookdict(PyDictObject *mp, PyObject *key, register long hash)

        Returns:
            list of keys
        """
        if not self.keysort:
            keys = []
            hids = set()

            for k in self.keylist:
                # C API uses unsigned values
                h = ctypes.c_size_t(Hash.hash(k)).value
                i = h & self.mask

                hid = i
                perturb = h

                # Open-addressing probe sequence from 2.7's lookdict: walk
                # until an unused slot id is found for this key.
                while hid in hids:
                    i = (i << 2) + i + perturb + 1
                    hid = i & self.mask
                    perturb >>= Keys.PERTURB_SHIFT

                keys.append((hid, k))
                hids.add(hid)

            # Cache result - performance - clear if more keys added
            self.keysort = [v for (k, v) in sorted(keys, key=lambda x: x[0])]

        return self.keysort

    def add(self, key):
        """
        Called each time a new item is inserted. Tracks via insertion order and will maintain the same order
        as a dict in Python 2.7.

        Method: static int dict_set_item_by_hash_or_entry(register PyObject *op, PyObject *key, long hash,
                                                          PyDictEntry *ep, PyObject *value)

        Args:
            key: key to add
        """
        # Add key to list. If this is a replace/update then size won't change.
        # NOTE: test 'key is not None' rather than truthiness so that valid
        # but falsy keys (0, "", False) are tracked like any other key,
        # matching Python 2 dict behaviour.
        if key is not None and key not in self.keylist:
            # Append key to list
            self.keylist.append(key)

            # Clear cached keys
            self.keysort = None

            # Resize dict if 2/3 capacity
            if len(self.keylist) * 3 >= ((self.mask + 1) * 2):
                # Reset key list to simulate the dict resize + copy operation
                self.keylist = self.keys()
                self.keysort = None

                self.setMask()

    def remove(self, key):
        """
        Remove a key from the backing list.

        Args:
            key: key to remove
        """
        if key in self.keylist:
            # Remove key from list
            self.keylist.remove(key)

            # Clear cached keys
            self.keysort = None

    def merge(self, d):
        """
        Merges keys from an existing iterable into this key list.

        Method: int PyDict_Merge(PyObject *a, PyObject *b, int override)

        Args:
            d: input dict
        """
        # PyDict_Merge initial merge size is double the size of the current + incoming dict
        self.setMask((len(self.keylist) + len(d)) * 2)

        # Copy actual keys
        for k in d:
            self.add(k)

    def copy(self):
        """
        Makes a copy of self.

        Method: PyObject *PyDict_Copy(PyObject *o)

        Returns:
            copy of self
        """
        # Copy creates a new object and merges keys in
        new = Keys()
        new.merge(self.keys())

        return new

    def pop(self):
        """
        Pops the top element from the sorted keys if it exists. Returns None otherwise.

        Method: static PyObject *dict_popitem(PyDictObject *mp)

        Return:
            top element or None if Keys is empty
        """
        if self.keylist:
            # Pop the top element
            value = self.keys()[0]
            self.remove(value)
            return value

        return None

    def setMask(self, request=None):
        """
        Key based on the total size of this dict. Matches ma_mask in Python 2.7's dict.

        Method: static int dictresize(PyDictObject *mp, Py_ssize_t minused)

        Args:
            request: requested minimum size; derived from the current key
                     count when omitted.
        """
        if not request:
            length = len(self.keylist)

            # Python 2 dict increases by a factor of 4 for small dicts, 2 for larger ones
            request = length * (2 if length > 50000 else 4)

        newsize = Keys.MINSIZE
        while newsize <= request:
            newsize <<= 1

        self.mask = newsize - 1
|
[
"561939+davidmezzetti@users.noreply.github.com"
] |
561939+davidmezzetti@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.