blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32598ef7ce2a675fd19eeb58a2b8b4829865353f | 1b47fd7877bc49870eba64d424bcd2a607072b8f | /na-components/nengo-fpga/nengo_fpga/version.py | e151e2b52ef7840a88589515933aab9716b10eb4 | [
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-secret-labs-2011",
"Python-2.0",
"MIT"
] | permissive | neurom-iot/naide | a7cc7d821f6dbfd0467aa801e4526e0a78099723 | 3e92134c970cf87c800a6ac91482d6b1ab6e681d | refs/heads/master | 2023-03-11T02:47:10.033264 | 2022-02-26T10:32:43 | 2022-02-26T10:32:43 | 175,844,919 | 12 | 5 | MIT | 2023-03-07T11:56:13 | 2019-03-15T15:22:22 | JavaScript | UTF-8 | Python | false | false | 507 | py | """Nengo version information.
We use semantic versioning (see http://semver.org/).
and conform to PEP440 (see https://www.python.org/dev/peps/pep-0440/).
'.devN' will be added to the version unless the code base represents
a release version. Release versions are git tagged with the version.
"""
name = "nengo-fpga"  # distribution name
version_info = (0, 2, 3)  # (major, minor, patch)
dev = 0  # dev release number; set to None for a tagged release build
# Assemble the PEP 440 version string, e.g. "0.2.3.dev0",
# or just "0.2.3" when dev is None.
version = "{v}{dev}".format(
    v=".".join(str(v) for v in version_info),
    dev=(".dev%d" % dev) if dev is not None else "",
)
| [
"kskim.hci@gmail.com"
] | kskim.hci@gmail.com |
bf57d3cff2ccb4659f1cc8e307879d4511e1cd48 | 1e0ad0c9223a79b67335e3b64f568fd888a5d374 | /LiveAnalyzer_FINAL2.py | 343cef604925905216be59037b20a68dfa238605 | [] | no_license | aeshon/Code_Samples | 47bdf01e69b235699885d4d196daa4eb0da8213e | 09a7966b7ad6736f0259eb1360c467a0fe72960f | refs/heads/master | 2021-03-31T23:35:54.655128 | 2020-03-18T05:10:33 | 2020-03-18T05:10:33 | 248,138,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,907 | py | from imutils.video import VideoStream
import datetime
import argparse
import imutils
import time
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from tensorflow.lite.python.interpreter import Interpreter as inter
import os
import cv2
import numpy as np
from PIL import Image
def set_input_tensor(interpreter, image):
    """Copy *image* into the interpreter's first input tensor in place."""
    input_details = interpreter.get_input_details()[0]
    tensor = interpreter.tensor(input_details['index'])()[0]
    tensor[:, :] = image
def classify_image(interpreter, image, top_k=1):
    """Run one inference and return the top_k highest-scoring (index, score)
    pairs (selected via argpartition, so not sorted among themselves)."""
    set_input_tensor(interpreter, image)
    interpreter.invoke()
    out_details = interpreter.get_output_details()[0]
    scores = np.squeeze(interpreter.get_tensor(out_details['index']))

    # Quantized models return uint8 scores; map them back to real values.
    if out_details['dtype'] == np.uint8:
        scale, zero_point = out_details['quantization']
        scores = scale * (scores - zero_point)

    top = np.argpartition(-scores, top_k)[:top_k]
    return [(i, scores[i]) for i in top]
def authorize():
    """Authenticate to Google Drive and return a GoogleDrive handle.

    Credential lifecycle: load cached credentials from mycreds.txt; if none
    exist, run the local-webserver OAuth flow (requesting offline access so
    a refresh token is issued); if expired, refresh; otherwise reuse them.
    The (possibly updated) credentials are written back to mycreds.txt.
    """
    gauth = GoogleAuth()
    # Try to load saved client credentials
    gauth.LoadCredentialsFile("mycreds.txt")
    if gauth.credentials is None:
        # Authenticate if they're not there
        # This is what solved the issues:
        # offline access + forced approval make Google return a refresh token
        gauth.GetFlow()
        gauth.flow.params.update({'access_type': 'offline'})
        gauth.flow.params.update({'approval_prompt': 'force'})
        gauth.LocalWebserverAuth()
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()
    # Save the current credentials to a file
    gauth.SaveCredentialsFile("mycreds.txt")
    drive = GoogleDrive(gauth)
    return drive
def picture():
    """Capture one frame from the default camera, downscale it to 320x240
    and save it to the fixed snapshot path.

    Raises Exception if the camera cannot be opened or a frame cannot be
    read.  The capture device is always released, even on failure (the
    original leaked the handle if read/resize raised).
    """
    # ec.capture(0, False, "C:/Users/aeshon/Desktop/pic/img.jpg")
    video_capture = cv2.VideoCapture(0)

    # Check success
    if not video_capture.isOpened():
        raise Exception("Could not open video device")

    try:
        # Read picture. ret == True on success; the original ignored ret and
        # would crash inside cv2.resize with an opaque error on failure.
        ret, frame = video_capture.read()
        if not ret:
            raise Exception("Could not read frame from video device")
        new_frame = cv2.resize(frame, (320, 240))
        cv2.imwrite("C:/Users/aeshon/Desktop/pic/img.jpg", new_frame)
    finally:
        # Close device unconditionally
        video_capture.release()
def object_detector():
    """Watch the webcam (or a --video file) and return True on first motion.

    Uses simple background subtraction against the first captured frame:
    any contour larger than --min-area flips the status to "Occupied" and
    the function returns True immediately.  Returns None if the stream
    ends or the user presses 'q' before any motion is seen.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the video file")
    ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
    args = vars(ap.parse_args())
    # if the video argument is None, then we are reading from webcam
    if args.get("video", None) is None:
        vs = VideoStream(src=0).start()
        time.sleep(2.0)  # give the camera sensor time to warm up
    # otherwise, we are reading from a video file
    else:
        vs = cv2.VideoCapture(args["video"])
    # initialize the first frame in the video stream
    firstFrame = None
    while True:
        # grab the current frame and initialize the occupied/unoccupied
        # text
        frame = vs.read()
        frame = frame if args.get("video", None) is None else frame[1]
        text = "Unoccupied"
        # if the frame could not be grabbed, then we have reached the end
        # of the video
        if frame is None:
            break
        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the first frame is None, initialize it (background reference)
        if firstFrame is None:
            firstFrame = gray
            continue
        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < args["min_area"]:
                continue
            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"
            if text == "Occupied":
                # motion detected: tear down the preview window and report it
                cv2.destroyAllWindows()
                return True
        cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
        # show the frame and record if the user presses a key
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break
    # cleanup the camera and close any open windows
    vs.stop() if args.get("video", None) is None else vs.release()
    cv2.destroyAllWindows()
def predict(drive):
    """Poll the staging folder forever, classify each image, report starlings.

    Two-stage TFLite cascade: the first model's label 0 is printed as
    "Animal"; anything else is re-checked by a second (bird-vs-starling)
    model, whose label 0 prints "Bird" and any other label is logged and
    uploaded to Drive as a starling sighting.
    NOTE(review): busy-waits (no sleep) while the folder is empty, and the
    first-stage model is reloaded on every poll iteration — confirm this is
    acceptable for the deployment target.
    """
    while True:
        if len(os.listdir('C:/Users/aeshon/Desktop/StarlingsFolder')) == 0:
            continue
        model = "C:/Users/aeshon/Downloads/exportedModel.tflite"
        interpreter = inter(model)
        interpreter.allocate_tensors()
        _, height, width, _ = interpreter.get_input_details()[0]['shape']
        for file in os.listdir("C:/Users/aeshon/Desktop/StarlingsFolder"):
            # model expects 224x224 RGB input
            img = Image.open("C:/Users/aeshon/Desktop/StarlingsFolder/" + file).convert('RGB').resize((224, 224), Image.ANTIALIAS)
            results = classify_image(interpreter, img)
            label_id, prob = results[0]
            if label_id == 0:
                print("Animal with " + str(round((prob * 100), 1)) + "% confidence")
            else:
                # second stage: bird vs starling model
                model = "C:/Users/aeshon/Downloads/exportedModelBVS.tflite"
                interpreter = inter(model)
                interpreter.allocate_tensors()
                _, height, width, _ = interpreter.get_input_details()[0]['shape']
                results = classify_image(interpreter, img)
                label_id, prob = results[0]
                if label_id == 0:
                    print("Bird with " + str(round((prob * 100), 1)) + "% confidence")
                else:
                    string = "Starling sighted with " + str(round((prob*100), 1)) + "% confidence"
                    write(string)
            # image is consumed either way; remove it so it is not reprocessed
            os.remove("C:/Users/aeshon/Desktop/StarlingsFolder/" + file)
        upload(drive)
def write(class_result):
    """Append a timestamped classification message to the alert log.

    Uses a context manager so the log file is flushed and closed even if a
    write fails (the original opened the file and never closed it, leaking
    the handle on every call).
    """
    with open("C:/Users/aeshon/Desktop/AlertLog.txt", "a") as log_file:
        log_file.write(str(datetime.datetime.now()) + " " + class_result)
        log_file.write("\n")
def upload(drive):
    """Push the current alert log to the configured Drive folder."""
    log_path = "C:/Users/aeshon/Desktop/AlertLog.txt"
    with open(log_path, "r") as log_file:
        upload_file_to_specific_folder(drive=drive, file=log_file)
def upload_file_to_specific_folder(drive, file, folder_id="1Vm0tcB0E3z0rcmhcj8JI92T5vIfpv6hs"):
    """Upload an open file object into the Drive folder `folder_id`.

    The file's on-disk basename becomes the Drive item title; the default
    folder_id is the project's hard-coded alerts folder.
    """
    file_metadata = {'title': os.path.basename(file.name), "parents": [{"id": folder_id, "kind": "drive#childList"}]}
    folder = drive.CreateFile(file_metadata)
    folder.SetContentFile(file.name)
    folder.Upload()
# Script entry: authenticate to Drive once, then — as soon as motion is
# detected on the camera — snapshot an image and start the classify/upload
# loop (predict never returns).
drive1 = authorize()
if object_detector():
    picture()
    predict(drive1)
| [
"noreply@github.com"
] | aeshon.noreply@github.com |
e21f6e163ee9906420a48fd0c1c3bab68a8b1543 | d640541bf0ca2f9179ef451837bcb2c571db1484 | /03b.py | aa8ce4ca6a5534b96660c053921d149fc0e58998 | [
"Unlicense"
] | permissive | jpparent/aoc2020 | e30620ca4d04d677c0a00289cb56b736c87d0920 | abac15925e99fedcca27f015dd73c09a55c4da86 | refs/heads/main | 2023-02-02T13:32:04.836685 | 2020-12-18T23:37:48 | 2020-12-18T23:37:48 | 318,365,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import re
import math
# Advent of Code 2020 day 3 part 2: count trees ('#') hit while sledding
# down the grid at several (right, down) slopes, then print the product.
f = open('03.txt')
rows = f.readlines()
f.close()

treeCounts = [0,0,0,0,0]  # one counter per slope below
slopes = [[1,1],[3,1],[5,1],[7,1],[1,2]]  # (right-step, down-step) pairs

for s in range(0,len(slopes)):
    x = 0
    y = 0
    # use a while loop to be able to change the index dynamically
    while y < len(rows):
        if rows[y][x] == '#':
            treeCounts[s] += 1
        x += slopes[s][0]
        # loop around the input's horizontally
        # (len-1 excludes the trailing newline on each row)
        if x >= len(rows[y])-1:
            x -= len(rows[y])-1
        # go down the slope
        y += slopes[s][1]

print(str(math.prod(treeCounts)))
"jeanp.parent@gmail.com"
] | jeanp.parent@gmail.com |
fae63e0ae8a040c911813758ee97fb64496967ad | 2b732a2c645f5176e7a4f262f15e8e33576f89b6 | /code-injection/preprocess-vulnerable2.py | 95c63d7c5d4cb4b640c54a5aca5eed884ee52c27 | [] | no_license | yanruibo/code-injection | a76ba3b89dc62d34cc193e6751c60bf2fcbd211a | 7baf028150e5c5b692d46ba8d87b584bd28f0c36 | refs/heads/master | 2021-01-15T18:00:56.365688 | 2017-08-09T07:21:19 | 2017-08-09T07:21:19 | 99,771,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,117 | py | #!/usr/bin/python
# encoding: utf-8
import sys
import os
import time
def batchDecode():
    """Extract permission/keyword features from every APK in vulnerable-apks.

    For each APK: dump its manifest permissions with `aapt`, unzip it, scan
    the unpacked .htm/.js files for sensitive API keywords, then append one
    CSV row of 0/1 features (permission flags, keyword flags, class label
    "1") to vulnerable-infogain.dat.  APKs whose feature dict came out
    incomplete are recorded in the "unscess" log.  (Python 2 code:
    print statements, dict.has_key.)
    """
    # Android permissions matched against `aapt dump permissions` output.
    permissions = ["android.permission.INTERNET", "android.permission.ACCESS_NETWORK_STATE",
                   "android.permission.CAMERA", "android.permission.FLASHLIGHT",
                   "android.permission.ACCESS_COARSE_LOCATION", "android.permission.ACCESS_FINE_LOCATION",
                   "android.permission.WRITE_EXTERNAL_STORAGE", "android.permission.RECORD_AUDIO",
                   "android.permission.RECORD_VIDEO", "android.permission.MODIFY_AUDIO_SETTINGS",
                   "android.permission.READ_PHONE_STATE", "android.permission.READ_CONTACTS",
                   "android.permission.WRITE_CONTACTS", "android.permission.GET_ACCOUNTS"
                   ]
    # earlier, smaller feature set kept for reference:
    # functions = [
    #    "cordova.plugins.barcodeScanner.", "sms.send(",
    #    "nfc.", "cordova.file.",
    #    "connectivity.", "bluetoothSerial.",
    #    "navigator.contacts.", "batterystatus batterycritical batterylow",
    #
    #    "document.write(", "document.writeln(",
    #    "innerHTML(", "outerHTML(",
    #    "html(", "append(",
    #    "prepend(", "before(",
    #    "after(", "replaceAll(",
    #    "replaceWith("
    #
    # ]
    # infogain-selected keyword features; entries with spaces are groups of
    # alternative keywords that map to one feature (first word is the key).
    functions = [
        "cordova.plugins.barcodeScanner.", "sms.send(",
        "nfc.", "cordova.file.",
        "connectivity.", "bluetoothSerial.",
        "navigator.contacts.", "batterystatus batterycritical batterylow",
        "document.write(", "document.writeln(",
        "innerHTML(", "outerHTML(",
        "html(", "append(",
        "prepend(", "before(",
        "after(", "replaceAll(",
        "replaceWith(",
        "indexOf(",".name","toLowerCase(","split","replace(","setTimeout","parseInt(","document.createElement(","document.getElementById(",".value",
        "append(","resize(","dblclick(","prependTo(","each(","focus(","toggleClass(","after(","mouseout(","one(",
        "media.pause","CameraPopoverHandle","navigator.accelerometer.getCurrentAcceleration","navigator.globalization.dateToString","console.dirxml","capture.captureAudio","console.error","cordova.file.","console.dir","media.play",
    ]

    fu = open("/home/yanruibo/code-injection/process-info/vulnerable/vulnerable-peprocessed-info-unscess.txt", "w")
    # fz = open("/home/yanruibo/code-injection/process-info/vulnerable/vulnerable-peprocessed-uzip-info.txt","w")
    dirname = "/home/yanruibo/code-injection/vulnerable-apks"
    apknames = os.listdir(dirname)
    function_dict = {}
    # function_dict[key] = classCount.get(key, 0) + 1
    ff = open("/home/yanruibo/code-injection/process-info/vulnerable/vulnerable-infogain.dat", "a")
    counter = 0
    for apkname in apknames:
        print counter
        counter += 1
        # packagename = apkname[:-4]
        # Parse AndroidManifest.xml to collect the permission information
        tmp = os.popen('aapt dump permissions /home/yanruibo/code-injection/vulnerable-apks/' + apkname).read()
        # fz.write(tmp)
        # print tmp
        for permision in permissions:
            if(tmp.find(permision) != -1):
                ff.write("1,")
            else:
                ff.write("0,")
        # unzip -o -d /home/sunny myfile.zip
        tmp = os.popen("unzip -o -d " + "/home/yanruibo/code-injection/vulnerable-decoded-apks/" + apkname[:-4] + " /home/yanruibo/code-injection/vulnerable-apks/" + apkname).read()
        # fz.write(tmp)
        rootDir = "/home/yanruibo/code-injection/vulnerable-decoded-apks/" + apkname[:-4]
        list_dirs = os.walk(rootDir)
        for root, dirs, files in list_dirs:
            for f in files:
                # print f
                # print root
                abspath_file = os.path.join(root, f)
                # print abspath_file
                extension_name = os.path.splitext(abspath_file)[1]
                # print extension_name
                if(extension_name.find(".htm") != -1):
                    fr = open(abspath_file, 'r')
                    content = fr.read()
                    fr.close()
                    for item in functions:
                        keywords = item.split()
                        for keyword in keywords:
                            if(content.find(keyword) != -1):
                                # keyword found: latch the feature to 1
                                if(function_dict.has_key(keywords[0])):
                                    if(function_dict[keywords[0]] != 1):
                                        function_dict[keywords[0]] = 1
                                else:
                                    function_dict[keywords[0]] = 1
                            else:
                                # keyword not found: record 0 only if unseen
                                if(function_dict.has_key(keywords[0])):
                                    pass
                                else:
                                    function_dict[keywords[0]] = 0
                if(extension_name == ".js"):
                    fread = open(abspath_file, 'r')
                    content = fread.read()
                    fread.close()
                    # skip bundled third-party libraries by license markers
                    if((content.find("Copyright") != -1)
                       or (content.find("copyright") != -1)
                       or (content.find("jqueryui.com") != -1)
                       or (content.find("jquery.com") != -1)  # online reference; a phone app almost never references it online
                       or (content.find("Licensed") != -1)
                       or (content.find("licensed") != -1)
                       or (content.find("jquery.org") != -1)
                       or (content.find("www.apache.org") != -1)
                       or (content.find("License") != -1)
                       or (content.find("license") != -1)):
                        pass
                    else:
                        for function in functions:
                            keywords = function.split()
                            for keyword in keywords:
                                if(content.find(keyword) != -1):
                                    if(function_dict.has_key(keywords[0])):
                                        if(function_dict[keywords[0]] != 1):
                                            function_dict[keywords[0]] = 1
                                    else:
                                        function_dict[keywords[0]] = 1
                                else:
                                    if(function_dict.has_key(keywords[0])):
                                        pass
                                    else:
                                        function_dict[keywords[0]] = 0
        try:
            # emit the keyword feature columns in a fixed order
            for function in functions:
                keywords = function.split()
                ff.write(str(function_dict[keywords[0]]) + ",")
        except:
            # incomplete feature dict (or write error): log the APK name
            fu.write(apkname[:-4] + "\n")
            fu.flush()
        finally:
            # class label column ("1" = vulnerable) terminates the row
            ff.write("1\n")
        function_dict = {}
    ff.close()
    fu.close()
    # fz.close()
if __name__ == "__main__":
    # Time the whole batch run and record the elapsed seconds.
    start = time.time()
    batchDecode()
    elapsed = time.time() - start
    with open("/home/yanruibo/code-injection/process-info/vulnerable/vulnerable-peprocessed-info-total-time.txt", "w") as ft:
        ft.write("Total Time : " + str(elapsed) + "\n")
| [
"1254877260@qq.com"
] | 1254877260@qq.com |
ff8d748191c31b8b253034f9d62c1fd84965c6b0 | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Telusko/38.py | ab8c735ad7203fd88037ecaede6afb9d31e6eb10 | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | def count(lst):
even = 0
odd = 0
for i in lst:
if i % 2 == 0 :
even += 1
else:
odd += 1
return even, odd
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
even, odd = count(lst)
print("Even : ", even)
print("Odd : ", odd)
print("-----")
def count(lst):
even = 0
odd = 0
for i in lst:
if i % 2 == 0 :
even += 1
else:
odd += 1
return even, odd
lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
even, odd = count(lst)
print("Even : {}, Odd : {}".format(even, odd))
print("-----")
def count(lst):
even = 0
odd = 0
for i in lst:
if i % 2 == 0 :
even += 1
else:
odd += 1
return even, odd
n = int(input("Enter Your List Length : "))
lst = []
for i in range(n):
n = int(input())
lst.append(n)
print(lst)
even, odd = count(lst)
print("Even : ", even)
print("Odd : ", odd)
print("-----")
def Student(name):
u = 0
l = 0
for i in name:
ln = len(i)
if ln > 5:
u += 1
else:
l += 1
return u, l
name = []
x = int(input("How Many Students : "))
print("Enter Students Name : ")
for i in range(x):
n = str(input())
name.append(n)
upper5, lower5 = Student(name)
print("Upper Students : ", upper5)
print("Lower Students : ", lower5) | [
"touhiduzzamantuhin95@gmail.com"
] | touhiduzzamantuhin95@gmail.com |
0ccc62939f1fb13e5b443b57aa48bebf5f5fb00b | fec7d40f11d13694ae1d6f72aa1f9c540e976682 | /alerta/common/alert.py | 27694428800a7a47e3ec132d450335cb6b81d362 | [
"Apache-2.0"
] | permissive | xioster/alerta | 9f697cf7bf4c752c9097234a1b27623f22ea7ccd | 3d7fd328221603c96e67ef20d61bb5320ea18f1a | refs/heads/master | 2021-01-21T06:02:14.634649 | 2014-03-20T15:38:23 | 2014-03-20T15:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,552 | py | import os
import sys
import datetime
import json
from uuid import uuid4
import re
import fnmatch
import yaml
from alerta.common import log as logging
from alerta.common import config
from alerta.common import status_code, severity_code
from alerta.common.utils import DateEncoder, isfloat
LOG = logging.getLogger(__name__)
CONF = config.CONF
ATTRIBUTES = [
'resource',
'event',
'correlatedEvents',
'group',
'value',
'status',
'severity',
'previousSeverity',
'environment',
'service',
'text',
'type',
'tags',
'origin',
'repeat',
'duplicateCount',
'thresholdInfo',
'summary',
'timeout',
'id',
'lastReceiveId',
'createTime',
'expireTime',
'receiveTime',
'lastReceiveTime',
'trendIndication',
'rawData',
'moreInfo',
'graphUrls',
'history',
]
prog = os.path.basename(sys.argv[0])
class Alert(object):
alert_opts = {
'yaml_config': '/etc/alerta/%s.yaml' % prog,
'parser_dir': '/etc/alerta/parsers',
}
    def __init__(self, resource, event, correlate=None, group=None, value=None, status=status_code.UNKNOWN,
                 severity=severity_code.NORMAL, previous_severity=severity_code.UNKNOWN, environment=None, service=None,
                 text=None, event_type=None, tags=None, origin=None, repeat=False, duplicate_count=0,
                 threshold_info='n/a', summary=None, timeout=86400, alertid=None, last_receive_id=None,
                 create_time=None, expire_time=None, receive_time=None, last_receive_time=None, trend_indication=None,
                 raw_data=None, more_info=None, graph_urls=None, history=None):
        """Create an alert; resource and event are mandatory (ValueError if missing).

        receive_time, last_receive_time, trend_indication and history are
        only set as attributes when explicitly supplied, so consumers must
        guard with hasattr() — see get_body().
        """
        config.register_opts(Alert.alert_opts)

        # shadows the module-level `prog`; used for the default origin below
        prog = os.path.basename(sys.argv[0])

        if not resource:
            raise ValueError('Missing mandatory value for resource')
        if not event:
            raise ValueError('Missing mandatory value for event')

        self.resource = resource
        self.event = event
        self.correlate = correlate or list()
        self.group = group or 'Misc'
        # numeric-looking values are normalized to two decimal places
        if isfloat(value):
            self.value = '%.2f' % float(value)
        else:
            self.value = value or 'n/a'
        self.status = status
        self.severity = severity
        self.previous_severity = previous_severity
        self.environment = environment or ['PROD']
        self.service = service or ['Undefined']
        self.text = text or ''
        self.event_type = event_type or 'exceptionAlert'
        self.tags = tags or dict()
        self.origin = origin or '%s/%s' % (prog, os.uname()[1])
        self.repeat = repeat
        self.duplicate_count = duplicate_count
        self.threshold_info = threshold_info
        self.summary = summary or '%s - %s %s is %s on %s %s' % (
            ','.join(self.environment), self.severity.capitalize(), self.event, self.value, ','.join(self.service), self.resource)
        self.timeout = timeout or CONF.global_timeout
        self.alertid = alertid or str(uuid4())

        if last_receive_id:
            self.last_receive_id = last_receive_id
        else:
            self.last_receive_id = self.alertid

        self.create_time = create_time or datetime.datetime.utcnow()
        self.expire_time = expire_time or self.create_time + datetime.timedelta(seconds=self.timeout)

        if receive_time:
            self.receive_time = receive_time
        if last_receive_time:
            self.last_receive_time = last_receive_time
        if trend_indication:
            self.trend_indication = trend_indication

        self.raw_data = raw_data
        self.more_info = more_info
        self.graph_urls = graph_urls or list()

        if history:
            self.history = history
def get_id(self, short=False):
if short:
return self.alertid.split('-')[0]
else:
return self.alertid
def get_header(self):
header = {
'type': self.event_type,
'correlation-id': self.alertid,
}
return header
    def get_body(self):
        """Serialize the alert to a dict using the wire-format key names.

        Optional attributes (status, receiveTime, lastReceiveTime,
        lastReceiveId, trendIndication, history) are included only when the
        corresponding attribute was set — see __init__.
        """
        alert = {
            'resource': self.resource,
            'event': self.event,
            'correlatedEvents': self.correlate,
            'group': self.group,
            'value': self.value,
            'severity': self.severity,
            'previousSeverity': self.previous_severity,
            'environment': self.environment,
            'service': self.service,
            'text': self.text,
            'type': self.event_type,
            'tags': self.tags,
            'origin': self.origin,
            'repeat': self.repeat,
            'duplicateCount': self.duplicate_count,
            'thresholdInfo': self.threshold_info,
            'summary': self.summary,
            'timeout': self.timeout,
            'id': self.alertid,
            'createTime': self.create_time,
            'expireTime': self.expire_time,
            'rawData': self.raw_data,
            'moreInfo': self.more_info,
            'graphUrls': self.graph_urls,
        }

        if hasattr(self, 'status'):
            alert['status'] = self.status
        if hasattr(self, 'receive_time'):
            alert['receiveTime'] = self.receive_time
        if hasattr(self, 'last_receive_time'):
            alert['lastReceiveTime'] = self.last_receive_time
        if hasattr(self, 'last_receive_id'):
            alert['lastReceiveId'] = self.last_receive_id
        if hasattr(self, 'trend_indication'):
            alert['trendIndication'] = self.trend_indication
        if hasattr(self, 'history'):
            alert['history'] = self.history

        return alert
    def get_type(self):
        """Return the alert event type (defaults to 'exceptionAlert')."""
        return self.event_type
    def get_severity(self):
        """Return the alert severity."""
        return self.severity
def get_create_time(self):
return self.create_time.replace(microsecond=0).isoformat() + ".%03dZ" % (self.create_time.microsecond // 1000)
def get_receive_time(self):
return self.receive_time.replace(microsecond=0).isoformat() + ".%03dZ" % (self.receive_time.microsecond // 1000)
def get_last_receive_time(self):
return self.last_receive_time.replace(microsecond=0).isoformat() + ".%03dZ" % (self.last_receive_time.microsecond // 1000)
    def receive_now(self):
        """Stamp the alert with the current UTC time as its receive time."""
        self.receive_time = datetime.datetime.utcnow()
    def __repr__(self):
        """Unambiguous representation showing both header and body dicts."""
        return 'Alert(header=%r, alert=%r)' % (self.get_header(), self.get_body())
    def __str__(self):
        """Pretty-printed JSON body (datetimes serialized via DateEncoder)."""
        return json.dumps(self.get_body(), cls=DateEncoder, indent=4)
    @staticmethod
    def parse_alert(alert):
        """Deserialize a JSON alert string into an Alert instance.

        The four *Time fields are parsed as '%Y-%m-%dT%H:%M:%S.%fZ'.
        Raises ValueError (after logging) if the JSON or any timestamp
        cannot be parsed.
        """
        try:
            alert = json.loads(alert)
        except ValueError, e:
            LOG.error('Could not parse alert - %s: %s', e, alert)
            raise

        for k, v in alert.iteritems():
            if k in ['createTime', 'receiveTime', 'lastReceiveTime', 'expireTime']:
                try:
                    alert[k] = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%fZ')
                except ValueError, e:
                    LOG.error('Could not parse date time string: %s', e)
                    raise

        return Alert(
            resource=alert.get('resource', None),
            event=alert.get('event', None),
            correlate=alert.get('correlatedEvents', None),
            group=alert.get('group', None),
            value=alert.get('value', None),
            status=status_code.parse_status(alert.get('status', status_code.UNKNOWN)),
            severity=severity_code.parse_severity(alert.get('severity', severity_code.NORMAL)),
            # NOTE(review): the default below uses status_code.UNKNOWN, not
            # severity_code.UNKNOWN (as __init__ does) — likely a typo; confirm.
            previous_severity=severity_code.parse_severity(alert.get('previousSeverity', status_code.UNKNOWN)),
            environment=alert.get('environment', None),
            service=alert.get('service', None),
            text=alert.get('text', None),
            event_type=alert.get('type', None),
            tags=alert.get('tags', dict()),
            origin=alert.get('origin', None),
            repeat=alert.get('repeat', False),
            duplicate_count=alert.get('duplicateCount', 0),
            threshold_info=alert.get('thresholdInfo', None),
            summary=alert.get('summary', None),
            timeout=alert.get('timeout', None),
            alertid=alert.get('id', None),
            last_receive_id=alert.get('lastReceiveId', None),
            create_time=alert.get('createTime', None),
            expire_time=alert.get('expireTime', None),
            receive_time=alert.get('receiveTime', None),
            last_receive_time=alert.get('lastReceiveTime', None),
            trend_indication=alert.get('trendIndication', None),
            raw_data=alert.get('rawData', None),
            more_info=alert.get('moreInfo', None),
            graph_urls=alert.get('graphUrls', None),
        )
    def transform_alert(self, trapoid=None, facility=None, level=None, **kwargs):
        """Apply the YAML transformer config to this alert in place.

        Each config entry is matched against the alert: by SNMP trap OID
        regex for snmptrapAlerts, by fnmatch on "facility.level" for
        syslogAlerts, or by attribute equality via a 'match' dict.  On a
        match, simple field substitutions are applied, an optional external
        parser script is exec'd with the alert's attributes as its
        namespace, and a suppress flag may be set.  Returns True if the
        alert should be suppressed (None if no config file exists).
        Raises RuntimeError if the config or a parser fails.
        """
        LOG.info('Transform alert %s using %s', self.get_id(), CONF.yaml_config)

        if not os.path.exists(CONF.yaml_config):
            return

        suppress = False
        try:
            conf = yaml.load(open(CONF.yaml_config))
            LOG.info('Loaded %d transformer configurations OK', len(conf))
        except Exception, e:
            LOG.error('Failed to load transformer configuration %s: %s', CONF.yaml_config, e)
            raise RuntimeError

        for c in conf:
            LOG.debug('YAML config: %s', c)

            match = None
            pattern = None

            if self.get_type() == 'snmptrapAlert' and trapoid and c.get('trapoid'):
                match = re.match(c['trapoid'], trapoid)
                pattern = trapoid
            elif self.get_type() == 'syslogAlert' and facility and level and c.get('priority'):
                match = fnmatch.fnmatch('%s.%s' % (facility, level), c['priority'])
                pattern = c['priority']
            elif c.get('match'):
                try:
                    match = all(item in self.__dict__.items() for item in c['match'].items())
                    pattern = c['match'].items()
                except AttributeError:
                    pass

            if match:
                LOG.debug('Matched %s for %s', pattern, self.get_type())

                # 1. Simple substitutions
                if 'event' in c:
                    self.event = c['event']
                if 'resource' in c:
                    self.resource = c['resource']
                if 'severity' in c:
                    self.severity = c['severity']
                if 'group' in c:
                    self.group = c['group']
                if 'value' in c:
                    self.value = c['value']
                if 'text' in c:
                    self.text = c['text']
                if 'environment' in c:
                    self.environment = c['environment']
                if 'service' in c:
                    self.service = c['service']
                if 'tags' in c:
                    self.tags.update(c['tags'])  # merge tags
                if 'correlate' in c:
                    self.correlate = c['correlate']
                if 'threshold_info' in c:
                    self.threshold_info = c['threshold_info']
                if 'summary' in c:
                    self.summary = c['summary']
                if 'timeout' in c:
                    self.timeout = c['timeout']

                # 2. Complex transformations
                if 'parser' in c:
                    LOG.debug('Loading parser %s', c['parser'])

                    context = kwargs
                    context.update(self.__dict__)
                    try:
                        # NOTE(review): exec of an on-disk parser script —
                        # assumes the parser dir is trusted, operator-only.
                        exec(open('%s/%s.py' % (CONF.parser_dir, c['parser']))) in globals(), context
                        LOG.info('Parser %s/%s exec OK', CONF.parser_dir, c['parser'])
                    except Exception, e:
                        LOG.warning('Parser %s failed: %s', c['parser'], e)
                        raise RuntimeError
                    # copy back any attribute the parser modified
                    for k, v in context.iteritems():
                        if hasattr(self, k):
                            setattr(self, k, v)
                    if 'suppress' in context:
                        suppress = context['suppress']

                # 3. Suppress based on results of 1 or 2
                if 'suppress' in c:
                    suppress = suppress or c['suppress']

        return suppress
    def translate_alert(self, mappings):
        """Apply string substitutions from `mappings` to every text field, in place.

        Each key found anywhere in event, resource, severity, group, value,
        text, environment, service, tags values, correlate, threshold_info
        and summary is replaced with its mapped value.
        """
        LOG.debug('Translate alert using mappings: %s', mappings)

        for k, v in mappings.iteritems():
            LOG.debug('translate %s -> %s', k, v)
            self.event = self.event.replace(k, v)
            self.resource = self.resource.replace(k, v)
            self.severity = self.severity.replace(k, v)
            self.group = self.group.replace(k, v)
            self.value = self.value.replace(k, v)
            self.text = self.text.replace(k, v)
            self.environment[:] = [e.replace(k, v) for e in self.environment]
            self.service[:] = [s.replace(k, v) for s in self.service]

            if self.tags is not None:
                # only tag *values* are translated, keys are preserved
                self.tags = dict([(tag[0], tag[1].replace(k, v)) for tag in self.tags.iteritems()])
            if self.correlate is not None:
                self.correlate[:] = [c.replace(k, v) for c in self.correlate]
            if self.threshold_info is not None:
                self.threshold_info = self.threshold_info.replace(k, v)
            if self.summary is not None:
                self.summary = self.summary.replace(k, v)
| [
"nick.satterly@guardian.co.uk"
] | nick.satterly@guardian.co.uk |
7bbd3df20566f69f128d3cb2acb3bdb80b98385e | f28681bdaf96688c45b35e7bb8fdf7300a5f6685 | /HW03(SSW-810).py | 74ae155b88e90ea7dd8a3171885c7e24eaee87c7 | [] | no_license | SachinMCReddy/810homework | a1b4bbefd8feb363dadca02abd732f280ddc4260 | 05e5e86f9d69b137ffa2a3561c00f8de1a38a49d | refs/heads/master | 2020-05-05T01:08:30.760615 | 2019-09-09T00:16:54 | 2019-09-09T00:16:54 | 179,593,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | ''' python program that includes class fractions , plus , minus, times,
divide ,equal to perform tasks on calculator'''
import unittest
class Fraction:
    """A rational number supporting arithmetic and comparison operators.

    Comparisons use cross-multiplication, so equal values with different
    representations compare equal (3/4 == 6/8).
    NOTE(review): __mul__ and __truediv__ do NOT implement standard fraction
    multiplication/division — both ignore the other operand's numerator.
    The accompanying FractionTest suite asserts these non-standard results
    (via assertFalse), so the behavior is documented but left unchanged.
    """
    def __init__(self, numerator, denominator):
        # store parts first, then reject a zero denominator
        self.numerator = numerator
        self.denominator = denominator
        if self.denominator == 0 :
            raise ValueError("This is not possible to divide by zero ")

    def __str__(self): # display fraction as "numerator/denominator"
        return str(self.numerator) + "/" + str(self.denominator)

    def __add__(self, a): # For addition (cross-multiplied; parts coerced to float)
        num = (self.numerator * a.denominator) + (self.denominator * a.numerator)
        den = (self.denominator * a.denominator)
        return Fraction(float(num), float(den))

    def __sub__(self, a): # For subtraction (cross-multiplied; parts coerced to float)
        num = (self.numerator * a.denominator) - (self.denominator * a.numerator)
        den = (self.denominator * a.denominator)
        return Fraction(float(num), float(den))

    def __mul__(self, a): # For multiplication
        # NOTE(review): standard multiplication would use a.numerator for
        # num — this uses a.denominator, yielding self.num/self.den * 1.
        num = (self.numerator * a.denominator)
        den = (self.denominator * a.denominator)
        return Fraction(float(num), float(den))

    def __truediv__(self, a): # For division
        # NOTE(review): standard division would use a.numerator in den —
        # this uses a.denominator, yielding self.num/self.den unchanged.
        num = (self.numerator * a.denominator)
        den = (self.denominator * a.denominator)
        return Fraction((num), (den))

    def __eq__(self, a): # For equal (cross-multiplied comparison)
        if (self.numerator * a.denominator) == (self.denominator * a.numerator):
            return True
        else:
            return False

    def __ne__(self, a): # For not equal
        if (self.numerator * a.denominator) != (self.denominator * a.numerator):
            return True
        else:
            return False

    def __lt__(self, a): # For less than
        if (self.numerator * a.denominator) < (self.denominator * a.numerator):
            return True
        else:
            return False

    def __le__(self, a): # For less than or equal
        if (self.numerator * a.denominator) <= (self.denominator * a.numerator):
            return True
        else:
            return False

    def __gt__(self, a): # For greater than
        if (self.numerator * a.denominator) > (self.denominator * a.numerator):
            return True
        else:
            return False

    def __ge__(self, a): # For greater than equal
        if (self.numerator * a.denominator) >= (self.denominator * a.numerator):
            return True
        else:
            return False
def get_number(prompt):
    """Prompt repeatedly until the input parses as a float, then return it."""
    number = None
    while number is None:
        raw = input(prompt)
        try:
            number = float(raw)
        except ValueError:
            print("Error: Please try again")
    return number
class FractionTest(unittest.TestCase):
    """Unit tests for the Fraction class defined above."""
    def test_init(self):
        """ verify that the numerator and denominator are set properly """
        f = Fraction(3, 4)
        self.assertEqual(f.numerator, 3)
        self.assertEqual(f.denominator, 4)
    def test_str(self):
        """ verify that __str__() works properly """
        f = Fraction(3, 4)
        self.assertEqual(str(f), '3/4')
    def test_equal(self):
        """test fraction equality """
        f1 = Fraction(3, 4)
        f2 = Fraction(6, 8)
        f3 = Fraction(9, 12)
        self.assertTrue(f1 == f1)
        self.assertTrue(f1 == f2)
        self.assertTrue(f1 == f3)
        self.assertTrue(f2 == f3)
        self.assertFalse(f1 == Fraction(3, 5))
    def test_add(self):
        """ test fraction addition """
        f1 = Fraction(3, 4)
        f2 = Fraction(1, 2)
        f3 = Fraction(1, 3)
        f4 = Fraction(-1, 2)
        f5 = Fraction(2, -3)
        self.assertTrue((f1 + f1) == Fraction(6, 4))
        self.assertTrue((f1 + f2) == Fraction(5, 4))
        self.assertTrue((f1 + f3) == Fraction(13, 12))
        self.assertTrue((f1 + f4) == Fraction(1, 4))
        self.assertTrue((f1 + f5) == Fraction(1, 12))
        self.assertTrue((f4 + f5) == Fraction(-7, 6))
    def test_sub(self):
        """test fraction subtract"""
        f1 = Fraction(1, 2)
        f2 = Fraction(2, 5)
        f3 = Fraction(3, 8)
        f4 = Fraction(-1, 2)
        f5 = Fraction(2, -3)
        self.assertTrue((f1 - f3) == Fraction(1, 8))
        self.assertTrue((f1 - f2) == Fraction(1, 10))
        self.assertTrue((f2 - f3) == Fraction(1, 40))
        self.assertTrue((f1 - f4) == Fraction(2, 2))
        self.assertTrue((f1 - f5) == Fraction(7, 6))
        self.assertTrue((f4 - f5) == Fraction(1, 6))
    def test_mul(self):
        """test fraction multiple"""
        # NOTE(review): mathematically each product below DOES equal the
        # fraction it is compared against (e.g. 2/3 * 3/5 == 6/15), so these
        # assertFalse calls only pass if Fraction.__mul__ is broken.
        # They probably should be assertTrue -- confirm against __mul__.
        f1 = Fraction(2, 3)
        f2 = Fraction(3, 5)
        f3 = Fraction(4, 7)
        f4 = Fraction(-1, 2)
        f5 = Fraction(2, -3)
        self.assertFalse((f1 * f2) == Fraction(6, 15))
        self.assertFalse((f1 * f3) == Fraction(8, 21))
        self.assertFalse((f2 * f3) == Fraction(12, 35))
        self.assertFalse((f1 * f4) == Fraction(-2, 6))
        self.assertFalse((f1 * f5) == Fraction(-4, 9))
        self.assertFalse((f4 * f5) == Fraction(2, 6))
    def test_truediv(self):
        """test fraction truedivide"""
        # NOTE(review): same concern as test_mul -- the quotients below are
        # mathematically equal to the expected fractions (e.g. (1/2)/(3/5)
        # == 5/6), so assertFalse looks inverted; confirm against __truediv__.
        f1 = Fraction(1, 2)
        f2 = Fraction(3, 5)
        f3 = Fraction(4, 7)
        f4 = Fraction(-1, 2)
        f5 = Fraction(2, -3)
        self.assertFalse((f1 / f2) == Fraction(5, 6))
        self.assertFalse((f1 / f3) == Fraction(7, 8))
        self.assertFalse((f2 / f3) == Fraction(21, 20))
        self.assertFalse((f1 / f4) == Fraction(2, -2))
        self.assertFalse((f1 / f5) == Fraction(-3, 4))
        self.assertFalse((f4 / f5) == Fraction(3, 4))
    def test_notequal(self):
        """test fraction notequal"""
        f1 = Fraction(1, 2)
        f2 = Fraction(3, 5)
        f3 = Fraction(4, 7)
        self.assertTrue(f1 != f2)
        self.assertTrue(f1 != f3)
        self.assertTrue(f2 != f3)
        self.assertTrue(f1 != Fraction(1, 3))
    def test_lessthan(self):
        """test fraction less than"""
        f1 = Fraction(1, 2)
        f2 = Fraction(3, 5)
        f3 = Fraction(5, 7)
        self.assertFalse(f2 < f1)
        self.assertTrue(f1 < f3)
        self.assertTrue(f2 < f3)
    def test_lessorequal(self):
        """test fraction less than or equal"""
        f1 = Fraction(2, 3)
        f2 = Fraction(4, 5)
        f3 = Fraction(6, 7)
        self.assertTrue(f1 <= f2)
        self.assertTrue(f1 <= f3)
        self.assertTrue(f2 <= f3)
        self.assertTrue(f1 <= f1)
        self.assertTrue(f2 <= f2)
    def test_greaterthan(self):
        """test fraction greater than"""
        f1 = Fraction(5, 7)
        f2 = Fraction(3, 5)
        f3 = Fraction(1, 2)
        f4 = Fraction(-1, 2)
        f5 = Fraction(2, -3)
        self.assertTrue(f1 > f2)
        self.assertTrue(f1 > f3)
        self.assertTrue(f2 > f3)
        # NOTE(review): f5 (= -2/3) is mathematically NOT greater than
        # f4 (= -1/2); this assertion passes only because __gt__
        # cross-multiplies without normalising negative denominators.
        self.assertTrue(f5 > f4)
        self.assertTrue(f1 > f4)
    def test_greaterorequal(self):
        """test fraction greater than or equal"""
        f1 = Fraction(5, 7)
        f2 = Fraction(3, 5)
        f3 = Fraction(1, 3)
        self.assertTrue(f1 >= f2)
        self.assertTrue(f1 >= f3)
        self.assertTrue(f2 >= f3)
        self.assertTrue(f1 >= f1)
if __name__ == '__main__':
    # note: there is no main(). Only test cases here
    # exit=False keeps an interactive interpreter alive after the run
    unittest.main(exit = False, verbosity = 2)
"noreply@github.com"
] | SachinMCReddy.noreply@github.com |
14d0b9a109e4e6c4dc749ff927ec9aeaca68696d | 93652621e9e57141321bdd95ac23991a82eb5039 | /search_list_unknown_length.py | 491c4494388cefc5c05234e0bf06e1758719289c | [] | no_license | chefmohima/DS_Algo | 02912666d4cb42cdd293e7598e8419e8efdb69e6 | df2306f585a2e5a7aaa8cc8fc09f4c6ab2a40a46 | refs/heads/master | 2020-05-27T04:37:16.118227 | 2020-02-01T13:23:51 | 2020-02-01T13:23:51 | 188,485,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | def search_list_unknown_length(l, upper_bound, item):
#start = 0
# no end pointer as we do not know length of list
end = 1
while end < upper_bound:
start = end
end = end*2
start = 0
# do a binary search between start and end
while start<= end:
mid = (start+end)//2
if l[mid] == item:
return mid
elif l[mid] < item:
start = mid+1
elif l[mid] > item:
end = mid-1
return -1
# demo invocation; the returned index (1) is discarded -- consider print()
search_list_unknown_length([1,2,4,9,12,16],4,2)
| [
"38052670+chefmohima@users.noreply.github.com"
] | 38052670+chefmohima@users.noreply.github.com |
efba47c512c696460f8cf7188eb8de43a3e0bf42 | 2bd6727faeed9d4a0cade3bf3ca4bb8bed0d47dd | /demographicFiltering.py | 980a84d6fea1ca4643796504cd3b1fe0e847448c | [] | no_license | ishitha08/c142 | 844f296a78f64e65e4c77d40502b3283ccda50b8 | 4c442d69626fdcc9594d385fc91d56d666232baa | refs/heads/main | 2023-01-13T19:32:53.657739 | 2020-11-17T12:25:52 | 2020-11-17T12:25:52 | 313,611,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import pandas as pd
import numpy as np
df = pd.read_csv('final.csv')
c = df['vote_average'].mean()
m = df['vote_count'].quantile(0.9)
q_movies = df.copy().loc[df['vote_count']>=m]
def weighted_rating(x,m = m,c = c):
v = x['vote_count']
r = x['vote_average']
return (v/(v+m)*r)+(m/(m+v)*c)
q_movies['score'] = q_movies.apply(weighted_rating,axis = 1)
q_movies = q_movies.sort_values('score',ascending = False)
output = q_movies[['title','poster_link','release_date','runtime','vote_average','overview']].head(20).values.tolist()
print(output) | [
"noreply@github.com"
] | ishitha08.noreply@github.com |
612671af0b1146ea718e55e21d2773916fdab858 | d0eadb2b1196480594b4bda9cf948b22b18950b8 | /决策树/treePlotter.py | 15d7bfdc5f7d56212a365f243d2e979644ce133b | [] | no_license | HXACA/Machine-Learning-in-Action | 26b0005c6fd1890cab63719c7a4fb9f1abfa2b12 | cb0eeb8483b879e730a4c79d0be217f54740af27 | refs/heads/master | 2021-09-20T21:16:18.253554 | 2018-08-15T07:18:26 | 2018-08-15T07:18:26 | 115,706,010 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,903 | py | # -- coding: utf-8 --
import matplotlib.pyplot as plt
from pylab import *
# use the SimHei font so Chinese labels render correctly
mpl.rcParams['font.sans-serif'] = ['SimHei']
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
# styles for the leaf nodes, decision boxes and connecting arrows
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    """Draw one annotated node at centerPt with an arrow from parentPt.

    nodeType is a bbox style dict (decisionNode or leafNode).
    Requires createPlot() to have been called first (it sets createPlot.ax1).
    """
    annotate_kwargs = dict(
        xy=parentPt, xycoords='axes fraction',
        xytext=centerPt, textcoords='axes fraction',
        va="center", ha="center",
        bbox=nodeType, arrowprops=arrow_args,
    )
    createPlot.ax1.annotate(nodeTxt, **annotate_kwargs)
#注解函数,可以用来对坐标中的数据进行注解,让人更清晰的得知坐标点得意义,现在对其参数作用进行分析:
#xy -- 为点的坐标
#xytext -- 为注解内容位置坐标,当该值为None时,注解内容放置在xy处
#xycoords and textcoords 是坐标xy与xytext的说明,若textcoords=None,则默认textNone与xycoords相同,若都未设置,默认为data,
#arrowprops -- 用于设置箭头的形状,类型为字典类型
def createPlot(inTree):
    """Render the decision tree ``inTree`` in a new matplotlib figure."""
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    # clear the drawing area
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=True, **axprops)
    # frameon controls the outer frame of the axes
    #plotNode( U'决策节点', (0.5, 0.1), (0.1, 0.5), decisionNode)
    #plotNode( U'叶节点', (0.8, 0.1), (0.3, 0.8) , leafNode)
    # (commented-out demo calls; kept because of a Unicode decoding issue)
    # layout state is stashed on the plotTree function object:
    # totalW/totalD are the leaf count and depth, xOff/yOff the cursor
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW
    plotTree.yOff = 1.0
    # track the current x, y plotting position
    plotTree(inTree, (0.5, 1.0), ' ')
    plt.show()
#获取叶节点的数目
def getNumLeafs(myTree):
    """Return the number of leaf nodes in a decision tree.

    myTree is a nested dict: {feature_label: {feature_value: subtree_or_leaf}}.
    """
    numLeafs = 0
    # BUG FIX: myTree.keys()[0] only works on Python 2 (dict views are not
    # subscriptable on Python 3); next(iter(...)) works on both.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            # a nested dict is an internal node: count its leaves recursively
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs
#获取树的层数
def getTreeDepth(myTree):
    """Return the number of decision-node levels in a decision tree.

    A tree whose children are all leaves has depth 1.
    """
    maxDepth = 0
    # BUG FIX: myTree.keys()[0] only works on Python 2 (dict views are not
    # subscriptable on Python 3); next(iter(...)) works on both.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth
def plotMidText(cntrPt, parentPt, txtString):
    """Write txtString halfway between a parent node and a child node."""
    mid_x = cntrPt[0] + (parentPt[0] - cntrPt[0]) / 2.0
    mid_y = cntrPt[1] + (parentPt[1] - cntrPt[1]) / 2.0
    createPlot.ax1.text(mid_x, mid_y, txtString)
def plotTree(myTree, parentPt, nodeTxt):
    """Recursively draw the subtree ``myTree`` below ``parentPt``.

    Relies on the totalW/totalD/xOff/yOff attributes that createPlot()
    stashes on this function object.
    """
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    # NOTE(review): dict.keys()[0] is Python-2-only syntax; on Python 3 use
    # next(iter(myTree)) instead.
    firstStr = myTree.keys()[0]
    # x position: centre of this subtree's leaves; y: current layer
    cntrPt = (plotTree.xOff + (1.0+float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    # label the edge from the parent, then draw this decision node
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    # children of this node
    plotTree.yOff = plotTree.yOff-1.0/plotTree.totalD
    # descend one layer (y decreases by one layer height)
    for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':
            plotTree(secondDict[key], cntrPt, str(key))
            # internal node: recurse with this node as the parent
        else:
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff,plotTree.yOff), cntrPt, leafNode)
            # draw the leaf node
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt,str(key))
            # label the edge to the leaf
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
    # restore y when returning to the layer above
"718531794@qq.com"
] | 718531794@qq.com |
f1bd2b0a1cc1759a31d1e58ff56ef537fbbf3bbb | 8fcfe4980c8ce0825beafd881c83db153e2d39c2 | /tests/datasets/test_nwpu.py | c8e6fb1d7e9a72cc9d2e4b3977aeb5344d424985 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Z-Zheng/torchgeo | af1953b2595c4498814a63e805e80744b6ba6ee9 | 53ff5ffbe598cfeae785a37dcdf1d0949d333f08 | refs/heads/main | 2023-08-12T09:09:20.669949 | 2021-09-21T22:21:13 | 2021-09-21T22:21:13 | 408,154,101 | 1 | 0 | MIT | 2021-09-19T14:48:51 | 2021-09-19T14:48:51 | null | UTF-8 | Python | false | false | 3,123 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
import sys
from pathlib import Path
from typing import Generator
import pytest
import torch
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from torch.utils.data import ConcatDataset
import torchgeo.datasets.utils
from torchgeo.datasets import VHR10
from torchgeo.transforms import Identity
pytest.importorskip("rarfile")
pytest.importorskip("pycocotools")
def download_url(url: str, root: str, *args: str) -> None:
    """Test stub for torchgeo's download helper: treat ``url`` as a local
    path and copy that file into the directory ``root``; extra positional
    arguments are accepted and ignored."""
    source, destination = url, root
    shutil.copy(source, destination)
# TODO: figure out how to install unrar on Windows in GitHub Actions
@pytest.mark.skipif(sys.platform == "win32", reason="requires unrar executable")
class TestVHR10:
    """Tests for the VHR-10 dataset, run against tiny local fixture files;
    network downloads are monkeypatched to plain file copies."""
    @pytest.fixture(params=["positive", "negative"])
    def dataset(
        self,
        monkeypatch: Generator[MonkeyPatch, None, None],
        tmp_path: Path,
        request: SubRequest,
    ) -> VHR10:
        # redirect both download entry points to the local copy stub
        monkeypatch.setattr(  # type: ignore[attr-defined]
            torchgeo.datasets.nwpu, "download_url", download_url
        )
        monkeypatch.setattr(  # type: ignore[attr-defined]
            torchgeo.datasets.utils, "download_url", download_url
        )
        # point the image/annotation metadata at the fixture files and
        # their checksums so checksum=True still passes
        url = os.path.join("tests", "data", "vhr10", "NWPU VHR-10 dataset.rar")
        monkeypatch.setitem(VHR10.image_meta, "url", url)  # type: ignore[attr-defined]
        md5 = "e5c38351bd948479fe35a71136aedbc4"
        monkeypatch.setitem(VHR10.image_meta, "md5", md5)  # type: ignore[attr-defined]
        url = os.path.join("tests", "data", "vhr10", "annotations.json")
        monkeypatch.setitem(VHR10.target_meta, "url", url)  # type: ignore[attr-defined]
        md5 = "16fc6aa597a19179dad84151cc221873"
        monkeypatch.setitem(VHR10.target_meta, "md5", md5)  # type: ignore[attr-defined]
        root = str(tmp_path)
        split = request.param
        transforms = Identity()
        return VHR10(root, split, transforms, download=True, checksum=True)
    def test_getitem(self, dataset: VHR10) -> None:
        x = dataset[0]
        assert isinstance(x, dict)
        assert isinstance(x["image"], torch.Tensor)
        assert isinstance(x["label"], dict)
    def test_len(self, dataset: VHR10) -> None:
        # expected sizes of the full VHR-10 splits
        if dataset.split == "positive":
            assert len(dataset) == 650
        elif dataset.split == "negative":
            assert len(dataset) == 150
    def test_add(self, dataset: VHR10) -> None:
        # dataset + dataset should concatenate, doubling the length
        ds = dataset + dataset
        assert isinstance(ds, ConcatDataset)
        if dataset.split == "positive":
            assert len(ds) == 1300
        elif dataset.split == "negative":
            assert len(ds) == 300
    def test_already_downloaded(self, dataset: VHR10) -> None:
        # re-instantiating over an existing root must not re-download
        VHR10(root=dataset.root, download=True)
    def test_invalid_split(self) -> None:
        with pytest.raises(AssertionError):
            VHR10(split="train")
    def test_not_downloaded(self, tmp_path: Path) -> None:
        with pytest.raises(RuntimeError, match="Dataset not found or corrupted."):
            VHR10(str(tmp_path))
| [
"ajstewart426@gmail.com"
] | ajstewart426@gmail.com |
261395ceb3f6aaa8f700e67bdfe4e41ae847336a | b39c3568d2347dfb1dfa9581f0bf9e3e24d15a73 | /constant_buffer.py | e55076d7c534bc0f388bdaf2d8bfb48bff9bbf77 | [] | no_license | P-Schumacher/research | 2f55ea4fa5bad32c2537298a619c4f55d4338f06 | 61bcf6aed6594d57c3074b43a2b8ffa80e79ca5d | refs/heads/master | 2023-02-12T08:43:41.400651 | 2021-01-04T13:22:47 | 2021-01-04T13:22:47 | 242,512,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | from per_exp.constant_buffer import main
from omegaconf import OmegaConf
import wandb
import sys
import os
from pudb import set_trace
# hide all GPUs from CUDA to force CPU-only execution
os.environ['CUDA_VISIBLE_DEVICES']='-1'
# exactly one of the two experiment setups below is expected to be enabled
ant_env = False
vrep = True
if vrep:
    main_cnf = OmegaConf.load('configs/vrep_default/vrep_main_conf.yaml')
    env_cnf = OmegaConf.load('configs/vrep_default/vrep_env_conf.yaml')
    agent_cnf = OmegaConf.load('configs/vrep_default/vrep_agent_conf.yaml')
    # Parameters of second cnf file overwrite those of first
    cnf = OmegaConf.merge(main_cnf, env_cnf, agent_cnf)
    exp_cnf = OmegaConf.load(f'per_exp/constant_buffer.yaml')
    cnf = OmegaConf.merge(cnf, exp_cnf)
if ant_env:
    main_cnf = OmegaConf.load('configs/ant_default/ant_main_conf.yaml')
    agent_cnf = OmegaConf.load('configs/ant_default/ant_agent_conf.yaml')
    # Parameters of second cnf file overwrite those of first
    cnf = OmegaConf.merge(main_cnf, agent_cnf)
    exp_cnf = OmegaConf.load(f'per_exp/constant_buffer.yaml')
    cnf = OmegaConf.merge(cnf, exp_cnf)
# command-line overrides take highest precedence
cnf.merge_with_cli()
# flatten the relevant sub-configs into one dict for experiment tracking
if vrep:
    config = {**cnf.main, **cnf.agent, **cnf.coppeliagym.params, **cnf.coppeliagym.sim, **cnf.buffer, **cnf.agent.sub_model, **cnf.agent.meta_model}
else:
    config = {**cnf.main, **cnf.agent, **cnf.buffer, **cnf.agent.sub_model}
if cnf.main.log:
    wandb.init(project=cnf.project, entity=cnf.entity, config=config)
# run 10 repetitions (seeds) of the experiment
for idx in range(10):
    main(cnf, idx)
| [
"pier_schumacher@hotmail.com"
] | pier_schumacher@hotmail.com |
e1165c401f337f497dfd1c72f0e9fa8db6123f58 | 3e59979847bc42e1c90934921c2c7dbeb404d28d | /load_data.py | 96fb3904ba7c11843c726ab4642568d8d84e86ec | [] | no_license | wangqianwen0418/keras_lab | aa5d80d9a30daa1a2b8a769f131e7d23118a3f2d | b72e7bcc4cf5106433c23c2e01acdb220fcf735f | refs/heads/master | 2021-01-16T19:23:38.196223 | 2017-08-27T03:00:27 | 2017-08-27T03:00:27 | 100,157,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | import pickle
import os
import numpy as np
def unpickle(file, label_key='labels'):
import pickle
fo = open(file, 'rb')
d = pickle.load(fo, encoding='bytes')
d_decode={}
for k,v in d.items():
d_decode[k.decode('utf8')]=v
fo.close()
data = d_decode['data']
labels =d_decode[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data,labels
def readCifra10():
    """Load the CIFAR-10 training and test sets from 'cifar-10-batches-py'.

    Returns ((x_train, y_train), (x_test, y_test)) with images shaped
    (N, 3, 32, 32), dtype uint8.
    """
    data_path = 'cifar-10-batches-py'
    # BUG FIX: CIFAR-10 ships 5 training batches of 10000 images (50000
    # total); the original allocated 60000 rows, so x_train/y_train ended
    # with 10000 all-zero images labelled 0.
    num_train_samples = 50000
    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.zeros((num_train_samples,), dtype='uint8')
    for i in range(1, 6):
        fpath = os.path.join(data_path, 'data_batch_' + str(i))
        imgs, labels = unpickle(fpath)
        x_train[(i - 1) * 10000: i * 10000, :, :, :] = imgs
        y_train[(i - 1) * 10000: i * 10000] = labels
    tpath = os.path.join(data_path, 'test_batch')
    x_test, y_test = unpickle(tpath)
    return (x_train, y_train), (x_test, y_test)
def readCifra100(key='fine'):
    """Load the CIFAR-100 train and test sets from 'cifar-100-python'.

    key selects the label granularity: 'fine' (100 classes) or
    'coarse' (20 superclasses).
    """
    data_path = 'cifar-100-python'
    label_key = '{}_labels'.format(key)
    train = unpickle(os.path.join(data_path, 'train'), label_key)
    test = unpickle(os.path.join(data_path, 'test'), label_key)
    return train, test
# NOTE(review): these module-level calls load both datasets (and discard the
# results) on every import of this file; consider moving them under an
# ``if __name__ == '__main__':`` guard.
readCifra10()
readCifra100()
| [
"noreply@github.com"
] | wangqianwen0418.noreply@github.com |
6ba990b9bf4b74fa5190e679058199fd47819f8e | 901c6cbb92896f70a5c670054ed9505be23cdf3b | /core/migrations/0016_order_coupon.py | 458fb3eb0e20694489ba243b63f1ec922a03e265 | [] | no_license | jofre44/django_eccomerce | 4ba8504579212088d06bb11902e327c1b0b8aca7 | c0fa45ce8bb821935f36b1aa351d6f75a2627600 | refs/heads/master | 2023-04-10T09:08:17.554182 | 2021-04-01T19:23:57 | 2021-04-01T19:23:57 | 349,452,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # Generated by Django 2.2.13 on 2021-03-30 17:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0015_coupon'),
]
operations = [
migrations.AddField(
model_name='order',
name='coupon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.Coupon'),
),
]
| [
"jofre44@gmail.com"
] | jofre44@gmail.com |
e211fb352dbdd0695136c3ab8f1889f024b1ada5 | 03ad3139a48b826b3c685950bd6d1eb154c9e190 | /Python/python4/ChessGame.py | 13b36478efd3b83f47720bfc1a85a76b432f4fcd | [] | no_license | Jreema/Kloudone_Assignments | c7da1f7fd92660d32e32629614f265ca325d4084 | 81020429c7c2288e3a855222feb3431760490075 | refs/heads/master | 2022-08-31T16:36:03.585366 | 2020-05-21T10:17:52 | 2020-05-21T10:17:52 | 265,818,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | #Two player Chess Game
import chess
import chess.svg
from IPython.display import SVG
board=chess.Board()
# render the starting position (only visible inside a notebook)
SVG(chess.svg.board(board=board,size=400))
#Prints the chess board
print(board)
# NOTE(review): ``global`` at module level is a no-op; turn1 is module scope.
global turn1
#Loop continus while game is not over or not stale mate or not check mate
# NOTE(review): joined with ``or`` this condition stays True in nearly every
# state (e.g. at stalemate is_checkmate() is still False), so the loop does
# not stop on a draw; ``while not board.is_game_over():`` looks intended.
while(board.is_game_over()==False or board.is_stalemate()==False or board.is_checkmate()== False):
    #print(board.legal_moves)
    #GEt input from user (UCI notation, e.g. "e2e4")
    inp = input("\nEnter the move: ")
    #check if move is a valid move and if so make the move or else print invalid move
    if chess.Move.from_uci(inp) in board.legal_moves:
        board.push_uci(inp)
        print(board)
        # after the push, board.turn is the side to move next; turn1 thus
        # records the side that would be mated if this move is checkmate
        if board.turn:
            turn1="White"
            print("\nWhite has to Move")
        else:
            turn1="Black"
            print("\nBlack has to Move")
        #Checks for a check to the King and if so Alerts the King
        if board.is_check():
            print("\nCheck to the King")
        if board.is_checkmate():
            print("Checkmate")
            break;
    else:
        print(board)
        print("\nInvalid Move")
if(board.is_game_over()):
    print("The game is over")
#Checks for a check mate. then sees if white is mated or black is mated
if(board.is_checkmate()):
    if turn1=="White":
        print("White is mated. White has lost the game")
    else:
        print("Black is mated. Black has lost the game")
#checks for a stale mate to see if game is a draw
elif(board.is_stalemate()):
    print("The game is a draw")
| [
"reemareach@gmail.com"
] | reemareach@gmail.com |
3b513bfffa4db411bd3bed92687039e796e43b32 | 62ad3a52e117674cfcd338f19b364d446bd122eb | /auth/forms.py | b34e8fab7c85b954d11032ffc2c0079bc353dd14 | [] | no_license | marcuswhybrow/django-user-silos | f52a39f7091592d4e52e7f9d5132abf8f8352bb6 | ab7ec92a5663ab063074222104a92308fd6cb6d1 | refs/heads/master | 2021-01-06T20:37:08.873638 | 2010-05-29T12:04:52 | 2010-05-29T12:04:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,970 | py | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from django.template import Context, loader
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils.http import int_to_base36
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and password.

    Username uniqueness is enforced per silo, not globally.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))
    class Meta:
        model = User
        fields = ("username",)
    def __init__(self, silo, *args, **kwargs):
        # the silo scopes username uniqueness: the same username may exist
        # in different silos
        self.silo = silo
        super(UserCreationForm, self).__init__(*args, **kwargs)
    def clean_username(self):
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username, silo=self.silo)
        except User.DoesNotExist:
            # no clash inside this silo -- the username is available
            return username
        raise forms.ValidationError(_("A user with that username already exists."))
    def clean_password2(self):
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return password2
    def save(self, commit=True):
        # hash the raw password before persisting the new user
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Form for editing an existing user; overrides the username field to
    enforce the allowed character set."""
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^[\w.@+-]+$',
        help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
        error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    class Meta:
        model = User
        # NOTE(review): no ``fields``/``exclude`` declared -- this exposes
        # every User field; modern Django raises ImproperlyConfigured here.
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins. Authentication is scoped to the silo passed to
    the constructor.
    """
    username = forms.CharField(label=_("Username"), max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    def __init__(self, silo, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.silo = silo
        self.request = request
        # populated by clean() on successful authentication
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)
    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            # the silo is part of the credentials handed to the auth backend
            self.user_cache = authenticate(username=username, password=password, silo=self.silo)
            if self.user_cache is None:
                raise forms.ValidationError(_("Please enter a correct username and password, and select a silo. Note that both username and password are case-sensitive."))
            elif not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))
        # TODO: determine whether this should move to its own method.
        if self.request:
            if not self.request.session.test_cookie_worked():
                raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
        return self.cleaned_data
    def get_user_id(self):
        # convenience accessor; None until clean() has authenticated someone
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        return self.user_cache
class PasswordResetForm(forms.Form):
    """Collects an e-mail address and sends a one-use password-reset link to
    every matching user inside the form's silo."""
    email = forms.EmailField(label=_("E-mail"), max_length=75)
    def __init__(self, silo, *args, **kwargs):
        self.silo = silo
        # BUG FIX: the super() call previously named "PasswordRestForm"
        # (typo), which raised NameError as soon as the form was constructed.
        super(PasswordResetForm, self).__init__(*args, **kwargs)
    def clean_email(self):
        """
        Validates that a user exists with the given e-mail address.
        """
        email = self.cleaned_data["email"]
        # cache the matching users for save() to iterate over
        self.users_cache = User.objects.filter(email__iexact=email, silo=self.silo)
        if len(self.users_cache) == 0:
            # BUG FIX: grammar in the user-facing message ("sure are" ->
            # "sure you are")
            raise forms.ValidationError(_("That e-mail address doesn't have an associated user account in the specified silo. Are you sure you are registered within that silo?"))
        return email
    def save(self, domain_override=None, email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator):
        """
        Generates a one-use only link for resetting password and sends to the user
        """
        from django.core.mail import send_mail
        for user in self.users_cache:
            if not domain_override:
                current_site = Site.objects.get_current()
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            t = loader.get_template(email_template_name)
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': int_to_base36(user.id),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
                # BUG FIX: was the bare name ``silo`` (NameError at runtime);
                # the silo lives on the form instance.
                'silo': self.silo,
            }
            send_mail(_("Password reset on %s") % site_name,
                t.render(Context(c)), None, [user.email])
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set his/her password without
    entering the old password.
    """
    new_password1 = forms.CharField(label=_("New password"), widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"), widget=forms.PasswordInput)
    def __init__(self, user, *args, **kwargs):
        # the target user whose password will be set
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)
    def clean_new_password2(self):
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2
    def save(self, commit=True):
        # hash and store the new password on the target user
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)
    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password
# render old_password before the two new-password fields
# NOTE(review): ``base_fields.keyOrder`` is the pre-Django-1.7 SortedDict
# API; newer Django versions use ``field_order`` instead.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    (No old-password check: the admin sets the password directly.)
    """
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput)
    def __init__(self, user, *args, **kwargs):
        # the user whose password the admin is changing
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
    def clean_password2(self):
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2
    def save(self, commit=True):
        """
        Saves the new password.
        """
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
| [
"marcus@marcuswhybrow.net"
] | marcus@marcuswhybrow.net |
61abec1d7d2ce4a962c4aecf28b2a2da4db5dad6 | b68887f55cfcd0225d732acfbfcc7f3724e49d5d | /pages/components/image.py | b8d9c0eef20385c5b8d655333a4b32e127497e8c | [
"MIT"
] | permissive | rds0751/nhsuk-content-store | 0ac7eb06f85cc97cd57e58a3f24e19db9991a8a2 | 7bd6a386e3583779ddba2347a4b3a80fdf75b368 | refs/heads/master | 2020-04-19T08:53:54.273378 | 2019-01-29T05:08:18 | 2019-01-29T05:08:18 | 168,092,530 | 0 | 0 | null | 2019-01-29T05:05:33 | 2019-01-29T05:05:33 | null | UTF-8 | Python | false | false | 139 | py | from images.blocks import ImageChooserBlock
from .base import Component
image = Component('image', ImageChooserBlock(icon="radio-full"))
| [
"marcofucci@gmail.com"
] | marcofucci@gmail.com |
9cb36269e42f2c79cc4e3d3221f7e3fa336338ac | b4aba66cb30dcad6d816a97b6e9bc4e11515427f | /sources/commands.py | 7eba1d3153bab5f5a4e4a3b21eaf888c478c7b28 | [
"MIT"
] | permissive | arve0/geogebra-wiki-translation-helper | 3abdcf1865f075a216e1d9d1ef5a6672e72089a7 | d865ceef68e81f1893768252d550548a2a818573 | refs/heads/master | 2020-12-24T16:23:38.769389 | 2014-09-10T19:09:21 | 2014-09-10T19:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,389 | py | # -*- coding: utf-8 -*-
"""
File: commands.py
Author: Arve Seljebu
Email: arve.seljebu@gmail.com
Github: arve0
Description: Class Commands: Get commands from SVN. Return as dictionary.
"""
from urllib import urlopen
import re
from language import Language
class Commands(object):
""" Geogebra commands from SVN as dictionary. """
def __init__(self, language='en', pages=None):
"""
Setup language and pages.
:language: Language code found in language.py.
:pages: List of wikipages(dict) with title key. Title is used to link
commands to wikipages(command['wikiid']).
"""
self.language = Language(language.lower())
self.commands = {}
self._raw_data = None
self.commands_translated = {}
self.pages = pages
def get(self):
"""
Load commands from SVN. Return dictionary of commands.
"""
if self.language.code != 'en':
# raw data does not include names that are equal in english
self._raw_data = urlopen(_en_svn_url()).read()\
.decode('ISO-8859-1')
self.convert_raw_data_to_dictionary()
print u'Fetching commands from SVN.'
self._raw_data = urlopen(self._svn_url()).read().decode('latin1')
self.convert_raw_data_to_dictionary()
self.validate_commands_dict()
self.link_pageid()
return self.commands
def convert_raw_data_to_dictionary(self):
""" Convert the raw data from SVN to a dictionary. """
lines = self._raw_data.split('\n')
for line in lines:
words = line.split('=')
if '.Syntax' not in words[0]:
# capitalize without lowering all other chars (ex: nPr Command)
command = _capitalize(words[0])
key = 'translation'
else:
command = _capitalize(words[0].split('.')[0])
key = 'syntax'
# raw data is escaped unicode, to keep format(explicit \n), decode
# as latin1(back to str) -> decode as escaped unicode
value = _capitalize(words[1])
value = value.encode('latin1').decode('unicode-escape')
# two possibilities: english or not english
# if english -> we got empty dictionary (no key)
# if not english -> we need to update all properties (key exist)
# -> test for key for detecting english/not english
if command in self.commands.iterkeys():
self.commands[command].update({
key: value
})
else:
self.commands.update({
command: {
key: value
}
})
def link_pageid(self):
"""
Find command in wiki. Save pageid as command['wikiid'] on command
dictionary.
"""
self.reverse_translated_commands()
cmd_string = self.commands['Command']['translation']
cmd_pages = [p for p in self.pages if is_command_page(p, cmd_string)]
for page in cmd_pages:
command = page['title'].replace(u' ' + cmd_string, '')
if command in self.commands_translated.iterkeys():
en_command = self.commands_translated[command]
self.commands[en_command].update({
'wikiid': page['id']
})
def validate_commands_dict(self):
"""
Check that every command dict has translation key.
"""
for (command, obj) in self.commands.iteritems():
if 'translation' not in obj.keys():
self.commands[command].update({
'translation': command
})
def reverse_translated_commands(self):
"""
Map translated commands to English commands (reverse dict).
"""
for (command, obj) in self.commands.iteritems():
if 'translation' in obj.keys():
self.commands_translated.update({
obj['translation']: command
})
else:
print u'ERROR: %s does not have a translation property.'\
% (command,)
def _svn_url(self):
""" Return URL to command.properties in SVN for self.language. """
return ('http://dev.geogebra.org/svn/branches/wiki/geogebra/properties/'
+ 'command{0}.properties'.format(self.language.commands_infix))
def print_status(self):
""" Print number of commands. """
msg = u'Language: %s(%s), Number of commands: %i' \
.format(self.language, self.language.code, len(self.commands))
print msg
def _en_svn_url():
"""
Return URL to command.properties in SVN for English.
"""
return 'https://geogebra.googlecode.com/svn/trunk/geogebra/' + \
'desktop/geogebra/properties/command.properties'
def _capitalize(string):
"""
Capitalize a string without lowering all other chararcters.
"""
return string.replace(string[0], string[0].upper(), 1)
def is_command_page(page, command_string):
"""
Return true if ' Command$' matches page['title'].
"""
regex = ' ' + command_string + '$'
match = re.search(regex, page['title'])
return match != None
| [
"arve.seljebu@gmail.com"
] | arve.seljebu@gmail.com |
5b5f682eb3c6905076165ff0281238b7405c9aa2 | 29fae683de63d8684c359d9ad5ce7f52e9d48c78 | /app/posts/admin.py | f2a78f3a31efae6bfe7b1ea8c794a17cd3900f58 | [] | no_license | jeonyh0924/NBAproject | 4dfab90f6251abeaf2828f2aa240b61567d0ce17 | 2adf0d7a08c1d495bbfb07d6c2672e1ec19a2dbe | refs/heads/master | 2022-09-14T11:45:34.490333 | 2020-10-31T04:15:54 | 2020-10-31T04:15:54 | 165,981,470 | 0 | 0 | null | 2022-08-23T17:30:30 | 2019-01-16T05:46:23 | JavaScript | UTF-8 | Python | false | false | 206 | py | from django.contrib import admin
# Register your models here.
from posts.models import *
admin.site.register(Post)
admin.site.register(Postlike)
admin.site.register(Comment)
admin.site.register(HashTags)
| [
"hungyb0924@gmail.com"
] | hungyb0924@gmail.com |
2074d3e264b3b8fb5e6d4dd8340b0ff9c5124334 | f45fa5a3966290e6cb6adc98abab9fb38f0ec7d6 | /entailment/encoder_base.py | 8bfbdea7a5400a9a874040bc0c784ccda1a5769e | [] | no_license | young-k/change-my-view | 151b4ed72586feebfb959c4be2033d0e3b407e17 | 9ed253bf0f5988b627b1ee323b21e68eaaae49e6 | refs/heads/master | 2021-04-09T11:08:53.701227 | 2018-05-12T22:21:47 | 2018-05-12T22:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,879 | py | import torch
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from nn_util import get_lengths_from_binary_sequence_mask, sort_batch_by_length
class _EncoderBase(torch.nn.Module):
# pylint: disable=abstract-method
"""
This abstract class serves as a base for the 3 ``Encoder`` abstractions in AllenNLP.
- :class:`~allennlp.modules.seq2seq_encoders.Seq2SeqEncoders`
- :class:`~allennlp.modules.seq2vec_encoders.Seq2VecEncoders`
Additionally, this class provides functionality for sorting sequences by length
so they can be consumed by Pytorch RNN classes, which require their inputs to be
sorted by length. Finally, it also provides optional statefulness to all of it's
subclasses by allowing the caching and retrieving of the hidden states of RNNs.
"""
def __init__(self, stateful=False):
super(_EncoderBase, self).__init__()
self.stateful = stateful
self._states = None
def sort_and_run_forward(self, module, inputs, mask, hidden_state=None):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
indices into the batch dimension required to restore the tensor to it's correct,
unsorted order and the number of valid batch elements (i.e the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
Parameters
----------
module : ``Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
inputs : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
the inputs to the Encoder.
mask : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length)``, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : ``Optional[RnnState]``, (default = None).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
state of an RNN with or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
Returns
-------
module_output : ``Union[torch.Tensor, PackedSequence]``.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to ``num_valid``, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : ``Optional[RnnState]``
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : ``torch.LongTensor``
A tensor of shape ``(batch_size,)``, describing the re-indexing required to transform
the outputs back to their original batch order.
"""
# In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().data[0]
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices =\
sort_batch_by_length(inputs, sequence_lengths)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :]
for state in hidden_state]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :]
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices
def _get_initial_states(self, batch_size, num_valid, sorting_indices):
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what it's initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.data.new(state.size(0),
num_states_to_concat,
state.size(2)).fill_(0)
zeros = Variable(zeros)
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :]
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices)
for state in correctly_shaped_states]
return tuple(state[:, :num_valid, :] for state in sorted_states)
def _update_states(self, final_states, restoration_indices):
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detatches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
"""
new_unsorted_states = [state.index_select(1, restoration_indices) for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple([torch.autograd.Variable(state.data)
for state in new_unsorted_states])
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
# Masks for the unused states of shape (1, new_batch_size, 1)
used_new_rows_mask = [(state[0, :, :].sum(-1)
!= 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(self._states, new_unsorted_states, used_new_rows_mask):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
# Detatch the Variable.
new_states.append(torch.autograd.Variable(old_state.data))
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(self._states, new_unsorted_states, used_new_rows_mask):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
new_state += masked_old_state
# Detatch the Variable.
new_states.append(torch.autograd.Variable(new_state.data))
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
# this never happens, because the states themeselves are mutated
# by appending zeros when calling _get_inital_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
def reset_states(self):
self._states = None
| [
"ya242@cornell.edu"
] | ya242@cornell.edu |
fd96a58a59e118c00aad2c128be465a88183f0a5 | ac810c7e637afd67cf19704a1a724eaac56fed93 | /Hackerrank_python/13.Regex and Parsing/87.Validating phone numbers.py | ecc9b05424de3018ce6cadfe7dc7fea0d6550838 | [
"MIT"
] | permissive | Kushal997-das/Hackerrank | 57e8e422d2b47d1f2f144f303a04f32ca9f6f01c | 1256268bdc818d91931605f12ea2d81a07ac263a | refs/heads/master | 2021-10-28T06:27:58.153073 | 2021-10-18T04:11:18 | 2021-10-18T04:11:18 | 298,875,299 | 41 | 8 | MIT | 2021-03-01T04:40:57 | 2020-09-26T18:26:19 | Python | UTF-8 | Python | false | false | 667 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
import re
for i in range(int(input())):
x=input()
if re.match(r"[7,8,9]\d{9}$",x):
print("YES")
else:
print("NO")
'''
[789]\d{9}$
The above statement is validating the number/count of digits is 10 or not also
one digit out of [7,8,9] ==> One digit count done \d means any number of digits but here we used \d{9} which means 9 digits
previously 1 digit and later on 9 digits ==> total 10 digits were counted and validated
if you want to make a phone number with 11 digit validation try below and check so that doubts will get resolved
[789]\d{10}$
'''
| [
"noreply@github.com"
] | Kushal997-das.noreply@github.com |
cba8656d4ef8f3c2f9fb42a65d8fec8aca015e5d | a8124cc8631b436cf6a43a36d453fee4e52cd97f | /Algorithm.Python/DividendAlgorithm.py | 18c63db8476e3140408ff6aba738699a1ebfd1e2 | [
"Apache-2.0"
] | permissive | cyrsis/Lean | e3b9391ea4d62595cb0252b5bc1c7a0adf1b2883 | a2b74cef260ff9cec70a23d26ed046e239fb1f19 | refs/heads/master | 2023-03-03T10:03:40.698662 | 2016-08-17T15:45:37 | 2016-08-17T15:45:37 | 65,962,751 | 0 | 0 | Apache-2.0 | 2023-02-18T08:42:12 | 2016-08-18T03:38:45 | C# | UTF-8 | Python | false | false | 3,662 | py | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Brokerages import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
class DividendAlgorithm(QCAlgorithm):
    '''Showcases the dividend and split event of QCAlgorithm
    The data for this algorithm isn't in the github repo, so this will need to be run on the QC site'''
    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        # NOTE(review): "01" integer literals are Python 2-only syntax; this
        # file will not parse under Python 3 (would need plain 1).
        self.SetStartDate(1998,01,01)  #Set Start Date
        self.SetEndDate(2006,01,21)    #Set End Date
        self.SetCash(100000)           #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddSecurity(SecurityType.Equity, "MSFT", Resolution.Daily)
        # Raw mode: prices are not back-adjusted, so dividend/split events
        # arrive explicitly in OnData instead of being folded into prices.
        self.Securities["MSFT"].SetDataNormalizationMode(DataNormalizationMode.Raw)
        # this will use the Tradier Brokerage open order split behavior
        # forward split will modify open order to maintain order value
        # reverse split open orders will be cancelled
        self.SetBrokerageModel(BrokerageName.TradierBrokerage)
    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
        Arguments:
            data: Slice object keyed by symbol containing the stock data
        '''
        # Only on the very first data event: take a 50% position and park
        # two far-from-market orders that the split handling will adjust.
        if self.Transactions.OrdersCount == 0:
            self.SetHoldings("MSFT", .5)
            # place some orders that won't fill, when the split comes in they'll get modified to reflect the split
            self.Debug("Purchased Stock: {0}".format(self.Securities["MSFT"].Price))
            self.StopMarketOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].Low/2)
            self.LimitOrder("MSFT", -self.CalculateOrderQuantity("MSFT", .25), data["MSFT"].High*2)
        # Log every dividend distribution received this slice.
        for kvp in data.Dividends: # update this to Dividends dictionary
            symbol = kvp.Key
            value = kvp.Value.Distribution
            self.Log("{0} >> DIVIDEND >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Price))
        # Log every split factor received this slice.
        for kvp in data.Splits: # update this to Splits dictionary
            symbol = kvp.Key
            value = kvp.Value.SplitFactor
            self.Log("{0} >> SPLIT >> {1} - {2} - {3} - {4}".format(self.Time, symbol, value, self.Portfolio.Cash, self.Portfolio["MSFT"].Quantity))
    def OnOrderEvent(self, orderEvent):
        # orders get adjusted based on split events to maintain order value
        order = self.Transactions.GetOrderById(orderEvent.OrderId)
        self.Log("{0} >> ORDER >> {1}".format(self.Time, order))
"alexandre.catarino@gmail.com"
] | alexandre.catarino@gmail.com |
e2717ce4f14438d964c014636ebc6f251d3ece45 | 84350c2c396ecc0f0a786cdb9a40664f171723c4 | /home/views.py | 394be12ec44df2f2795e2512d9a5fcf684d01732 | [] | no_license | henry-alakazhang/seng3011 | cd28d510ba84133aba6c026568ba30b7bd5ea278 | 262791cd13953561c2153cefb5b0ce7315a52eec | refs/heads/master | 2021-05-01T11:48:32.975637 | 2016-04-19T22:38:37 | 2016-04-19T22:38:37 | 54,078,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
def index(request):
return render(request, 'home/home.html', {'home':True});
def about(request):
return render(request, 'home/about.html', {'about':True});
def api_home(request):
return render(request, 'home/api.html', {'api':True});
def api_versions(request):
return render(request, 'home/api_versions.html', {'api':True});
def api_docs(request):
return render(request, 'home/api_docs.html', {'api':True});
def api_bugs(request):
return render(request, 'home/api_bugs.html', {'api':True});
def analytics_home(request):
return render(request, 'home/analytics.html', {'analytics':True}); | [
"henry.alakazhang@gmail.com"
] | henry.alakazhang@gmail.com |
74ade52a5e14e97314cb7c841a48003edc95c201 | d11c6b6b9762acda60fc094b7e8ad2754fd9b700 | /tensorflow/python/eager/run_eager_op_as_function_test.py | f3b46f6d616f7268bd9249d8f2704de918b11722 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jprosser/tensorflow | 28ba58a811ac92e0e15aab518512cd46fbef90ea | c3f73a5f946c63b238f5412717f6aae4d4d6bf4b | refs/heads/master | 2021-10-07T16:53:58.923779 | 2021-10-01T17:23:25 | 2021-10-01T17:28:00 | 219,566,947 | 0 | 1 | Apache-2.0 | 2019-11-04T18:16:54 | 2019-11-04T18:16:53 | null | UTF-8 | Python | false | false | 6,445 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for wrapping an eager op in a call op at runtime."""
import time
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import benchmarks_test_base
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import critical_section_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import tf_inspect
def run_benchmark(func, num_iters, unused_execution_mode):
  """Times `num_iters` calls of `func` and returns the elapsed seconds.

  `func` is called once first as a warm-up so one-time costs (tracing,
  caching) are excluded from the measurement.
  """
  # warm up
  func()
  # time.perf_counter() is monotonic and high-resolution; time.time() can
  # jump backwards/forwards when the wall clock is adjusted, corrupting
  # benchmark numbers.
  start = time.perf_counter()
  for _ in range(num_iters):
    func()
  return time.perf_counter() - start
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
# TODO(srbs): Why can't we use absl parameterized here?
@test_util.with_eager_op_as_function(only_as_function=True)
class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
  """Matmul micro-benchmarks run with eager ops wrapped in call ops."""
  def __init__(self):
    super().__init__()
    # Pre-create operands of several sizes so each benchmark times only the
    # matmul itself, not tensor creation.
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
    self._m_100_by_100 = random_ops.random_uniform((100, 100))
    self._m_1000_by_1000 = random_ops.random_uniform((1000, 1000))
  def _get_benchmark_name(self):
    """Copied from benchmarks_test.py."""
    # Walk the call stack from the outermost frame inwards to find the
    # benchmark method that (indirectly) invoked us; its name labels the
    # benchmark report.
    stack = tf_inspect.stack()
    name = None
    for frame in stack[::-1]:
      f_locals = frame[0].f_locals
      f_self = f_locals.get("self", None)
      if isinstance(f_self, test.Benchmark):
        name = frame[3]  # Get the method name
        # This is a hack to get around the fact that some methods might have a
        # disable_tfrt decorator around them. In that case a function called
        # 'decorated' wraps the real called function underneath and so we
        # peek one deeper into the stack to get the real name.
        if name == "decorated":
          continue
        else:
          break
    if name is None:
      raise ValueError("Unable to determine calling Benchmark function.")
    if context.is_tfrt_enabled():
      name = name + "_tfrt"
    return name
  def _run(self, func, num_iters):
    # Delegate timing/reporting to the base class using the module-level
    # run_benchmark timer.
    self.run_report(run_benchmark, func, num_iters)
  def _benchmark_matmul(self, mat, device):
    # Silently skip GPU benchmarks on hosts without a GPU.
    if device == GPU and not context.num_gpus():
      return
    with context.device(device):
      if device == GPU:
        mat = mat.gpu()
      func = lambda: math_ops.matmul(mat, mat)
      self._run(func, num_iters=1000)
  # One benchmark per (matrix size, device) combination.
  def benchmark_tf_matmul_2_by_2_CPU(self):
    self._benchmark_matmul(self._m_2_by_2, CPU)
  def benchmark_tf_matmul_2_by_2_GPU(self):
    self._benchmark_matmul(self._m_2_by_2, GPU)
  def benchmark_tf_matmul_100_by_100_CPU(self):
    self._benchmark_matmul(self._m_100_by_100, CPU)
  def benchmark_tf_matmul_100_by_100_GPU(self):
    self._benchmark_matmul(self._m_100_by_100, GPU)
  def benchmark_tf_matmul_1000_by_1000_CPU(self):
    self._benchmark_matmul(self._m_1000_by_1000, CPU)
  def benchmark_tf_matmul_1000_by_1000_GPU(self):
    self._benchmark_matmul(self._m_1000_by_1000, GPU)
@test_util.with_eager_op_as_function(only_as_function=True)
class RunEagerOpAsFunctionTest(test.TestCase):
  """Smoke tests: common eager ops still run when wrapped in call ops.

  These tests assert nothing explicitly; they pass as long as the op
  executes without raising under the eager-op-as-function mode.
  """
  def setUp(self):
    super().setUp()
    self._m_2_by_2 = random_ops.random_uniform((2, 2))
  def testDefaultAttrValues(self):
    # Exercises an op whose attrs are filled in from their defaults.
    ragged_map_ops.map_fn(
        fn=lambda x: x,
        elems=ragged_factory_ops.constant([[7]]),
        dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.int32, ragged_rank=1))
  def testArrayFill(self):
    array_ops.fill(
        constant_op.constant([2], dtype=dtypes.int64), constant_op.constant(1))
  def testDatasetMap(self):
    # When a GPU is available, this would test that the wrapped call ops are
    # placed on the CPU (i.e. the device is selected using the unwrapped op).
    dataset_ops.Dataset.range(2).map(math_ops.square)
  def testPrefetchToDevice(self):
    if not context.num_gpus():
      self.skipTest("No GPU available")
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0"))
  def testMatmul(self):
    math_ops.matmul(self._m_2_by_2, self._m_2_by_2)
  # The *FastPath tests use tensor inputs (eager fast path); the
  # *EagerFallback tests use plain Python values, which take the slower
  # conversion path.
  def testMixedTypeListInputFastPath(self):
    array_ops.identity_n([self._m_2_by_2, self._m_2_by_2])
  def testMixedTypeListInputEagerFallback(self):
    array_ops.identity_n([1, 1])
  def testMixedTypeListInputFastPathDifferentArity(self):
    # This tests that the FunctionDef cache key contains the number of args.
    array_ops.identity_n([self._m_2_by_2, self._m_2_by_2])
    array_ops.identity_n([self._m_2_by_2, self._m_2_by_2, self._m_2_by_2])
  def testMixedTypeListInputEagerFallbackDifferentArity(self):
    array_ops.identity_n([1, 1])
    array_ops.identity_n([1, 1, 1])
  def testSingleTypeListFastPath(self):
    array_ops.concat([self._m_2_by_2, self._m_2_by_2], axis=-1)
  def testSingleTypeListEagerFallback(self):
    array_ops.concat([[1], [2]], axis=-1)
  def testSingleTypeListFastPathDifferentArity(self):
    array_ops.concat([self._m_2_by_2, self._m_2_by_2], axis=-1)
    array_ops.concat([self._m_2_by_2, self._m_2_by_2, self._m_2_by_2], axis=-1)
  def testSingleTypeListEagerFallbackDifferentArity(self):
    array_ops.concat([[1], [2]], axis=-1)
    array_ops.concat([[1], [2], [3]], axis=-1)
  def testCreateCriticalSection(self):
    cs = critical_section_ops.CriticalSection(shared_name="cs")
    cs.execute(lambda: 1.0)
# Standard test entry point: run all test cases defined in this file.
if __name__ == "__main__":
  test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
a283863473df2bd22b85733056c19f607a424224 | 9f2cdd1b711209617a4ffd3cde30ebc2630119d7 | /pay.py | c9dd46a2c0fbc06b1bc54d2858ce50c1da5f4340 | [] | no_license | Abhaskar1/E-Wallet | fba879728359843b11b9a9c15b5840c4451169af | a7e59c8ecd77fe50c2c41c59a952d9689899983a | refs/heads/master | 2022-06-09T16:11:52.189190 | 2020-05-08T16:59:30 | 2020-05-08T16:59:30 | 260,440,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,579 | py | import tkinter as tk
import pymysql
from datetime import datetime
def money():
    """Transfer the amount in v2 from the user id in v1 to the phone in v4.

    Reads the three Tk StringVars (v1 = sender id, v2 = amount, v4 =
    receiver phone), performs the transfer against the `wallet` MySQL
    database and reports the outcome through v3.  Nothing is persisted
    unless the whole transfer succeeds (commit happens only at the end;
    any failure rolls back).
    """
    sql_debit = """UPDATE users SET balance = %s where id = %s"""
    sql_lookup_phone = """SELECT phone FROM users WHERE phone = %s"""
    sql_credit = """UPDATE users SET balance = %s where phone = %s"""
    sql_receiver_row = """SELECT * FROM users where phone = %s"""
    sql_sender_row = """SELECT * FROM users where id = %s"""
    sender_id = str(v1.get())
    amount = str(v2.get())
    receiver_phone = str(v4.get())
    # Basic form validation; phone numbers must be exactly 10 characters.
    if sender_id == '' or amount == '' or receiver_phone == '' or len(receiver_phone) != 10:
        v3.set("Please Enter Correct/Complete Details")
        return
    conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='', db='wallet')
    cur = conn.cursor()
    try:
        # Does the receiver exist?
        if cur.execute(sql_lookup_phone, (receiver_phone,)) == 0:
            v3.set("Number not registered")
            return
        # Credit the receiver first (same order as the original flow);
        # safe because nothing is committed until the very end.
        # NOTE(review): balance is assumed to be column index 5 and phone
        # column index 4 of the users table -- confirm against the schema.
        cur.execute(sql_receiver_row, (receiver_phone,))
        receiver_balance = 0
        for row in cur.fetchall():
            receiver_balance = row[5]
        cur.execute(sql_credit, (receiver_balance + int(amount), receiver_phone))
        # Fetch the sender row.  Parameterized to avoid SQL injection (the
        # original concatenated the raw id straight into the statement).
        cur.execute(sql_sender_row, (sender_id,))
        sender_balance = 0
        sender_phone = None
        for r1 in cur:
            sender_balance = r1[5]
            sender_phone = r1[4]
        new_sender_balance = sender_balance - int(amount)
        if new_sender_balance < 0:
            # Also reached when the sender id does not exist (balance 0).
            v3.set("Not Enough Balance")
            return
        if sender_phone is None:
            # No row matched the sender id; the original code failed here
            # too (NameError -> generic error message).
            raise LookupError("unknown sender id")
        cur.execute(sql_debit, (str(new_sender_balance), sender_id))
        formatted_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        cur.execute('insert into transactions(sender,reciever,amount,date) values(%s,%s,%s,%s)',
                    (sender_phone, receiver_phone, amount, formatted_date))
        conn.commit()
        v3.set("Paid")
    except Exception:
        conn.rollback()
        v3.set("something error")
    finally:
        # The original never closed the connection; close it so uncommitted
        # work is discarded deterministically.
        conn.close()
# --- Tkinter UI: one window collecting sender id, amount and receiver ---
root=tk.Tk()
root.title("Recharge")
root.geometry("400x400")
# Entry bindings: v1 = sender id, v2 = amount, v3 = status message shown
# to the user (written by money()), v4 = receiver phone number.
v1=tk.StringVar()
v2=tk.StringVar()
v3=tk.StringVar()
v4=tk.StringVar()
tk.Label(root,text="ID",fg="red",bg="yellow").grid(row=1,column=0)
tk.Entry(root,text="",textvariable=v1).grid(row=1,column=1)
tk.Label(root,text="Amount",fg="red",bg="yellow").grid(row=2,column=0)
tk.Entry(root,text="",textvariable=v2).grid(row=2,column=1)
tk.Label(root,text="Pay To",fg="red",bg="yellow").grid(row=3,column=0)
tk.Entry(root,text="",textvariable=v4).grid(row=3,column=1)
# The Pay button triggers the money() transfer routine defined above.
tk.Button(root,text="Pay",command=money).grid(row=4,column=1)
# Status label; money() writes its outcome ("Paid", error text, ...) to v3.
tk.Label(root,text="",fg="red",bg="yellow",textvariable=v3).grid(row=5,column=0)
# NOTE(review): no root.mainloop() call here -- presumably the importer
# starts the event loop; confirm, otherwise the window never appears.
| [
"noreply@github.com"
] | Abhaskar1.noreply@github.com |
8a5b27c101b4341b7aea51cf777cc73c13e5a648 | bda25b9d65e3833bd51063060e7cd41945d8d04d | /invent/migrations/0015_auto_20191117_1248.py | 268f9113e301437eaf5fb4c8ac30efe312b5edf7 | [] | no_license | rajathongal/Inventory_Management_sys | e770824eb0e6f3f3df7795c0abe8a173b1cdcd6f | 46c2d3fc4c31d4747851dcdfa39d62e8fee7ff9c | refs/heads/master | 2022-06-14T17:10:47.516041 | 2020-04-25T11:15:28 | 2020-04-25T11:15:28 | 258,756,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # Generated by Django 2.1.2 on 2019-11-17 07:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invent', '0014_auto_20191117_1245'),
]
operations = [
migrations.AlterField(
model_name='mobile',
name='fill',
field=models.CharField(choices=[('HuaweiP', 'HuaweiP20'), ('Huawei', 'HuaweiY9Prime2019')], default='HuaweiP20', max_length=10),
),
migrations.AlterField(
model_name='mobile',
name='status',
field=models.CharField(choices=[('HuaweiP', 'HuaweiP'), ('Huawei', 'Huawei')], default='HuaweiP', max_length=10),
),
]
| [
"reachme.rajathongal@gmail.com"
] | reachme.rajathongal@gmail.com |
6ae9ab1d798145631327842348ba8d675bea0e3e | 6226d27a844400989d7320fe8041599839ae9628 | /yukawa/chap3/nlp_20.py | fac49a6eabd451995d4692b096f1776153a34721 | [] | no_license | lrlab-nlp100/nlp100 | cb73e996b36a439ae279c99e5463e63678188271 | 65bfb4e5fdc42a655043c9d531e9f7bc10f82ed8 | refs/heads/master | 2021-01-19T23:33:07.483184 | 2017-06-29T03:47:46 | 2017-06-29T03:47:46 | 88,996,166 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from collections import Counter
import re
import json
import gzip
with gzip.open('jawiki-country.json.gz','r') as f:
for line in f:
json_str = line.decode('utf-8')
obj = json.loads(json_str)
if obj['title'] == 'イギリス':
En_str = obj['text']
break;
with open('English.txt','w',encoding='utf-8') as g:
g.write(En_str)
print(En_str)
| [
"f.777.aiueo.777@hotmail.co.jp"
] | f.777.aiueo.777@hotmail.co.jp |
aeb857bfb5a30a465110656ef8c80828321583c6 | aaa79e1992c9ea57a80f8bba98e2251cc2814cc3 | /project/final/gain_ratio.py | b4aa3c65357821eef4cdc76a34374419dc238b5b | [] | no_license | Xin-Cheng/CS412 | abcba546383af359290035d8e231e2d601f949a9 | 252662ac1b9c527eaad31de3cff1a75a1790b62f | refs/heads/master | 2021-03-30T16:31:09.316692 | 2017-04-29T06:27:27 | 2017-04-29T06:27:27 | 83,969,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,101 | py | import numpy as np
import pandas as pd
import pickle
from math import *
from numpy import *
from collections import deque
class Decision_Tree:
    """A node of the decision tree.

    `condition` encodes the split: None for the root, a number for a
    two-way numeric split, an iterable of category values for a
    categorical split, or the predicted rating when `is_label` is True.
    `constraint` (set later via set_constraint) is the selection
    expression that leads to this node.
    """
    def __init__(self, name, condition, is_label):
        self.name = name
        self.condition = condition
        self.constraint = None
        # Decide how many child slots this node needs.
        if condition is None or is_label:
            fanout = 1
        elif isinstance(condition, (int, float)):
            fanout = 2
        else:
            fanout = len(condition)
        self.children = [None] * fanout
    def set_constraint(self, constraint):
        """Attach the selection expression that routes rows to this node."""
        self.constraint = constraint
# load data, train the tree and predict
def preprocess():
    """End-to-end driver: load the raw csv data, train a gain-ratio
    decision tree on the joined user/movie/rating table, persist the
    model with pickle and produce predictions for the test set.
    """
    # load data
    users = pd.read_csv('data/user.txt')
    movies = pd.read_csv('data/movie.txt')
    train = pd.read_csv('data/train.txt')
    test = pd.read_csv('data/test.txt')
    # preprocessing data (fill_na mutates the frames in place; defined
    # elsewhere in this module)
    fill_na(users)
    fill_na(movies)
    # training data: join ratings with user and movie attributes
    user_train = pd.merge(users, train, how='inner', left_on='ID', right_on='user-Id')
    whole_train_data = pd.merge(user_train, movies, how='inner', left_on='movie-Id', right_on='Id')
    train_data = whole_train_data[['Gender', 'Age', 'Occupation', 'Year', 'Genre', 'rating']]
    # build decision tree
    root = Decision_Tree('root', None, False)
    build_decision_tree(train_data, root)
    # Use context managers so the pickle files are closed deterministically;
    # the original passed bare open() calls and leaked both handles.
    with open('gain_ratio.p', 'wb') as model_file:
        pickle.dump(root, model_file)
    with open('gain_ratio.p', 'rb') as model_file:
        my_tree = pickle.load(model_file)
    # test data: same attribute columns plus the record id
    user_test = pd.merge(users, test, how='inner', left_on='ID', right_on='user-Id')
    whole_test_data = pd.merge(user_test, movies, how='inner', left_on='movie-Id', right_on='Id')
    test_data = whole_test_data[['Id_x', 'Gender', 'Age', 'Occupation', 'Year', 'Genre']]
    test_data = test_data.rename(index=str, columns={'Id_x': 'Id'})
    predict(test_data, my_tree)
def predict(test_data, decision_tree):
test_data['rating_str'] = ''
queries = build_queries(decision_tree)
for q in queries:
exec(q)
rating = []
for index, row in test_data.iterrows():
votes = array(map(int, list(row['rating_str'])))
rating.append(bincount(votes).argmax())
test_data['rating'] = rating
result = test_data[['Id', 'rating']]
result.sort(['rating'], inplace = True)
result.to_csv('gain_ratio_prediction.csv',index=False)
def build_queries(decision_tree):
queries = []
prefix = 'test_data.loc['
suffix = ', "rating_str"]= test_data["rating_str"] + '
node_list = deque([])
node_list.append(decision_tree)
while node_list:
curr_node = node_list.popleft()
if curr_node.name == 'label' and curr_node.constraint is not None:
queries.append(prefix + curr_node.constraint + suffix + '\"' + str(curr_node.condition) + '\"')
for node in curr_node.children:
if node is not None:
node_list.append(node)
return queries
# find split feature according to information gain
def find_split(train_data, all_info):
size = train_data.groupby('rating').size().shape[0]
if size == 1:
return Decision_Tree('label', train_data['rating'].tolist()[0], True)
# go for majority vote
elif train_data.shape[1] == 1:
return Decision_Tree('label', bincount(train_data['rating']).argmax(), True)
# if there is only one group
elif train_data.shape[1] == 2:
num_of_group = train_data.groupby(list(train_data)[0]).size().shape[0]
if num_of_group == 1:
return Decision_Tree('label', bincount(train_data['rating']).argmax(), True)
# find split feature
# calculate infomation of each feature
feature_names = list(train_data)
information = zeros(len(feature_names) - 1)
information_split = zeros([len(feature_names) - 1, 2])
for i in range(0, len(feature_names) - 1):
if feature_names[i] == 'Gender':
information[i] = discrete_information(train_data, feature_names[i], all_info)
elif feature_names[i] == 'Genre':
information[i] = combined_discrete_info(train_data, feature_names[i], all_info)
else:
info, split = continuous_info(train_data, feature_names[i], all_info)
information_split[i, :] = [info, split]
information[i] = info
# choose the feature with lowest infomation as current tree node
node_name = feature_names[argmax(information)]
if node_name == 'Gender':
condition = ['M', 'F']
elif node_name == 'Genre':
condition = unique(('|'.join(train_data[node_name].unique())).split('|'))
else:
condition = information_split[argmax(information)][1]
return Decision_Tree(node_name, condition, False)
# build decision tree
def build_decision_tree(train_data, tree_root):
if tree_root.condition is None:
all_info = entropy(train_data)
tree_root.children[0] = find_split(train_data, all_info)
build_decision_tree(train_data, tree_root.children[0])
elif tree_root.name == 'label':
return
else:
condition = tree_root.condition
name = tree_root.name
prev_constraint = tree_root.constraint + ' & ' if tree_root.constraint is not None else ''
if name != 'Genre':
left = (train_data[train_data[name] <= condition] if name != 'Gender' else train_data.groupby('Gender').get_group('M')).drop(name, axis=1)
right = (train_data[train_data[name] > condition] if name != 'Gender' else train_data.groupby('Gender').get_group('F')).drop(name, axis=1)
left_info = entropy(left)
right_info = entropy(right)
tree_root.children[0] = find_split(left, left_info)
tree_root.children[1] = find_split(right, right_info)
if name != 'Gender':
tree_root.children[0].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '<=' + str(condition) + ')')
tree_root.children[1].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '>' + str(condition) + ')')
else:
tree_root.children[0].set_constraint(prev_constraint + '(test_data["Gender"] == \"M\")')
tree_root.children[1].set_constraint(prev_constraint + '(test_data["Gender"] == \"F\")')
build_decision_tree(left, tree_root.children[0])
build_decision_tree(right, tree_root.children[1])
else:
for i in range(len(condition)):
group = (train_data[train_data['Genre'].str.contains(condition[i])]).drop(name, axis=1)
info = entropy(group)
tree_root.children[i] = find_split(group, info)
tree_root.children[i].set_constraint(prev_constraint + '(test_data[\"' + name + '\"]' + '.str.contains(' + '\"' + condition[i] + '\")' + ')')
build_decision_tree(group, tree_root.children[i])
# calculate continuous feature, 'Age', 'Occupation', and 'Year' in this project
def continuous_info(train_data, f_name, all_info):
size = train_data.shape[0]
features = train_data[f_name].unique()
sorted_features = sort(features)
split_info = zeros(len(sorted_features) - 1, dtype=float)
split_points = zeros(len(sorted_features) - 1, dtype=float)
# find split point
for i in range(len(sorted_features) - 1):
split = (sorted_features[i] + sorted_features[i + 1])/2
split_points[i] = split
left = train_data[train_data[f_name] <= split]
right = train_data[train_data[f_name] > split]
pr_left = float(left.shape[0])/size
pr_right = float(right.shape[0])/size
info = all_info - (entropy(left)*(pr_left) + entropy(right)*(pr_right))
split_inf = -pr_left*log2(pr_left) - pr_right*log2(pr_right)
split_info[i] = info/split_inf
max_split = argmax(split_info)
return split_info[max_split], split_points[max_split]
# calculate combined discrete feature, genre in this project
def combined_discrete_info(train_data, f_name, all_info):
size = train_data.shape[0]
# get distinct genres
genres_str = '|'.join(train_data['Genre'].unique())
genres = np.unique(genres_str.split('|'))
# calculate entropy of each distinct value
counts = zeros(len(genres), dtype=float)
eps = zeros(len(genres))
for i in range(len(genres)):
group = train_data[train_data['Genre'].str.contains(genres[i])]
counts[i] = group.shape[0]
eps[i] = entropy(group)
group_probability = (counts/size)/sum(counts/size)
info = all_info - dot(group_probability, eps)
split_info = info/dot(group_probability, log2(group_probability))
return -split_info
# calculate information of discrete feature, gender in this project
def discrete_information(train_data, f_name, all_info):
size = train_data.shape[0]
# calculate the probability of each distinct value of this feature
groups = train_data.groupby(f_name)
counts = groups.size().reset_index(name='count')
group_probability = array(counts['count'], dtype=float)/size
# calculate entropy of each distinct value
distinct_names = train_data[f_name].unique()
eps = zeros(len(distinct_names))
for i in range(len(distinct_names)):
eps[i] = entropy(groups.get_group(distinct_names[i]))
info = all_info - dot(group_probability, eps)
split_info = info/dot(group_probability, log2(group_probability))
return -split_info
# calculate entropy
def entropy(group):
size = group.shape[0]
groups = group.groupby('rating').size().reset_index(name='count')
ratings = array(groups['rating'])
counts = array(groups['count'], dtype=float)
probabilities = counts/size
log_probabilities = -log2(probabilities)
entropy = dot(probabilities, log_probabilities)
return entropy
# assign the most common value of the attribute to missing values
def fill_na(dataframe):
for column in dataframe:
dataframe[column].fillna(value=dataframe[column].value_counts().idxmax(), inplace = True)
def main():
preprocess()
if __name__ == "__main__":
main() | [
"x_cheng@outlook.com"
] | x_cheng@outlook.com |
aabb0f0e8ff633a07728e3ac6a5e69a580bc1ed2 | e1d11eadd56ed94d6bfc5b9a72d00b3482c99121 | /courses/models.py | e521c9bc85bc8ac8962aa9ee3f2904ce0bdf8189 | [] | no_license | Gabkings/django-elearning-app | 0296f8353daa9204c5128481fcd6a237d205fd81 | def573b74ddb3d3732632550cb6d5157329f0388 | refs/heads/master | 2020-09-17T16:34:10.318126 | 2019-11-26T04:32:55 | 2019-11-26T04:32:55 | 224,102,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Subject(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
class Meta:
ordering = ['title']
def __str__(self):
return self.title
class Course(models.Model):
owner = models.ForeignKey(User,
related_name='course_created',
on_delete=models.CASCADE)
subject = models.ForeignKey(Subject,
related_name='courses',on_delete=models.CASCADE)
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-created']
def __str__(self):
return self.title
class Module(models.Model):
course = models.ForeignKey(Course,
related_name='modules',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
def __str__(self):
return self.title
| [
"gabworks51@gmail.com"
] | gabworks51@gmail.com |
13e7ec5bd8bcba85d483fc56b614ad0ff0f33428 | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/toolbox/sheets_20191128125946.py | 6e2ba795be9d240993ce111520e995cf4f16a0c6 | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import numpy as np
import gspread
def insertPlanMiddleware(rows):
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
SAMPLE_SPREADSHEET_ID = '1QSGAY_WyamEQBZ4ITdAGCVAbavR9t-D-4gPQx4Sbf7g'
SAMPLE_RANGE_NAME = 'middleware'
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'C:\\Users\\beuo\\Documents\\Demandas\\AtualizaMiddleIntegrationVtex\\toolbox\\credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheet = service.spreadsheets()
# result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
# range=SAMPLE_RANGE_NAME).execute()
# values = result.get('values', [])
range_ = SAMPLE_RANGE_NAME
value_input_option = 'RAW'
include_Values_In_Response = True
insert_data_option = 'INSERT_ROWS'
linhas = np.asarray(rows)
# print(linhas)
linhas_corrigidas = []
uma_linha = []
for row in linhas:
p0 = row[0]
p1 = row[1]
p2 = row[2]
p3 = row[3]
uma_linha.append(p3)
uma_linha[0] = now
uma_linha.append(p0)
uma_linha.append(p1)
uma_linha.append(p2)
linhas_corrigidas.append(uma_linha)
uma_linha = []
print(linhas_corrigidas)
value_range_body = {
"majorDimension": "ROWS",
"range": "",
"values": linhas_corrigidas
}
request = service.spreadsheets().values().append(spreadsheetId=SAMPLE_SPREADSHEET_ID, range=range_, valueInputOption=value_input_option, insertDataOption=insert_data_option, body=value_range_body, includeValuesInResponse = include_Values_In_Response)
response = request.execute()
print(response)
# print(values)
| [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com |
7052235f3eb337f9448428b30966f5fff59e23db | 7cda779a723bdeefaf6489da7d7783ccd1120f62 | /pp1cs18ex02/test.py | 88294203a80dff5ba52c8fc04275371b73a2bb16 | [] | no_license | nvnvashisth/Protein_Prediction | 15d934fb972ede69cd6586edb797b71f2368828c | 3cd2d337950ddb0d1355c834b922aafde1ea1237 | refs/heads/master | 2020-05-25T17:37:48.790830 | 2019-05-21T21:00:37 | 2019-05-21T21:00:37 | 187,912,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | import sys
import re
import string
from collections import Counter
fasta = []
test = []
count = 0
with open("tests.fasta") as file_one:
for line in file_one:
count=count+1
if(count < 20):
line = line.strip()
if not line:
continue
if line.startswith(">"):
active_sequence_name = line[1:]
if active_sequence_name not in fasta:
test.append(''.join(fasta))
fasta = []
continue
fasta.append(line)
else:
break
print(count)
if fasta:
test.append(''.join(fasta))
test = test[1:]
removetable = str.maketrans('', '', '*')
out_list = [s.translate(removetable) for s in test]
#print(out_list)
check = []
for index, seq in enumerate(out_list):
print("Read %d Sequence"%index)
if(index < 100000):
for j in range(0, len(seq)):
if(j<len(seq)-3):
check.append(seq[j]+seq[j+1]+seq[j+2]+seq[j+3])
with open("output.txt",'w') as f:
print('Filename:', Counter(check), file=f)
#print(Counter(check)) | [
"nitin@Nitins-MacBook-Pro.local"
] | nitin@Nitins-MacBook-Pro.local |
93615a5b7ca2a53756a14fe3d568a1233a68318d | 644e93ab8ca24734f99ef91e0ef6ace9e232a517 | /Machine-Learning/naive_bayes/nb_author_id.py | 200e531c33e1addfd01572995be7d0f7adef9bc7 | [] | no_license | yennanliu/udacity-nanodegree-data-analyst | 9d8d9a22fb9bbfa00b2f4e5458a8ccbba5050d7b | 1821665b9c608d13e13ae87640816ff68bd9f21d | refs/heads/master | 2021-01-17T23:19:07.524073 | 2015-04-05T20:30:29 | 2015-04-05T20:30:29 | 46,547,159 | 1 | 1 | null | 2015-11-20T07:41:19 | 2015-11-20T07:41:19 | null | UTF-8 | Python | false | false | 1,186 | py | #!/usr/bin/python
"""
this is the code to accompany the Lesson 1 (Naive Bayes) mini-project
use a Naive Bayes Classifier to identify emails by their authors
authors and labels:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
t0 = time()
nb = GaussianNB()
print "Creating Naive Bayes:", round(time()-t0, 3), "s"
t0 = time()
nb.fit(features_train, labels_train)
print "trining naive bayes:", round(time()-t0, 3), "s"
t0=time()
y = nb.predict(features_test)
print "predicting naive bayes:", round(time()-t0, 3), "s"
t0 = time()
print nb.score(features_test, labels_test)
print "scoring naive bayes:", round(time()-t0,3), "s" | [
"orenov@codeminders.com"
] | orenov@codeminders.com |
617dedf006ab85fbf08f9e3961c99c013a7b7d22 | 9eaa2c64a777bd24a3cccd0230da5f81231ef612 | /study/1905/month01/code/Stage3/day18/mysite4/bookstore/admin.py | a90113c19ca8e6a6e29470fcf94152edf2850ee4 | [
"MIT"
] | permissive | Dython-sky/AID1908 | 4528932f2ca66b844d8a3fcab5ed8bf84d20eb0c | 46cd54a7b36b5f009974f2bbb7005a4ad440ca1a | refs/heads/master | 2022-04-14T12:23:30.426270 | 2020-04-01T18:05:19 | 2020-04-01T18:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from django.contrib import admin
# Register your models here.
# file:bookstore/admin.py
from . import models
class BookManager(admin.ModelAdmin):
list_display = ['id','title','pub','price','market_price']
list_display_links = ['id','title']
list_filter = ['pub']
search_fields = ['title','pub']
list_editable = ['market_price']
admin.site.register(models.Book,BookManager)
class AuthorManager(admin.ModelAdmin):
list_display = ['id','name','age','email']
list_display_links = ['id','name','age','email']
list_filter = ['age']
search_fields = ['name']
admin.site.register(models.Author,AuthorManager)
class WifeManager(admin.ModelAdmin):
list_display = ['id','name','author']
admin.site.register(models.Wife,WifeManager) | [
"dong_1998_dream@163.com"
] | dong_1998_dream@163.com |
4a50e5dda4de4a0ff20b6832581088f8feb31a5d | a8139ccd50a27861d3c5a4168fd0e4b351c0a514 | /material/code/ds/stack_oop.py | 782427639945b056f919130e5ad4f50d33cecd31 | [] | no_license | shambhand/pythontraining | a124aa1485c3ce0e589fc2cd93c1e991746432e4 | 24dd923e2b2c07c70500775e3665e2a527240329 | refs/heads/master | 2021-05-17T22:54:45.331127 | 2019-01-11T03:12:59 | 2019-01-11T03:12:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | #! /usr/local/bin/python3
class Stack:
def __init__(self):
self.lst = []
def push (self, element):
self.lst.append (element)
def pop (self):
if len (self.lst) == 0:
raise ValueError ("pop:stack empty")
return self.lst.pop ()
def examine_stack_top (self):
if len (self.lst) == 0:
raise ValueError ("est:stack empty")
return self.lst[len (self.lst) - 1]
def is_stack_empty (self):
return len (self.lst) == 0
def main ():
st1 = Stack ()
st1.push (10)
st1.push (20)
st1.push (30)
print ("st1:top:", st1.examine_stack_top ())
print ("st1.pop1:", st1.pop ())
print ("st1.pop2:", st1.pop ())
print ("st1.pop3:", st1.pop ())
print ("st1.is_stack_empty:", st1.is_stack_empty ())
print ("st1.pop4:", st1.pop ())
main ()
| [
"amit2766@gmail.com"
] | amit2766@gmail.com |
96aedd41e9935efbfa1f872685834590bc3d7830 | 35fbdc8288a086bda423709580b42a0214789947 | /repo/database.py | 5150331602325c01f435bea6e57fa990f0d89627 | [] | no_license | arvindbasant/telegram-sync | 37fa843c629bfd1ee09a9ae31bb4caa9abe670e2 | 7323b2f6a10bd7054bb1b9818e8e149a42f52b57 | refs/heads/master | 2022-11-26T20:29:53.820497 | 2020-07-22T13:53:44 | 2020-07-22T13:53:44 | 270,522,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | import pyodbc
import os
from utils import logger
logger = logger.get_logger(__name__)
def connect():
mssql_server = os.environ['MSSQL_SERVER']
mssql_port = os.environ['MSSQL_PORT']
mssql_database = os.environ['MSSQL_DATABASE']
mssql_username = os.environ['MSSQL_USERNAME']
mssql_password = os.environ['MSSQL_PASSWORD']
driver = '/usr/lib/libtdsodbc.so'
try:
conn = pyodbc.connect(
'DRIVER=' + driver + ';SERVER=' + mssql_server + ';PORT=' + mssql_port + ';DATABASE=' + mssql_database + ';UID=' + mssql_username + ';PWD=' + mssql_password + '')
except Exception as e:
logger.error("Database Connection error %s", e)
else:
cursor = conn.cursor()
return conn, cursor
| [
"Arvind.Kumar@mrcooper.com"
] | Arvind.Kumar@mrcooper.com |
80ade2a316255dd197b2cbae465d3246784f7290 | 29deebaee19131b8a4eb05f5f628df0ebb260d08 | /test/social2/FM_addOrCancelAttention.py | 780c04c2f04c63cd8377cd28fc7bfd7dea95509c | [] | no_license | cccthon/webApiTest | d5843f55712499580557afe69f7ab64aa6a55d9c | 5d0a635a67884de10cf19f140f547b7f7dcd1f7a | refs/heads/master | 2022-12-09T12:14:41.087713 | 2018-07-26T06:42:00 | 2018-07-26T06:42:00 | 142,399,806 | 2 | 2 | null | 2022-09-23T21:53:46 | 2018-07-26T06:41:46 | Python | UTF-8 | Python | false | false | 5,722 | py | #========================================================
#+++++++++++++++++ 测试用例信息 ++++++++++++++++
# 用例 ID: FM_addOrCancelAttention
# 用例标题: 关注用户,分别检查关注列表和粉丝列表
# testcase0011:
#1、登录
#2、关注用户
#3、检查关注列表
#testcase002:
# 1、登录
# 2、关注用户
# 3、检查粉丝列表
import sys,requests,unittest,yaml,json,time
sys.path.append("../../lib/common")
sys.path.append("../../lib/webAPI")
sys.path.append("../../lib/http")
import Auth,newSocial,FMCommon,Http
userData = FMCommon.loadWebAPIYML()
socialData=FMCommon.loadnewSocialYML()
authData=FMCommon.loadAuthYML()
class addOrCancelAttention(unittest.TestCase):
def setUp(self):
#登录账号A
siginParams= {"account": userData['account'], "password": userData['passwd'], "remember": False}
siginRes=Auth.signin(userData['hostName']+authData['signin_url'],headers = userData['headers'],datas = siginParams)
#断言返回200登录成功
self.assertEqual(siginRes.status_code,userData['status_code_200'])
#获取headers
self.token=json.loads(siginRes.text)['data']['token']
self.headers=dict(userData['headers'],**{userData['Authorization'] : userData['Bearer']+self.token})
self.userId=json.loads(siginRes.text)['data']['id']
self.nickName=json.loads(siginRes.text)['data']['nickname']
#登录账号B
siginFollowParams= {"account": userData['followAccount'], "password": userData['followPasswd'], "remember": False}
siginFollowRes=Auth.signin(userData['hostName']+authData['signin_url'],headers = userData['headers'],datas = siginFollowParams)
#断言返回200登录成功
self.assertEqual(siginFollowRes.status_code,userData['status_code_200'])
#获取headers
self.followToken=json.loads(siginFollowRes.text)['data']['token']
self.followHeaders=dict(userData['headers'],**{userData['Authorization'] : userData['Bearer']+self.followToken})
self.followUserId=json.loads(siginFollowRes.text)['data']['id']
self.followNickName = json.loads(siginFollowRes.text)['data']['nickname']
#判断用户A当前是否关注了用户B,如果已经关注,不做操作,没有关注,执行关注
#获取用户A的关注列表
myAttentionsRes=newSocial.getMyAttentions(userData['hostName']+socialData['getMyAttentions_url'],headers = self.headers)
#断言返回200,code=0,获取关注列表成功
self.assertEqual(myAttentionsRes.status_code,userData['status_code_200'])
self.assertEqual(json.loads(myAttentionsRes.text)['code'],userData['code_0'])
#获取关注列表用户
attentionUserList=json.loads(myAttentionsRes.text)['data']['Items']
userIdList = []
for i in range(len(attentionUserList)):
if attentionUserList[i]['UserInfo'] != None:
userIdList.append(attentionUserList[i]['UserInfo']['BaseInfo']['UserId'])
if self.followUserId not in userIdList:
params={"toUserId":self.followUserId}
attentionRes=newSocial.addOrCancelAttention(userData['hostName']+socialData['addOrCancelAttention_url'],datas = params, headers= self.headers)
self.assertEqual(attentionRes.status_code,userData['status_code_200'])
self.assertEqual(json.loads(attentionRes.text)['code'],0)
else:
pass
def test_attentionUser001(self):
'''关注用户,检查关注列表'''
myAttentionsRes=newSocial.getMyAttentions(userData['hostName']+socialData['getMyAttentions_url'],headers = self.headers)
#断言返回200,code=0,获取关注列表成功
self.assertEqual(myAttentionsRes.status_code,userData['status_code_200'])
self.assertEqual(json.loads(myAttentionsRes.text)['code'],userData['code_0'])
#获取关注列表用户
attentionUserList=json.loads(myAttentionsRes.text)['data']['Items']
userIdList = []
for i in range(len(attentionUserList)):
if attentionUserList[i]['UserInfo'] != None:
userIdList.append(attentionUserList[i]['UserInfo']['BaseInfo']['UserId'])
self.assertIn(self.followUserId,userIdList)
def test_attentionUser002(self):
'''关注用户,检查粉丝列表'''
myFansRes=newSocial.getMyFans(userData['hostName']+socialData['getMyFans_url'],headers = self.followHeaders)
#断言返回200,code=0,获取粉丝列表成功
self.assertEqual(myFansRes.status_code,userData['status_code_200'])
self.assertEqual(json.loads(myFansRes.text)['code'],userData['code_0'])
#检查用户A UserId存在于用户B粉丝列表UserId中
fansUserList=json.loads(myFansRes.text)['data']['Items']
userIdList=[]
for i in range(len(fansUserList)):
userIdList.append(fansUserList[i]['UserInfo']['BaseInfo']['UserId'])
self.assertIn(self.userId,userIdList)
def tearDown(self):
#清空测试环境
#取消关注
params={"toUserId":self.followUserId}
attentionRes=newSocial.addOrCancelAttention(userData['hostName']+socialData['addOrCancelAttention_url'],datas = params, headers= self.headers)
self.assertEqual(attentionRes.status_code,userData['status_code_200'])
self.assertEqual(json.loads(attentionRes.text)['code'],0)
#退出登录
signOutRes=Auth.signout(userData['hostName']+ authData['signout_url'],datas = self.headers)
self.assertEqual(signOutRes.status_code,userData['status_code_200'])
if __name__=='__main__':
unittest.main() | [
"shencanhui@followme-inc.com"
] | shencanhui@followme-inc.com |
76f5f61d7faf29a8e38e7001278dd3b7abae36eb | 48ba8d0788e4ac7d4cacd7e7a2e2cf4f391c85ad | /Lists/9copylist.py | 674aaa41b444cb13b303f6996b6dec02da0fbd8c | [] | no_license | rahulvshinde/Python_Playground | c28ac2dc0865e254caa5360c3bb97b4ff5f23b3a | 7a03b765dd440654caba1e06af5b149f584e9f08 | refs/heads/master | 2023-04-19T17:25:55.993837 | 2021-05-17T01:15:30 | 2021-05-17T01:15:30 | 280,736,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # 9. Write a Python program to clone or copy a list.
my_list = [1,2,3]
def copylist(my_list):
new_list = []
for item in my_list:
new_list.append(item)
return new_list
def copylist1(my_list):
new_list = list(my_list)
return new_list
print("Copied list {}".format(copylist(my_list)))
print("Copied list {}".format(copylist1(my_list))) | [
"r.shinde2007@gmail.com"
] | r.shinde2007@gmail.com |
48482ab71d1bd1822599d7339cad342b01c643d4 | 51be8db88a49f7bebeefddf431faff6048ac4f37 | /xpcc/tools/bootloader/can/host/util/intelhex.py | 63975de317d5ea0829b0b6026572b9b4a64a75f2 | [
"MIT"
] | permissive | jrahlf/3D-Non-Contact-Laser-Profilometer | 0a2cee1089efdcba780f7b8d79ba41196aa22291 | 912eb8890442f897c951594c79a8a594096bc119 | refs/heads/master | 2016-08-04T23:07:48.199953 | 2014-07-13T07:09:31 | 2014-07-13T07:09:31 | 17,915,736 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | #!/usr/bin/python
import sys
class HexParserException(Exception):
""" Ausnahmeklasse fuer den Intel-Hex-Parser """
pass
class Segment:
""" Speicher einen String mit Speicherinhalten zusammen mit seiner Startadresse """
def __init__(self, address = 0, data = ''):
self.address = address
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def __repr__(self):
return "Segment (address = 0x%04x, data=%s)" % (self.address, self.data)
class IntelHexParser:
""" Liest Intelhex Dateien """
def __init__(self, filename = ''):
""" Konstruktor """
self.segments = []
if filename:
self.filename = filename
self.load_file(filename)
def load_file(self, filename):
""" Liest die Datei in einen internen Puffer """
try:
file = open(filename)
self.load_hex_data(file)
file.close()
except IOError:
raise HexParserException("Could not open file: \"%s\"." % filename)
except:
raise HexParserException("No Intel-Hex Format!")
def load_hex_data(self, file):
""" liest die Daten aus einer Datei im Intel-Hex Format """
segmentdata = []
currentAddr = 0
startAddr = 0
for line in file:
l = line.strip("\n\r")
# Leerzeilen herausfiltern
if len(l) == 0:
continue
if l[0] != ':' or len(l) < 11:
raise HexParserException("File Format Error.")
length = int(l[1:3], 16)
address = int(l[3:7], 16)
type = int(l[7:9], 16)
check = int(l[-2:], 16)
if len(l) != (11 + 2*length):
raise HexParserException("Invaild Line Length.")
# Checksumme ueberpruefen
bb = [int(l[i:i+2], 16) for i in xrange(1,len(l),2)]
crc = reduce(lambda x, y: x+y, bb, 0)
crc &= 0x0FF
if crc != 0:
raise HexParserException("Checksum Error.")
if type == 0x00:
if currentAddr != address:
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
startAddr = currentAddr = address
segmentdata = []
for i in range(length):
segmentdata.append( chr(int(l[9+2*i:11+2*i],16)) )
currentAddr = length + currentAddr
elif type == 0x01:
# Ende der Hexdaten
if length == 0:
break
else:
raise HexParserException("Invalid End-of-File Record")
elif type in (0x02, 0x03, 0x04):
pass
else:
sys.stderr.write("Ignored unknown field (type 0x%02x) in ihex file.\n" % type)
if segmentdata:
self.segments.append( Segment(startAddr, ''.join(segmentdata)) )
def __repr__(self):
""" Gibt die geladene IntelHex Datei aus """
buffer = []
if self.segments:
for segment in self.segments:
buffer.append("Startadresse: %s, Daten:\n" % segment.address)
counter = 0
for value in segment:
buffer.append("%02x " % ord(value))
counter += 1
if counter >= 26:
counter = 0
buffer.append("\n")
buffer.append("\n\n")
buffer.pop()
return ''.join(buffer)
# kleines Beispiel zur Verwendung des Intelhexparsers
#
# Liest eine uebergebene Intelhex-Datei ein und gibt den Inhalt entsprechend
# verarbeitet aus.
if __name__ == '__main__':
filename = sys.argv[1]
try:
parser = IntelHexParser(filename)
print parser
except HexParserException, e:
# Fehlermeldungen ausgeben
print e
| [
"dev.jonas.rahlf@gmail.com"
] | dev.jonas.rahlf@gmail.com |
14da268989613e7d688226f884587ec8acb176ed | 04e6cd86e6d100740c33c9610b93ab12e21c3047 | /supporting_scripts/.virtualenvs/enrollment_venv/lib/python3.8/site-packages/us/tests/test_us.py | de58c64b582d2296f973a7fed1fac39d1a52b8b7 | [] | no_license | sdl60660/cleveland_eviction_mapping | 5f0ad4b2509b09117612e60a29103176d13b3ef3 | ea4601fded96a3ff2b26b30fa71019e030a14197 | refs/heads/main | 2023-05-08T10:11:56.595243 | 2021-06-02T14:51:14 | 2021-06-02T14:51:14 | 308,381,227 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,496 | py | from __future__ import unicode_literals
from itertools import chain
import jellyfish
import pytest
import pytz
import us
# attribute
def test_attribute():
for state in us.STATES_AND_TERRITORIES:
assert state == getattr(us.states, state.abbr)
def test_valid_timezones():
for state in us.STATES_AND_TERRITORIES:
if state.capital:
assert pytz.timezone(state.capital_tz)
for tz in state.time_zones:
assert pytz.timezone(tz)
# During migration from SQLite to Python classes, a duplicate
# time zone had been found
assert len(state.time_zones) == len(set(state.time_zones))
# maryland lookup
def test_fips():
assert us.states.lookup("24") == us.states.MD
assert us.states.lookup("51") != us.states.MD
def test_abbr():
assert us.states.lookup("MD") == us.states.MD
assert us.states.lookup("md") == us.states.MD
assert us.states.lookup("VA") != us.states.MD
assert us.states.lookup("va") != us.states.MD
def test_name():
assert us.states.lookup("Maryland") == us.states.MD
assert us.states.lookup("maryland") == us.states.MD
assert us.states.lookup("Maryland", field="name") == us.states.MD
assert us.states.lookup("maryland", field="name") is None
assert us.states.lookup("murryland") == us.states.MD
assert us.states.lookup("Virginia") != us.states.MD
# lookups
def test_abbr_lookup():
for state in us.STATES:
assert us.states.lookup(state.abbr) == state
def test_fips_lookup():
for state in us.STATES:
assert us.states.lookup(state.fips) == state
def test_name_lookup():
for state in us.STATES:
assert us.states.lookup(state.name) == state
def test_obsolete_lookup():
for state in us.OBSOLETE:
assert us.states.lookup(state.name) == state
# test metaphone
def test_jellyfish_metaphone():
for state in chain(us.STATES_AND_TERRITORIES, us.OBSOLETE):
assert state.name_metaphone == jellyfish.metaphone(state.name)
# mappings
def test_mapping():
states = us.STATES[:5]
assert us.states.mapping("abbr", "fips", states=states) == dict(
(s.abbr, s.fips) for s in states
)
def test_obsolete_mapping():
mapping = us.states.mapping("abbr", "fips")
for state in us.states.OBSOLETE:
assert state.abbr in mapping
# known bugs
def test_kentucky_uppercase():
assert us.states.lookup("kentucky") == us.states.KY
assert us.states.lookup("KENTUCKY") == us.states.KY
def test_wayoming():
assert us.states.lookup("Wyoming") == us.states.WY
assert us.states.lookup("Wayoming") is None
def test_dc():
assert us.states.DC not in us.STATES
assert us.states.lookup("DC") == us.states.DC
assert us.states.lookup("District of Columbia") == us.states.DC
assert "DC" in us.states.mapping("abbr", "name")
# shapefiles
@pytest.mark.skip
def test_head():
import requests
for state in us.STATES_AND_TERRITORIES:
for region, url in state.shapefile_urls().items():
resp = requests.head(url)
assert resp.status_code == 200
# counts
def test_obsolete():
assert len(us.OBSOLETE) == 3
def test_states():
assert len(us.STATES) == 50
def test_territories():
assert len(us.TERRITORIES) == 5
def test_contiguous():
# Lower 48 + DC
assert len(us.STATES_CONTIGUOUS) == 48
def test_continental():
    # Lower 48 + Alaska = 49. NOTE(review): the previous comment also implied
    # DC, which contradicts the expected count of 49 — verify.
    assert len(us.STATES_CONTINENTAL) == 49
| [
"learnersd@gmail.com"
] | learnersd@gmail.com |
c8ba16a584ac505e53aac951cfa2b73eceae3b39 | 64bac35d595f4f5f626d5fb2e2b97e26ef914f87 | /Неделя 2/Домашние задания/8. Шоколадка.py | a08796152b8dd126bc57b6b26e201a3de6f225a9 | [] | no_license | BesVik/10WEEKS | ca963b22566bc50f7772da90b5f58cb0c49dac22 | 23ce5d80378280eafa505c5bb6a5d4d5197d8701 | refs/heads/master | 2022-12-07T21:08:58.544146 | 2020-08-22T16:05:57 | 2020-08-22T16:05:57 | 287,961,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # Шоколадка имеет вид прямоугольника, разделенного на n×m долек. Шоколадку можно один раз разломить по прямой на
# две части. Определите, можно ли таким образом отломить от шоколадки часть, состоящую ровно из k долек.
#
# Формат ввода
#
# Программа получает на вход три числа: n, m, k.
#
# Формат вывода
#
# Программа должна вывести одно из двух слов: YES или NO.
#
# Примеры
#
# Тест 1
# Входные данные:
# 4
# 2
# 6
#
# Вывод программы:
# YES
#
#
#
# Тест 2
# Входные данные:
# 2
# 10
# 7
#
# Вывод программы:
# NO
| [
"barsuc1952@yandex.ru"
] | barsuc1952@yandex.ru |
187bd675e9a52862101d51f1b5681a12c9404357 | 3680b53221c78eb79e53fdddb51b8e686193d487 | /p3multithread.py | 06b563c3940af6aea3448b57db90b2de3ac22285 | [] | no_license | vimanari/Bug-Bounty | cc602ebaaa126935e664b102b76d05f6f8737c71 | 6a8a241299ece1558b42553ce1a3cbbab64339ae | refs/heads/master | 2022-11-14T22:40:24.711088 | 2020-07-13T09:23:06 | 2020-07-13T09:23:06 | 278,278,852 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | #!/usr/bin/env python3
import sys, os, socket
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
HOST = socket.gethostname()
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """HTTPServer variant that handles each request on its own thread."""
    pass
'''
This sets the listening port, default set to 8080
'''
if sys.argv[1:]:
    # NOTE(review): a non-numeric port argument raises an uncaught ValueError.
    PORT = int(sys.argv[1])
else:
    PORT = 8080
'''
This sets the working directory of the HTTPServer, by default it is set to where the script is executed.
'''
if sys.argv[2:]:
    os.chdir(sys.argv[2])
    CWD = sys.argv[2]
else:
    CWD = os.getcwd()
# Binds on all interfaces — the served directory is exposed to the whole network.
server = ThreadingSimpleServer(('0.0.0.0', PORT), SimpleHTTPRequestHandler)
print("Serving HTTP traffic from", CWD, "on", HOST, "using port", PORT)
try:
    # Serve one request at a time until interrupted (each handled in a thread).
    while 1:
        sys.stdout.flush()
        server.handle_request()
except KeyboardInterrupt:
    print("\nShutting down server per users request.")
"noreply@github.com"
] | vimanari.noreply@github.com |
79da6c9b0ad778fbd6c4e91df807ffc5d86c8248 | b1b53a1c50d4d04a25670de094f01feb7f05d08e | /farmacia/migrations/0003_producto_url.py | e5d6f1d2fbb1abfc8fb88164d6dfd3d6e769ac6d | [] | no_license | patoba/Farmacia-Huffman | ba0dcb140d40d4ad3300dd9b34d2a5b559ac41e2 | 666dc5f3d4629d640eacf30a0c2e645862131511 | refs/heads/master | 2022-12-26T03:38:52.876229 | 2020-10-03T04:11:07 | 2020-10-03T04:11:07 | 133,097,635 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Generated by Django 2.0.4 on 2018-05-12 02:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a required `url` CharField to Producto.

    The string default 'null' only back-fills existing rows;
    preserve_default=False removes it from the model afterwards.
    """
    dependencies = [
        ('farmacia', '0002_remove_factura_monto'),
    ]
    operations = [
        migrations.AddField(
            model_name='producto',
            name='url',
            field=models.CharField(default='null', max_length=40),
            preserve_default=False,
        ),
    ]
| [
"patobarrero@gmail.com"
] | patobarrero@gmail.com |
6f703f4726f327903524d0b1053b388413970267 | 4fc4aa7cea78eca63b42fe99f3a58d55b3851308 | /source_code/gsdmm.py | a4a8a1435e7f54c03454b80e953c334eadb16fe0 | [] | no_license | zshwuhan/gsdmm-2 | 6ec8981faadb1b32e416a6b0aa504cca0fa6936c | e5309a69d4b1f33e3c2831b3c9a7e6deca82c46e | refs/heads/master | 2023-03-15T18:17:19.236918 | 2020-03-22T20:26:50 | 2020-03-22T20:26:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,322 | py | import timeit
import logging
import numpy as np
from collections import Counter
from tqdm import tqdm, trange
import preprocess
import pickle
class GSDMM:
    """Collapsed Gibbs sampler for the Dirichlet Multinomial Mixture model
    (GSDMM) of Yin & Wang (KDD 2014) for short-text clustering.

    Notation (paper -> attribute):
        m_z  -> num_docs_per_topic
        n_z  -> num_words_per_topic
        n_zw -> word_count_num_topics_by_vocab_size
        z_d  -> topic_label_by_doc
    """

    def __init__(self, documents, vocab_size, num_topics=50, alpha=0.1, beta=0.1):
        """Randomly assign each document to a cluster and build count tables.

        :param documents: list of documents, each a list of integer word ids
        :param vocab_size: number of distinct word ids in the corpus
        :param num_topics: K, the maximum number of clusters
        :param alpha: Dirichlet prior on the document-cluster distribution
        :param beta: Dirichlet prior on the cluster-word distribution
        """
        logging.getLogger(__name__).info(f'Initializing an GSDMM instance with parameters:\n'
                                         f'K: {num_topics}\n'
                                         f'alpha: {alpha}\n'
                                         f'beta: {beta}')
        self.documents = documents
        self.num_topics = num_topics
        self.vocab_size = vocab_size
        self.alpha = alpha
        self.beta = beta
        self.num_docs = len(self.documents)
        self.num_docs_per_topic = np.zeros(num_topics, dtype=np.uintc)
        self.num_words_per_topic = np.zeros(num_topics, dtype=np.uintc)
        self.topic_label_by_doc = np.zeros(self.num_docs, dtype=np.uintc)
        self.word_count_num_topics_by_vocab_size = np.zeros((num_topics, vocab_size), dtype=np.uintc)
        # Initialization: draw each document's cluster uniformly at random,
        # then update the document/word count tables accordingly.
        for doc_index, each_doc in enumerate(documents):
            topic_index = np.random.multinomial(1, (1 / float(num_topics)) * np.ones(num_topics)).argmax()
            num_words_in_doc = len(each_doc)
            self.topic_label_by_doc[doc_index] = topic_index
            self.num_docs_per_topic[topic_index] += 1
            self.num_words_per_topic[topic_index] += num_words_in_doc
            for word_id in each_doc:
                self.word_count_num_topics_by_vocab_size[topic_index, word_id] += 1

    def _count_non_zero_docs_topics(self):
        """Number of clusters currently holding at least one document."""
        return len(self.num_docs_per_topic.nonzero()[0])

    def gibbs_sampling_topic_reassignment(self, iterations=10):
        """Run the collapsed Gibbs sampler for *iterations* sweeps.

        Returns the number of non-empty clusters recorded at the start of
        each sweep (typically shrinks toward the effective cluster count).
        """
        num_non_zero_topic_clusters = []
        for _ in trange(iterations):
            num_non_zero_topic_clusters.append(self._count_non_zero_docs_topics())
            for doc_index, each_doc in enumerate(self.documents):
                # Remove the document from its current cluster's statistics.
                current_topic_index = self.topic_label_by_doc[doc_index]
                num_words_in_doc = len(each_doc)
                self.num_docs_per_topic[current_topic_index] -= 1
                self.num_words_per_topic[current_topic_index] -= num_words_in_doc
                for word_id in each_doc:
                    self.word_count_num_topics_by_vocab_size[current_topic_index, word_id] -= 1
                # Re-sample a cluster according to Equation 4 of the paper.
                prob_topic_assigned_to_doc = self.calc_normalized_topic_sampling_prob(each_doc)
                new_topic_index = np.random.multinomial(1, prob_topic_assigned_to_doc).argmax()
                # Add the document to its new cluster's statistics.
                self.topic_label_by_doc[doc_index] = new_topic_index
                self.num_docs_per_topic[new_topic_index] += 1
                self.num_words_per_topic[new_topic_index] += num_words_in_doc
                for word_id in each_doc:
                    self.word_count_num_topics_by_vocab_size[new_topic_index, word_id] += 1
        return num_non_zero_topic_clusters

    def calc_normalized_topic_sampling_prob(self, doc):
        """Return the normalized probability of assigning *doc* to each
        cluster (Equation 4 of Yin & Wang 2014), computed in log space.

        Fixes over the original implementation:
        * the inner product now runs once per *distinct* word — iterating over
          occurrences applied each word's factor N_w times, overweighting
          repeated words relative to Eq. 4;
        * normalization uses the log-sum-exp shift instead of exponentiating
          raw log probabilities into platform-specific float128, and a
          zero-sum vector is returned unnormalized (numpy signals 0/0 with a
          warning/nan, never the ZeroDivisionError the old code caught).
        """
        ln_prob = np.zeros(self.num_topics)
        doc_word_freq = Counter(doc)
        num_words_in_doc = len(doc)
        ln_left_denominator = np.log(self.num_docs - 1 + self.num_topics * self.alpha)
        for topic_index in range(self.num_topics):
            ln_left_numerator = np.log(self.num_docs_per_topic[topic_index] + self.alpha)
            ln_right_numerator = 0.0
            ln_right_denominator = 0.0
            # Product over distinct words w in doc, inner product j = 1..N_w.
            for word_id, word_freq in doc_word_freq.items():
                for j in range(1, word_freq + 1):
                    ln_right_numerator += np.log(self.word_count_num_topics_by_vocab_size[topic_index, word_id]
                                                 + self.beta + j - 1)
            for i in range(1, num_words_in_doc + 1):
                ln_right_denominator += np.log(self.num_words_per_topic[topic_index]
                                               + self.vocab_size * self.beta + i - 1)
            ln_prob[topic_index] = ln_left_numerator + ln_right_numerator - ln_left_denominator - ln_right_denominator
        # log-sum-exp: shifting by the max keeps at least one exponent at 0,
        # preventing the whole vector from underflowing to zero.
        prob = np.exp(ln_prob - ln_prob.max())
        total = prob.sum()
        if total > 0:
            prob = prob / total
        # float64 keeps compatibility with np.random.multinomial.
        return prob.astype(np.float64)

    def predict_doc_topic_labels(self):
        """Return the current cluster label of every document, in corpus order."""
        logging.getLogger(__name__).info('generating list of predicted labels for docs')
        return [self.topic_label_by_doc[doc_index] for doc_index in range(self.num_docs)]
def make_pickle(filename, obj_to_pickle):
    """Serialize *obj_to_pickle* to *filename* with pickle.

    Returns None (interface unchanged). The original log message was an
    f-string with no placeholder, so the destination path was never logged.
    """
    logging.getLogger(__name__).info('dumping pickle file to: %s', filename)
    with open(filename, 'wb') as w_file:
        pickle.dump(obj_to_pickle, w_file)
    return None
def predict_most_populated_clusters(gsdmm, vocab, filename, num_wanted_words=5, num_wanted_topics=20):
    """Append a report of the most populated predicted clusters (and each
    cluster's most frequent words) to *filename*.

    Returns {topic_label: [(word, frequency), ...]}.
    NOTE(review): the log string contains a literal '(unknown)' where a
    filename placeholder appears to have been lost — confirm and restore.
    """
    logging.getLogger(__name__).info(f'Writing output file with predicted clusters, saving to: (unknown)\n'
                                     f'Number of non-zero doc topics: {num_wanted_topics}')
    # Clusters ranked by document count, descending.
    highest_num_docs = np.sort(gsdmm.num_docs_per_topic)[::-1][:num_wanted_topics]
    most_docs_topics = np.argsort(gsdmm.num_docs_per_topic)[::-1][:num_wanted_topics]
    with open(filename, 'a') as w_file:
        print(f'Predicted number of documents per topic for most populated clusters: {highest_num_docs}\n'
              f'Predicted topic labels with highest numbers of documents: {most_docs_topics}', file=w_file)
    most_frequent_words_by_topic = {}
    with open(filename, 'a') as w_file:
        for topic in most_docs_topics:
            # Top word ids and their counts for this cluster, descending.
            most_freq_words_ids = np.argsort(gsdmm.word_count_num_topics_by_vocab_size[topic, :])[::-1][:num_wanted_words]
            highest_word_freq = np.sort(gsdmm.word_count_num_topics_by_vocab_size[topic, :])[::-1][:num_wanted_words]
            most_frequent_words = [(vocab.id_to_word[word_id], freq) for word_id, freq in zip(most_freq_words_ids,
                                                                                              highest_word_freq)]
            most_frequent_words_by_topic[topic] = most_frequent_words
            print(f'Predicted topic label: {topic}\tMost frequent words: {most_frequent_words}', file=w_file)
    return most_frequent_words_by_topic
def true_most_populated_clusters(true_clusters, documents, vocab, filename, num_wanted_words=5, num_wanted_topics=20):
    """Write (truncating) a report of the ground-truth clusters' sizes and
    most frequent words to *filename*.

    :param true_clusters: list of clusters, each a list of document ids
    Returns {topic_index: [(word, frequency), ...]}.
    NOTE(review): the log string contains a literal '(unknown)' where a
    filename placeholder appears to have been lost — confirm and restore.
    """
    logging.getLogger(__name__).info(f'Starting output file with true clusters, saving to: (unknown)')
    # true_clusters is a list of list of docs in a topic, len(list of docs) == num_docs_per_topic
    num_topics = len(true_clusters)
    cluster_size = []
    for cluster in true_clusters:
        cluster_size.append(len(cluster))
    num_docs_per_topic = cluster_size[:num_wanted_topics]
    with open(filename, 'w') as w_file:
        print(f'Number of documents per topic in true clusters: {num_docs_per_topic}', file=w_file)
    # Build per-cluster word counts from the ground-truth assignment.
    word_count_per_topic = np.zeros(num_topics, dtype=np.uintc)
    word_count_num_topics_by_vocab_size = np.zeros((num_topics, vocab.size()), dtype=np.uintc)
    for topic_index, each_topic in enumerate(true_clusters):
        for each_doc_id in each_topic:
            word_count_per_topic[topic_index] += len(documents[each_doc_id])
            for word_id in documents[each_doc_id]:
                word_count_num_topics_by_vocab_size[topic_index, word_id] += 1
    most_frequent_words_by_topic = {}
    with open(filename, 'a') as w_file:
        for topic_index in range(num_topics):
            most_freq_words_ids = np.argsort(word_count_num_topics_by_vocab_size[topic_index, :])[::-1][
                                  :num_wanted_words]
            highest_word_freq = np.sort(word_count_num_topics_by_vocab_size[topic_index, :])[::-1][:num_wanted_words]
            most_frequent_words = [(vocab.id_to_word[word_id], freq) for word_id, freq in zip(most_freq_words_ids,
                                                                                              highest_word_freq)]
            most_frequent_words_by_topic[topic_index] = most_frequent_words
            print(f'True topic label: {topic_index}\tMost frequent words: {most_frequent_words}', file=w_file)
        print(f'***end of true labels***\n', file=w_file)
    return most_frequent_words_by_topic
def main():
    """Ad-hoc driver: run GSDMM on a toy corpus and build the true-cluster
    report for the StackOverflow titles (input paths are hard-coded).

    NOTE(review): the calls to predict_most_populated_clusters and
    true_most_populated_clusters below omit the required `filename`
    argument and will raise TypeError — supply output paths.
    """
    # please ignore code blocks below; they're there just for testing
    toy_filename = '../data/toy.txt'
    sofl_filename = '../data/title_StackOverflow.txt'
    toy_corpus = preprocess.load_corpus(toy_filename)
    stack_overflw_corpus = preprocess.load_corpus(sofl_filename)
    toy_vocab = preprocess.Vocabulary()
    toy_docs = [toy_vocab.doc_to_ids(doc) for doc in toy_corpus]
    vocab = preprocess.Vocabulary()
    stack_overflow_docs = [vocab.doc_to_ids(doc) for doc in stack_overflw_corpus]
    gsdmm_toy = GSDMM(toy_docs, toy_vocab.size())
    num_toy_topic_clusters_by_iterations = gsdmm_toy.gibbs_sampling_topic_reassignment()
    toy_predicted_labels = gsdmm_toy.predict_doc_topic_labels()
    toy_predicted_most_freq_words_by_topic = predict_most_populated_clusters(gsdmm_toy, toy_vocab)
    # true labels and clusters
    labels = preprocess.load_labels('../data/label_StackOverflow.txt')
    true_clusters = preprocess.make_topic_clusters(labels)
    true_most_frequent_words_by_topic = true_most_populated_clusters(true_clusters, stack_overflow_docs, vocab)
if __name__ == '__main__':
    main()  # run the demo pipeline only when executed as a script
| [
"noonsky.noon@gmail.com"
] | noonsky.noon@gmail.com |
c555ec981657f8f94ed686047fd33e4b1fa41ff2 | d04811d81be3cddf35e7abe87ddf10e18226a35b | /src/python&db/payment.py | a3423828ed1da70c15fa117fb57e2527b18a447e | [] | no_license | Coutcault/Betting-App-thingy | 09c62f610bd5917b8a3ee3f234acc2ecac2621df | 2754ddaba1ff6090b4ec73563e32aed8ff1195d5 | refs/heads/master | 2022-11-14T21:08:56.019251 | 2020-07-08T19:56:24 | 2020-07-08T19:56:24 | 270,734,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | # from venmo_api import Client
# Get your access token. You will need to complete the 2FA process
# access_token = Client.get_access_token(username='',
# password='')
# venmo = Client(access_token=access_token)
# Search for users. You get 50 results per page.
# users = venmo.user.search_for_users(query="Peter",
# page=2)
# for user in users:
# print(user.username)
# # Or, you can pass a callback to make it multi-threaded
# def callback(users):
# for user in users:
# print(user.username)
# venmo.user.search_for_users(query="peter",
# callback=callback,
# page=2,
# count=10)
# venmo.log_out('abb471a30b6a5fb0261b0c43b9a74deb81c548c90f10559f7ca47eddec601a72')
import requests
# Interactive Venmo OAuth token request (credentials typed at the prompt).
venmo_username = input('Username:')
venmo_password = input('Password:')
url = 'https://api.venmo.com/v1/oauth/access_token'
header ={
    'Content-Type': 'application/json'
}
body = {
    "phone_email_or_username": (f'{venmo_username}'),
    "client_id": '1',
    "password": (f'{venmo_password}')
}
# NOTE(review): the header declares JSON but `data=body` sends the payload
# form-encoded; `requests.post(url, headers=header, json=body)` is probably
# what was intended — confirm against the Venmo API.
r = requests.post(url, headers=header, data=body)
print (r)
"Coutcault@gmali.com"
] | Coutcault@gmali.com |
65b04bd650e98b58eddf10ab0e177dcaf8ad613a | 6c73e4abec647a97e011218fdb5e21d9a83323ee | /大蟒/e2f & f2e.py | 0d80911e505a633a22bb33f2bed8b2e19e0b4e84 | [] | no_license | laippmiles/Code_python3.5 | c16db354000d4c91b4455cb9b606ec82e5efe767 | 4d5fb81da5f7dc10b0b3b9961a80d8445cf49ecc | refs/heads/master | 2021-04-27T17:49:23.633804 | 2018-02-21T11:37:37 | 2018-02-21T11:37:37 | 122,328,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | e2f = {'dog':'chien','cat':'chat','walrus':'morse'};
print('e2f:',e2f);
print('what is \'walrus\':',e2f.get('walrus'));
f2e = {};
for name,contenrs in e2f.items():
f2e[contenrs]=name;
print('f2e:',f2e);
print('what is \'chien\':',f2e.get('chien'));
eset = set(e2f.keys());
print('eset:',eset);
| [
"laippmiles@gmail.com"
] | laippmiles@gmail.com |
b33a7b84bdeedeb3073d351fa989c3ee2eaaaaec | 1639de7d6da19ee382e8b381b847778a5d217edb | /backend/restserver/pipture/urls.py | c434263963c531b2f8865d8fe754788cc188513c | [] | no_license | pipture/Old-App-iPhone | ed24d957758330e60c69c9da03c314b2ee5ba01d | b6c51717beaee20b79c7d12935291bcc844de541 | refs/heads/master | 2020-04-26T14:55:52.877493 | 2013-05-28T11:33:26 | 2013-05-28T11:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | from django.conf.urls.defaults import *
from django.conf import settings
from restserver.pipture import views as pipture_view
# Route table for the pipture REST API (old-style Django `patterns`).
# Commented entries are retired endpoints kept for reference; the empty
# catch-all pattern must stay last so it does not shadow specific routes.
urlpatterns = patterns('',
    #(r'^get_all_series/', pipture_view.get_all_series_get),
    #(r'^new_series/', pipture_view.set_new_series_post),
    #(r'^new_album/', pipture_view.set_new_album_post),
    #(r'^get_albums_by_series/', pipture_view.get_albums_by_series_get),
    (r'^get_albums_by_series/', pipture_view.get_albums_by_series_get),
    #(r'^get_timeslots/', pipture_view.get_timeslots),
    #(r'^get_albums/', pipture_view.get_albums),
    (r'^get_timeslot_videos/', pipture_view.get_timeslot_videos),
    (r'^get_album_videos/', pipture_view.get_album_videos),
    #(r'^get_trailers/', pipture_view.get_trailers),
    (r'^set_timeslot/', pipture_view.set_timeslot),
    (r'^update_views/', pipture_view.update_views),
    (r'', pipture_view.index),
)
"vkubyshev@thumbtack.net"
] | vkubyshev@thumbtack.net |
946320ab12f007199aa42d1d44c388eee370370a | ffaf4a4b45b7709f752bac7691566eebd5244385 | /Titanium.py | cc7e37d63bc0490d59c5fa006595c1a950cf2964 | [
"Apache-2.0"
] | permissive | commandovmgit/Titanium | d5cc201c9972ad1b6f6b92db22e1a97fdfc65037 | c2e3c6f2d1107ca94d5f280713f86d052024cf4a | refs/heads/master | 2023-06-16T22:52:40.488577 | 2021-07-10T13:18:51 | 2021-07-10T13:18:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,791 | py | #------------------------AUTHOR:Lucksi------------------------#
#-------------------------------------------------------------#
import os
from Core import phishing
from Core import campaign
from Core import services
from Core import config
from Core.Support import Font
from time import sleep
import socket
Network = socket.gethostbyname(socket.gethostname())
def update():
    """Check connectivity and, on user confirmation, run the update script.

    NOTE(review): `int(input(...))` raises an uncaught ValueError on
    non-numeric input, and the `ris == 2` branch is identical to the final
    `else` — presumably "anything but 1 cancels" behavior.
    """
    print(Font.Color.WHITE + "\n[+]" + Font.Color.GREEN + "CHECKING IF THERE IS AN INTERNET CONNECTION")
    sleep(1)
    if Network != "127.0.0.1":
        ris = int(input(Font.Color.GREEN + "\n[+]" + Font.Color.WHITE + "INTERNET FOUND ARE YOU SURE TO UPDATE TITANIUM(1)YES(2)NO" + Font.Color.GREEN + "\n\n[*TITANIUM*]" + Font.Color.WHITE + "-->"))
        if ris == 1 :
            os.system("Core/./update.sh")
            exit()
        elif ris == 2 :
            inp = input(Font.Color.WHITE + "\nPRESS ENTER TO CONTINUE")
        else :
            inp = input(Font.Color.WHITE + "\nPRESS ENTER TO CONTINUE")
    else:
        inp = input(Font.Color.RED + "[!]" + Font.Color.WHITE + "SORRY BUT LOOKS LIKE YOU HAVENT AN INTERNET CONNECTION SO IT IS IMPOSSIBILE TO UPDATE\nPRESS ENTER TO CONTINUE")
def agree_banner():
    """Print the ASCII-art banner stored in Banners/Main.txt."""
    # Context manager guarantees the handle is closed even if print fails
    # (the original used open/read/close with no error protection).
    with open("Banners/Main.txt", "r") as f:
        banner = f.read()
    print(Font.Color.GREEN + banner)
if os.getuid() != 0:
os.system("cls" if os.name == "nt" else "clear")
agree_banner()
print(Font.Color.RED + "A SIMPLE SOCIAL ENGINEERING TOOL:) CODED BY LUCKSI\n")
print("YOU MUST EXECUTE THIS PROGRAM AS ROOT TRY TO USE IT WITH SUDO:)")
exit()
else:
os.system("cls" if os.name == "nt" else "clear")
def banner(r):
    """Clear the screen and print the banner, version and contact info.

    The parameter *r* is kept for interface compatibility (callers pass
    r=True); the original immediately shadowed it while extracting the
    version string.
    """
    # The version is the last line of Version/Version.txt. (In the original,
    # the post-loop f.read() returned '' at EOF, so it effectively did the
    # same thing through a convoluted loop.)
    with open("Version/Version.txt", "r") as f:
        lines = f.read().splitlines()
    version = lines[-1] if lines else ""
    os.system("cls" if os.name == "nt" else "clear")
    print(Font.Color.YELLOW + "***************************************************************")
    agree_banner()
    print(Font.Color.WHITE + "A SIMPLE SOCIAL ENGINEERING TOOL:) CODED BY LUCKSI\n")
    print(Font.Color.WHITE + "[+]" + Font.Color.GREEN + "VERSION:" + version)
    print(
        Font.Color.YELLOW + "Instagram:lucks_022\nEMAIL:lukege287@gmail.com\nGIT-HUB:Lucksi\nWebsite:https://sosuke.altervista.org")
    print("***************************************************************")
def Main():
    """Interactive main menu: loops forever dispatching to the chosen mode."""
    while True:
        banner(r=True)
        try:
            scel = input(
                Font.Color.GREEN + "[*INSERT AN OPTION:*]" + Font.Color.WHITE + "\n(A)PHISHING-MODE(AVAIABLE)\n(B)SERVICES-MODE(AVAIABLE)\n(C)CAMPAIGN-MODE(AVAIABLE)\n(D)CONFIGURATION-FILE\n(E)UPDATE\n(F)EXIT" + Font.Color.GREEN + "\n\n[*TITANIUM*]" + Font.Color.WHITE + "-->")
            if scel == "a" or scel == "A":
                phishing.Phishing.main()
            elif scel == "b" or scel == "B":
                services.Services.main()
            elif scel == "c" or scel == "C":
                campaign.Campaign.main()
            elif scel == "d" or scel == "D":
                config.Config.main()
            elif scel == "e" or scel == "E":
                os.system("Core/./update.sh")
            elif scel == "f" or scel == "F":
                print(Font.Color.GREEN + "\nTHANKS FOR HAVE USED TITANIUM,HAVE A NICE DAY:)")
                exit()
        except ValueError:
            # NOTE(review): input() returns str and nothing here converts to
            # int, so this handler cannot fire from this body; the recursive
            # Main() call below would also grow the stack if it ever did.
            print(Font.Color.RED + "\n[!]" + Font.Color.WHITE + "OPS LOOKS LIKE YOU PRESS AN INVALID OPTION")
            inp = input("PRESS ENTER TO CONTINUE...")
            Main()
def agree():
    """Show the disclaimer and require explicit acceptance before continuing.

    Bug fix: the original compared the *function object* `agree` to the
    strings "YES"/"NO" (always False), so uppercase answers fell through to
    the bare `else: exit()`. The user's answer `agreement` is compared now.
    """
    check_root()
    agree_banner()
    agreement = str(input(
        Font.Color.BLUE + "THIS TOOL DOESN'T PROMOTE ANT TYPE OF ILLEGAL ACTIVITY ITS MADE ONLY FOR EDUCATIONAL PURPOSE AND TESTING,\nI DO NOT TAKE ANY RESPONSABILITY FOR ANY DAMAGE YOU WILL CAUSE.BY USING THIS TOOL YOU ACCEPT THIS CONDITION\nAND REMEMBER WHITH GREAT POWERS COMES GREAT RESPONSBILITES:)" + Font.Color.RED + "\nYES NO" + Font.Color.GREEN + "\n\n[*TITANIUM*]" + Font.Color.WHITE + "-->"))
    if agreement == "yes" or agreement == "YES":
        print(Font.Color.WHITE + "\nTHANK YOU GOOD HACKING:)")
        inp = input("\nPRESS ENTER TO USE TITANIUM")
        os.system("cls" if os.name == "nt" else "clear")
    elif agreement == "no" or agreement == "NO":
        print(Font.Color.RED + "YOU MUST ACCEPT THE AGREEMENT TO RUN THIS SCRIPT")
        exit()
    else:
        exit()
if __name__ == "__main__":
    agree()  # require disclaimer acceptance before showing the menu
    try:
        Main()
    except KeyboardInterrupt:
        print(
            Font.Color.RED + "\n[!]" + Font.Color.WHITE + "LOOKS LIKE YOU PRESSED CTRL'C EXIT..." + Font.Color.RED + "[!]")
        exit()
| [
"noreply@github.com"
] | commandovmgit.noreply@github.com |
8badae67c6d22d6be2a5ec4cd5580944385364b1 | 6cae2952e55f2e7c339ffd9e1913504aafc3808f | /listaArmas.py | c24af7812c42dd176476abcf646e0c2ce57db0bc | [] | no_license | bobyzoo/BattleOfTheSpirits | b06cb506f0acc4a363a44a6e5af558cb9996e378 | d87820db163e39d0a6cc6f7824571ac166a596ca | refs/heads/master | 2020-08-07T13:33:36.588361 | 2020-01-24T14:45:28 | 2020-01-24T14:45:28 | 213,470,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from Armas import *
# Catalogue of purchasable weapons, keyed by display name.
# NOTE(review): argument order follows the Armas constructor (category, name,
# cost, damage, weight, ...) — confirm against Armas.py.
armas = {'Adaga':Armas('coc','Adaga',2,'1d4 perfurante',0.5,''),
         'Machado':Armas('marcialcoc','Machado',10, '2d8 cortante',1,'') ,'espadaCurta':Armas('coc','Espada Curta',10,'1d6 2',10,'',4)}
"gabrieldossantosvargas@gmail.com"
] | gabrieldossantosvargas@gmail.com |
2c06ebbaf1dd7b970b2f9765fbade07de16e081c | b1e61e5246dc1602d54b0cb3541dfbc943a68e10 | /TestCases/Api.py | 127721acc184abab81e428c5e70471aa5182420f | [] | no_license | heizh/Interface-Automation-Test | c84a3ef9045f8589c7944e2539c60e76d5fc4891 | 6c69afd4da73d800e154a1bb6ae920cf3dfb64f6 | refs/heads/master | 2020-06-11T03:25:19.990329 | 2019-07-15T09:16:09 | 2019-07-15T09:16:09 | 193,837,877 | 1 | 0 | null | 2019-06-26T05:50:59 | 2019-06-26T05:50:59 | null | UTF-8 | Python | false | false | 628 | py | from Base.BaseRunner import ParametrizedTestCase
#from Base.BaseGetExcel import read_excel
from Base.BaseGetExcel import read_excels
from Base.BaseReq import Config
from Base.BaseElementEnmu import Element
from Base.BaseIni import BaseIni
class ApiTest(ParametrizedTestCase):
    """Data-driven API test: each Excel row describes one request to replay."""
    def test_api(self):
        """Load the case sheet and replay every case with the shared token."""
        ls = read_excels(Element.API_FILES)[0]  # read the case Excel sheet into a list
        token = self.token  # session token prepared by the base class
        Config().config_req(ls, token)  # send the cases + token to the request module
    @classmethod
    def setUpClass(cls):
        super(ApiTest, cls).setUpClass()
        # NOTE(review): bare attribute access has no effect; presumably meant
        # to prime/verify the token — confirm intent.
        cls.token
| [
"noreply@github.com"
] | heizh.noreply@github.com |
745bc0a9eb93fb4f57748962d2ec5b16092e87b0 | f76d6da19a089edcbc16eff34f0a919fe8ee2539 | /K means Clustering/CVIP_P2_Q3(4).py | 9e7e1d32a6e5d149ebd90585d8f21895596bde3b | [] | no_license | krishnasehgal/Image-Processing-1 | a75db38d220e2a3850a28804c61a0a81f2e55ecc | 848289aeb52f72706e53a010db91b59372c43ffb | refs/heads/master | 2020-04-17T05:52:20.898007 | 2019-01-17T21:50:29 | 2019-01-17T21:50:29 | 166,301,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py |
# coding: utf-8
# In[1]:
# Notebook-export artifacts: enable inline plotting and seed numpy from the
# author's UBIT string so runs are reproducible. The magic/import lines are
# duplicated because two notebook cells were concatenated.
get_ipython().run_line_magic('matplotlib', 'inline')
UBIT='ksehgal';
from copy import deepcopy
import numpy as np
np.random.seed(sum([ord(c) for c in UBIT]))
get_ipython().run_line_magic('matplotlib', 'inline')
from copy import deepcopy
import cv2
from matplotlib import pyplot as plt
# In[2]:
def dist(a, b, axis):
    """Euclidean (L2) norm of ``a - b``; ``axis=None`` collapses to a scalar."""
    difference = a - b
    return np.linalg.norm(difference, axis=axis)
# In[3]:
def mykmean(k,z):
    """Naive k-means over the pixels of image *z* (H x W x depth); returns
    (centers, per-pixel cluster ids, per-pixel labels, flattened data).

    NOTE(review): quirks preserved as-is — the randint centers are immediately
    overwritten by the first k pixels; the data is min-max normalized once but
    then rescaled by *255 on every loop iteration; `label` duplicates
    `clusters` since labels == range(k); debug prints are left in.
    """
    width, height, depth = z.shape
    reshape_z = np.reshape(
        z, (width * height, depth))
    a=0
    labels=[]
    label=[0 for j in range(len(reshape_z))]
    for i in range(k):
        labels.append(a)
        a+=1
    #print(labels)
    x=np.array(reshape_z)
    # Min-max normalize pixel values to [0, 1].
    x = (x-x.min())/(x.max()-x.min())
    print(x)
    center=np.random.randint(k, size=(k,3))  # NOTE(review): overwritten on the next line
    center=x[:k,:]
    C_old = np.zeros(center.shape)
    error = dist(center, C_old, None)
    print(error)
    distances=[]
    # Lloyd iterations until the centroids stop moving exactly.
    while error != 0:
        clusters=[]
        for i in range(len(x)):
            distances=[]
            for j in range(k):
                distances.append(dist(x[i], center[j],None))
            cluster=distances.index(min(distances))
            clusters.append(cluster)
        C_old = deepcopy(center)
        x *= 255.0/x.max()
        # Finding the new centroids by taking the average value
        for i in range(k):
            points=[]
            for j in range(len(x)):
                if clusters[j]==i:
                    points.append(x[j,:])
                    label[j]=labels[i]
            center[i] = np.mean(points, axis=0)
        error = dist(center, C_old, None)
    return center, clusters, label,x
# In[ ]:
# Driver: quantize baboon.jpg with k in {3, 5, 10, 20} and save the results.
# NOTE(review): the input path is absolute to the author's machine, and the
# last output name says _15 while the last k is 20 — confirm intended.
img = cv2.imread('/Users/krishna/Downloads/data/baboon.jpg')
width, height, depth = img.shape
z = np.float32(img)
k = [3,5,10,20]
i=0
lis=['task3_baboon_3.png','task3_baboon_5.png','task3_baboon_10.png','task3_baboon_15.png']
for item in k:
    center,cluster,label,x = mykmean(item,z)
    center = np.uint8(center)
    label=np.asarray(label)
    print(label.shape)
    print(label)
    print(center[0])
    # Replace each pixel with its cluster's centroid color.
    res=center[label]
    print(res.shape)
    print(img.shape)
    res2=res.reshape((img.shape)).astype('uint8')
    res2=np.asarray(res2)
    cv2.imwrite(lis[i],res2)
    i+=1
| [
"krishna.dpsite@gmail.com"
] | krishna.dpsite@gmail.com |
07b303013845878dffb7352deceb4ceea7081d66 | 8e18927bb14f2d5a0bb9a30e1f1d9f8297017e87 | /Lorem/test_readfiles.py | 33f3c2bb3118981246eee358fc278e87556073bc | [] | no_license | sngina/core | d8f7e28a9eea83b2d4d98e8b5a7a487946fa409a | 9a89cedef5ccb6a499e5969267b7af35703491f6 | refs/heads/master | 2023-05-30T02:54:10.432569 | 2021-06-12T07:59:55 | 2021-06-12T07:59:55 | 370,314,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | import unittest
import readfiles
class TestReadFiles(unittest.TestCase):
    """Unit-test container for the readfiles module.

    NOTE(review): no test methods are defined yet, so this suite currently
    passes vacuously.
    """
if __name__ == "__main__":
    unittest.main()  # discover and run the tests in this module
"sngina707@gmail.com"
] | sngina707@gmail.com |
4ee9c034912fbd3c02c4b5075ab4b2154b1ebb4a | 414db33a43c50a500741784eea627ba98bb63e27 | /0x0F-python-object_relational_mapping/102-relationship_cities_states_list.py | a213de184612054f6f68c22341ba51371e3e099c | [] | no_license | rayraib/holbertonschool-higher_level_programming | 2308ea02bd7f97eae3643e3ce0a6489cc1ad9ff5 | 6b4196eb890ffcb91e541431da9f5f57c5b85d4e | refs/heads/master | 2021-09-14T09:12:26.664653 | 2018-05-11T03:23:12 | 2018-05-11T03:23:12 | 113,070,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | #!/usr/bin/python3
'''
script that lists all City objects from the database hbtn_0e_101_usa
'''
from sqlalchemy.orm import sessionmaker
from sqlalchemy import (create_engine)
from relationship_city import City, Base
from relationship_state import State
import sys
if __name__ == "__main__":
    # CLI arguments: MySQL username, password, database name.
    uname = sys.argv[1]
    pw = sys.argv[2]
    db = sys.argv[3]
    # Create engine that opens connection between the class state and
    # the database with the data
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format
                           (uname, pw, db), pool_pre_ping=True)
    Base.metadata.create_all(engine)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    # Print every city grouped under its state as "id: city -> state".
    for state in session.query(State).all():
        state_name = state.name
        for city in state.cities:
            print("{}: {} -> {}".format(city.id, city.name, state_name))
    session.close()
| [
"binitarai11@gmail.com"
] | binitarai11@gmail.com |
089fa56f09a89e12a66c98007383fb4a61327a95 | 5657a7e514758d4d5634efb0e1faa2f848121b11 | /sqlite.py | a8caeb2d15126ceffa35804f5e055b045f0fccf5 | [] | no_license | BetterLuke/V-collection-tools | dcdd0c0a1fefd618fbee16e4511cc877ad1334e0 | cd8d904862c322000e5c3bc6a4663e0cc9c4e06d | refs/heads/master | 2020-03-29T13:08:27.361397 | 2018-09-22T12:26:39 | 2018-09-22T12:26:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | import sqlite3
import os
from mutagen.mp4 import MP4
from mutagen import File
import configparser
# Library root and the SQLite database location come from config.ini;
# `conn`/`cur` are module-level and shared by the functions below.
config = configparser.ConfigParser()
config.read('./config.ini')
root_path = config['path']['root']
conn = sqlite3.connect(config['path']['db'])
cur = conn.cursor()
def get_mp3_title(path):
    """Return the embedded title tag of an audio file.

    Files ending in 'mp3' are read with mutagen's generic loader (ID3
    'TIT2'); everything else is assumed to be an MP4 container ('©nam').
    NOTE(review): raises AttributeError/TypeError when the tag is missing.
    """
    if path.lower().endswith('mp3'):
        afile = File(path)
        return afile.tags.get("TIT2").text[0]
    else:
        afile = MP4(path)
        return afile.tags.get("©nam")[0]
def _get_path(path, current_path):
    """Recursively walk *path*, inserting each audio title and directory into
    the `items` table (as a path relative to the library root) if absent.

    Bug fix: sqlite3 leaves Cursor.rowcount at -1 for SELECT statements, so
    the original `cur.rowcount == -1` test was always true and re-inserted
    duplicate rows on every run; an actual fetched row is checked instead.
    """
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isfile(item_path):
            title = get_mp3_title(item_path)
            db_path = os.path.join(current_path, title)
            cur.execute('select Id from items where Path=?', [db_path])
            if cur.fetchone() is None:
                cur.execute("insert into items (Path) values (?);", [db_path])
        if os.path.isdir(item_path):
            db_path = os.path.join(current_path, item)
            cur.execute('select Id from items where Path=?', [db_path])
            if cur.fetchone() is None:
                cur.execute("insert into items (Path) values (?);", [db_path])
            _get_path(item_path, os.path.join(current_path, item))
def get_path(path):
    """Public entry point: index *path*, storing library-relative paths."""
    _get_path(path, '')
# get_path(root_path)
# Smoke query left by the author: look up one known library path, print its
# id, and persist any pending inserts.
cur.execute("select id from items WHERE Path='1.V collection 1【Leader】\【★】Cantarella'")
print(cur.fetchone()[0])
conn.commit()
| [
"axel10@163.com"
] | axel10@163.com |
aea9fd66601e1d252dc445341c6f56ac0b6e60b7 | e295a8593681f70fd9bacf4d334c20b7eb5920b7 | /resources/notifications.py | 711488418b8012551433e53c064b28e7f44f46af | [] | no_license | afolaa27/uconnect-flask | b2d968890eaa9e5d844941e7525a47c72e379076 | c0d8e78fa5cc6ee5659571a0bfad9ef662951e84 | refs/heads/master | 2023-04-23T16:07:01.754079 | 2021-01-14T04:47:54 | 2021-01-14T04:47:54 | 242,198,623 | 0 | 0 | null | 2021-05-06T19:54:26 | 2020-02-21T17:51:08 | Python | UTF-8 | Python | false | false | 1,297 | py | import models
from flask import Blueprint, request, jsonify
from flask_login import current_user, login_required
from playhouse.shortcuts import model_to_dict
notifications = Blueprint('notifications', 'notifications')
#creates a notification
@notifications.route('/<id>', methods=['POST'])
@login_required
def create_notification(id):
payload = request.get_json()
notification = models.Notification.create(
Seller_id=payload['Seller_id'],
Book_id=id,
Buyer_id= current_user.id,
message=payload['message'])
notification_to_dict = model_to_dict(notification)
print(notification_to_dict)
notification_to_dict['Seller_id'].pop('password')
notification_to_dict['Buyer_id'].pop('password')
notification_to_dict['Book_id']['owner'].pop('password')
return jsonify(
data=notification_to_dict,
message="Notification created",
status=200),200
#gets all notification for logged in user
@notifications.route('/', methods=['GET'])
@login_required
def get_notifications():
current_user_notifications= [model_to_dict(nots) for nots in current_user.notification]
for i in current_user_notifications:
i['User_id'].pop('password')
i['Book_Id']['owner'].pop('password')
return jsonify(data=current_user_notifications,
message="got all notifications",
status=200),200 | [
"afolaa27@uwgb.edu"
] | afolaa27@uwgb.edu |
8e33adabba127996ba29511c9825a48fbc037722 | 8987e044c86fdf53ab8996f433689852716fb277 | /Bioinformatics2/week2/PairedComposition.py | a5d8041bf1dd2473cbe78f5e09b91f21acc8c8b0 | [] | no_license | zssasa/Bioinformatics | fab02b503ae8063090c0a46bf90c899bc59b3dd4 | 4cfe5234b46d4ad7e84f4a90aa07a6e7b93666c2 | refs/heads/master | 2020-04-02T08:05:40.102613 | 2016-07-30T14:32:37 | 2016-07-30T14:32:37 | 62,781,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # -*- coding: utf-8 -*-
__author__ = 'zhangsheng'
def PairedComposition(text, k, d):
result = []
for i in range(len(text)-2*k-d+1):
result.append([text[i:i+k],text[i+k+d:i+2*k+d]])
result = sorted(result)
return result
if __name__ == '__main__':
text = 'TAATGCCATGGGATGTT'
k = 3
d = 2
s = PairedComposition(text,k,d)
for pair in s:
print('(%s|%s)' % (pair[0],pair[1]),end=' ')
| [
"814950812@qq.com"
] | 814950812@qq.com |
0144b7ac8a7b779fff14f402809e08641264f959 | b9c0946e13675e6579d0320baa81a088e6bc2d5e | /HospitalManagementapp/views.py | b994a8db437a58bdc66a5f466f80dbd127465261 | [] | no_license | ashishchauhan1234/HospitalManagement | 75df214b08db718d46b106754bcd125b6cb829db | 97efcbb01d21a399c3c18e25deec5ab085183685 | refs/heads/main | 2023-06-16T03:11:13.844967 | 2021-07-13T19:38:16 | 2021-07-13T19:38:16 | 384,480,971 | 0 | 0 | null | 2021-07-12T10:28:37 | 2021-07-09T15:37:21 | HTML | UTF-8 | Python | false | false | 29,352 | py | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from paypal.standard.forms import PayPalPaymentsForm
from .forms import *
from .models import *
# Create your views here.
def index(request):
return HttpResponse("hello World!!!")
@login_required
def department(request):
if request.method == "POST":
dept_form_vdf= DepartmentForm(request.POST)
if dept_form_vdf.is_valid():
# dept= Department()
# dept.name= dept_form_vdf.cleaned_data['name']
# dept.head= dept_form_vdf.cleaned_data['head']
# dept.save()
dept_form_vdf.save()
dept_vdf= DepartmentForm()
return render(request, 'AddDepartment.html',{"vdf": dept_vdf,'success':"Department Data is saved"})
else:
return render(request, 'AddDepartment.html',{"vdf": dept_form_vdf,'success':"Department Data is not "
"saved"})
dept_vdf= DepartmentForm()
return render(request, 'AddDepartment.html', {"vdf": dept_vdf })
def updateDepartment(request,id):
dept_data= Department.objects.get(pk= id)
if request.method == "POST":
dept_form_vdf= DepartmentForm(request.POST)
if dept_form_vdf.is_valid():
new_dept= Department()
new_dept.name= dept_form_vdf.cleaned_data['name']
new_dept.head= dept_form_vdf.cleaned_data['head']
new_dept.id= dept_data.id
new_dept.save()
return redirect("Show_Department_Details")
else:
return render(request, 'AddDepartment.html', {"vdf": dept_form_vdf, 'success': "Department Data is not "
"saved"})
dept_vdf= DepartmentForm( instance= dept_data)
return render(request, 'AddDepartment.html', {"vdf": dept_vdf})
def deleteDepartment(request,id):
dept_data= Department.objects.get(pk= id)
dept_data.delete()
return redirect('Show_Department_Details')
def showDepartment(request):
department_list_all= Department.objects.all()
page_wise= Paginator(department_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
department_list= page_wise.get_page(current_page)
return render(request, 'ShowDepartment.html',{'dept_list': department_list})
@login_required
def doctor(request):
if request.method == "POST":
doc_form_vdf= DoctorForm(request.POST)
if doc_form_vdf.is_valid():
doc= Doctor()
doc.name= doc_form_vdf.cleaned_data['name']
doc.d_o_b= doc_form_vdf.cleaned_data['d_o_b']
doc.gender= doc_form_vdf.cleaned_data['gender']
doc.phone= doc_form_vdf.cleaned_data['phone']
doc.email= doc_form_vdf.cleaned_data['email']
doc.address= doc_form_vdf.cleaned_data['address']
doc.degree= doc_form_vdf.cleaned_data['degree']
doc.department= doc_form_vdf.cleaned_data['department']
doc.save()
doc_vdf= DoctorForm()
return render(request, 'AddDoctor.html',{"vdf": doc_vdf,'success':"new doctor Data is saved"})
else:
return render(request, 'AddDoctor.html',{"vdf": doc_form_vdf,'success':"Data is not saved"})
doc_vdf= DoctorForm()
return render(request, 'AddDoctor.html', {"vdf": doc_vdf })
def updateDoctor(request,id):
doc_data= Doctor.objects.get(pk= id)
if request.method == "POST":
doc_form_vdf= DoctorForm(request.POST)
if doc_form_vdf.is_valid():
new_doc= Doctor()
new_doc.name = doc_form_vdf.cleaned_data['name']
new_doc.d_o_b = doc_form_vdf.cleaned_data['d_o_b']
new_doc.gender = doc_form_vdf.cleaned_data['gender']
new_doc.phone = doc_form_vdf.cleaned_data['phone']
new_doc.email = doc_form_vdf.cleaned_data['email']
new_doc.address = doc_form_vdf.cleaned_data['address']
new_doc.degree = doc_form_vdf.cleaned_data['degree']
new_doc.department = doc_form_vdf.cleaned_data['department']
new_doc.id= doc_data.id
new_doc.save()
return redirect("Show_Doctor_Details")
else:
return render(request, 'AddDoctor.html', {"vdf": doc_form_vdf, 'success': "Doctor Data is not "
"saved"})
doc_vdf= DoctorForm( instance= doc_data)
return render(request, 'AddDoctor.html', {"vdf": doc_vdf})
def deleteDoctor(request,id):
doc_data= Doctor.objects.get(pk= id)
doc_data.delete()
return redirect('Show_Doctor_Details')
def showDoctor(request):
doctor_list_all= Doctor.objects.all()
page_wise= Paginator(doctor_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
doctor_list= page_wise.get_page(current_page)
return render(request, 'ShowDoctor.html', {'doctor_list': doctor_list})
@login_required
def patient(request):
if request.method == "POST":
patient_form_vdf= PatientForm(request.POST)
if patient_form_vdf.is_valid():
p= Patient()
p.name= patient_form_vdf.cleaned_data['name']
p.gender= patient_form_vdf.cleaned_data['gender']
p.d_o_b= patient_form_vdf.cleaned_data['d_o_b']
p.age= patient_form_vdf.cleaned_data['age']
p.blood= patient_form_vdf.cleaned_data['blood']
p.disease= patient_form_vdf.cleaned_data['disease']
p.address= patient_form_vdf.cleaned_data['address']
p.phone= patient_form_vdf.cleaned_data['phone']
p.date= patient_form_vdf.cleaned_data['date']
p.room_no= patient_form_vdf.cleaned_data['room_no']
p.doctor= patient_form_vdf.cleaned_data['doctor']
p.save()
p_vdf= PatientForm()
return render(request, 'AddPatient.html',{"vdf": p_vdf,'success':"new patient data is saved"})
else:
return render(request, 'AddPatient.html',{"vdf": patient_form_vdf,'success':"Data is not saved"})
p_vdf= PatientForm()
return render(request, 'AddPatient.html', {"vdf": p_vdf })
def updatePatient(request,id):
patient_data= Patient.objects.get(pk= id)
if request.method == "POST":
patient_form_vdf= PatientForm(request.POST)
if patient_form_vdf.is_valid():
new_patient= Patient()
new_patient.name= patient_form_vdf.cleaned_data['name']
new_patient.gender= patient_form_vdf.cleaned_data['gender']
new_patient.d_o_b= patient_form_vdf.cleaned_data['d_o_b']
new_patient.age= patient_form_vdf.cleaned_data['age']
new_patient.blood= patient_form_vdf.cleaned_data['blood']
new_patient.disease= patient_form_vdf.cleaned_data['disease']
new_patient.address= patient_form_vdf.cleaned_data['address']
new_patient.phone= patient_form_vdf.cleaned_data['phone']
new_patient.date= patient_form_vdf.cleaned_data['date']
new_patient.room_no= patient_form_vdf.cleaned_data['room_no']
new_patient.doctor= patient_form_vdf.cleaned_data['doctor']
new_patient.id= patient_data.id
new_patient.save()
return redirect("Show_Patient_Details")
else:
return render(request, 'AddPatient.html', {"vdf": patient_form_vdf, 'success': "Patient Data is not "
"saved"})
patient_vdf= PatientForm( instance= patient_data)
return render(request, 'AddPatient.html', {"vdf": patient_vdf})
def deletePatient(request,id):
patient_data= Patient.objects.get(pk= id)
patient_data.delete()
return redirect('Show_Patient_Details')
def showPatient(request):
patient_list_all= Patient.objects.all()
page_wise= Paginator(patient_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
patient_list= page_wise.get_page(current_page)
return render(request,'ShowPatient.html',{'patient_list' : patient_list })
@login_required
def room(request):
if request.method == 'POST':
room_form_vdf= RoomForm(request.POST)
if room_form_vdf.is_valid():
r= Room()
r.room_no= room_form_vdf.cleaned_data['room_no']
r.room_type= room_form_vdf.cleaned_data['room_type']
r.status= room_form_vdf.cleaned_data['status']
r.price= room_form_vdf.cleaned_data['price']
r.no_of_bed= room_form_vdf.cleaned_data['no_of_bed']
r.save()
r_vdf= RoomForm()
return render(request, 'AddRoom.html',{'vdf':r_vdf, 'success':'New room data has successfully save...'})
else:
return render(request, 'AddRoom.html',{'vdf':room_form_vdf, 'success':'New room data has not save...'})
r_vdf= RoomForm()
return render(request, 'AddRoom.html', {'vdf': r_vdf, 'success': ''})
def updateRoom(request,id):
room_data= Room.objects.get(pk= id)
if request.method == "POST":
room_form_vdf= RoomForm(request.POST)
if room_form_vdf.is_valid():
new_room= Room()
new_room.room_no= room_form_vdf.cleaned_data['room_no']
new_room.room_type= room_form_vdf.cleaned_data['room_type']
new_room.status= room_form_vdf.cleaned_data['status']
new_room.price= room_form_vdf.cleaned_data['price']
new_room.no_of_bed= room_form_vdf.cleaned_data['no_of_bed']
new_room.id= room_data.id
new_room.save()
return redirect("Show_Room_Details")
else:
return render(request, 'AddRoom.html', {"vdf": room_form_vdf, 'success': "Room Data is not "
"saved"})
room_vdf= RoomForm( instance= room_data)
return render(request, 'AddRoom.html', {"vdf": room_vdf})
def deleteRoom(request,id):
room_data= Room.objects.get(pk= id)
room_data.delete()
return redirect('Show_Room_Details')
def showRoom(request):
room_list_all= Room.objects.all()
page_wise= Paginator(room_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
room_list= page_wise.get_page(current_page)
return render(request,'ShowRoom.html',{ "room_list" : room_list })
@login_required
def checkout(request):
if request.method == "POST":
checkout_form_vdf= CheckoutForm(request.POST)
if checkout_form_vdf.is_valid():
c= Checkout()
c.patient= checkout_form_vdf.cleaned_data['patient']
c.gender= checkout_form_vdf.cleaned_data['gender']
c.age= checkout_form_vdf.cleaned_data['age']
c.contact= checkout_form_vdf.cleaned_data['contact']
c.address= checkout_form_vdf.cleaned_data['address']
c.disease= checkout_form_vdf.cleaned_data['disease']
c.d_o_b= checkout_form_vdf.cleaned_data['d_o_b']
c.date_of_adm= checkout_form_vdf.cleaned_data['date_of_adm']
c.date_of_dis= checkout_form_vdf.cleaned_data['date_of_dis']
c.room_no= checkout_form_vdf.cleaned_data['room_no']
c.total_bill= checkout_form_vdf.cleaned_data['total_bill']
c.save()
latest_payment= Checkout.objects.latest('bill_no')
request.session['payer_id']= latest_payment.bill_no
request.session['first_name']= str(c.patient)
request.session['contact_phone']= c.contact
request.session['amount']= str(c.total_bill)
return redirect('Payment_Process')
else:
return render(request, "AddCheckout.html",{'vdf':checkout_form_vdf, 'success': "your billing data has not saved"})
check_vdf= CheckoutForm()
return render(request, "AddCheckout.html", {'vdf':check_vdf, 'success': " "})
def updateCheckout(request,id):
checkout_data= Checkout.objects.get(pk= id)
if request.method == "POST":
checkout_form_vdf= CheckoutForm(request.POST)
if checkout_form_vdf.is_valid():
new_checkout= Checkout()
new_checkout.patient= checkout_form_vdf.cleaned_data['patient']
new_checkout.gender= checkout_form_vdf.cleaned_data['gender']
new_checkout.age= checkout_form_vdf.cleaned_data['age']
new_checkout.contact= checkout_form_vdf.cleaned_data['contact']
new_checkout.address= checkout_form_vdf.cleaned_data['address']
new_checkout.disease= checkout_form_vdf.cleaned_data['disease']
new_checkout.d_o_b= checkout_form_vdf.cleaned_data['d_o_b']
new_checkout.date_of_adm= checkout_form_vdf.cleaned_data['date_of_adm']
new_checkout.date_of_dis= checkout_form_vdf.cleaned_data['date_of_dis']
new_checkout.room_no= checkout_form_vdf.cleaned_data['room_no']
new_checkout.total_bill= checkout_form_vdf.cleaned_data['total_bill']
new_checkout.bill_no= checkout_data.bill_no
new_checkout.payment_status= checkout_data.payment_status
new_checkout.save()
if checkout_data.payment_status != 'Confirm' :
latest_payment = Checkout.objects.latest('bill_no')
request.session['payer_id'] = latest_payment.bill_no
request.session['first_name'] = str(new_checkout.patient)
request.session['contact_phone'] = new_checkout.contact
request.session['amount'] = str(new_checkout.total_bill)
return redirect('Payment_Process')
return redirect("Show_Checkout_Details")
else:
return render(request, 'AddCheckout.html', {"vdf": checkout_form_vdf, 'success': "Checkout Data is not "
"saved"})
checkout_vdf= CheckoutForm( instance= checkout_data)
return render(request, 'AddCheckout.html', {"vdf": checkout_vdf})
def deleteCheckout(request,id):
checkout_data= Checkout.objects.get(pk= id)
checkout_data.delete()
return redirect('Show_Checkout_Details')
def showCheckout(request):
checkout_list_all= Checkout.objects.all()
page_wise= Paginator(checkout_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
checkout_list= page_wise.get_page(current_page)
return render(request, 'ShowCheckout.html',{'checkout_list': checkout_list})
def paymentProcess(request):
host= request.get_host()
paypal_dict= {
'business': settings.PAYPAL_RECEIVER_EMAIL,
'name': str(request.session['first_name']),
'amount': request.session['amount'],
'phone': request.session['contact_phone'],
'receipt_no': str(request.session['payer_id']),
'item_name': 'Checkout #' + str(request.session['payer_id']),
'invoice': str(request.session['payer_id']),
'currency_code': 'INR',
'notify_url': 'http://{}{}'.format(host, reverse('paypal-ipn')),
'done_return': 'http://{}{}'.format(host, reverse('Payment_Done')),
'cancel_return': 'http://{}{}'.format(host, reverse('Payment_Cancelled')),
}
form= PayPalPaymentsForm(initial= paypal_dict)
return render(request, 'PaymentProcess.html', {'form': form, 'paypal_dic': paypal_dict})
@csrf_exempt
def payPalDone(request):
# get the details of the paypal
checkout_obj= Checkout.objects.get(pk= request.session['payer_id'])
checkout_obj.payment_status= "Confirm"
checkout_obj.save()
del request.session['payer_id']
return render(request, 'PaymentDone.html',{'pay': "Payment Successfully done"} )
@csrf_exempt
def payPalCancelled(request):
# Delete the order details
checkout_obj= Checkout.objects.get(pk= request.session['payer_id'])
# order_obj.delete()
checkout_obj.payment_status= "Pending"
checkout_obj.save()
del request.session['payer_id']
return render(request, 'PaymentCancelled.html',{'pay': "payment not done"} )
@login_required
def visitor(request):
if request.method == 'POST':
visitor_vdf= VisitorForm(request.POST)
if visitor_vdf.is_valid():
visitor_vdf.save()
v_vdf= VisitorForm()
return render(request, 'AddVisitor.html',{'vdf':v_vdf,'success': "New visitor data has successfully "
"saved"})
else:
return render(request, 'AddVisitor.html', {'vdf': visitor_vdf, 'success': "New visitor data has not saved"})
vdf= VisitorForm()
return render(request, 'AddVisitor.html', {'vdf': vdf, 'success': ""})
def updateVisitor(request,id):
visitor_data= Visitor.objects.get(pk= id)
if request.method == "POST":
visitor_form_vdf= VisitorForm(request.POST)
if visitor_form_vdf.is_valid():
new_visitor= Visitor()
new_visitor.name= visitor_form_vdf.cleaned_data['name']
new_visitor.phone= visitor_form_vdf.cleaned_data['phone']
new_visitor.date= datetime.datetime.now()
new_visitor.id= visitor_data.id
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
new_visitor.patient.set_value(visitor_form_vdf.cleaned_data['patient'])
new_visitor.save()
return redirect("Show_Visitor_Details")
else:
return render(request, 'AddVisitor.html', {"vdf": visitor_form_vdf, 'success': "Visitor Data is not "
"saved"})
visitor_vdf= VisitorForm( instance= visitor_data)
return render(request, 'AddVisitor.html', {"vdf": visitor_vdf})
def deleteVisitor(request,id):
visitor_data= Visitor.objects.get(pk= id)
visitor_data.delete()
return redirect('Show_Visitor_Details')
def showVisitor(request):
visitor_list_all= Visitor.objects.all()
page_wise= Paginator(visitor_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
visitor_list= page_wise.get_page(current_page)
return render(request,'ShowVisitor.html',{'v_list': visitor_list })
@login_required
def staff(request):
if request.method == "POST":
staff_vdf= StaffForm(request.POST)
if staff_vdf.is_valid():
s= Staff()
s.name= staff_vdf.cleaned_data['name']
s.email= staff_vdf.cleaned_data['email']
s.salary= staff_vdf.cleaned_data['salary']
s.address= staff_vdf.cleaned_data['address']
s.gender= staff_vdf.cleaned_data['gender']
s.qualification= staff_vdf.cleaned_data['qualification']
s.designation= staff_vdf.cleaned_data['designation']
s.contact= staff_vdf.cleaned_data['contact']
s.join_date= staff_vdf.cleaned_data['join_date']
s.save()
s_vdf= StaffForm()
return render(request, 'AddStaff.html',{'vdf':s_vdf, 'success':"new staff data has successfully saved"})
else:
return render(request, 'AddStaff.html',{'vdf':staff_vdf, 'success':"new staff data has successfully saved"})
vdf= StaffForm()
return render(request, 'AddStaff.html',{'vdf':vdf, 'success':""})
def updateStaff(request,id):
staff_data= Staff.objects.get(pk= id)
if request.method == "POST":
staff_form_vdf= StaffForm(request.POST)
if staff_form_vdf.is_valid():
new_staff= Staff()
new_staff.name= staff_form_vdf.cleaned_data['name']
new_staff.email= staff_form_vdf.cleaned_data['email']
new_staff.salary= staff_form_vdf.cleaned_data['salary']
new_staff.address= staff_form_vdf.cleaned_data['address']
new_staff.gender= staff_form_vdf.cleaned_data['gender']
new_staff.qualification= staff_form_vdf.cleaned_data['qualification']
new_staff.designation= staff_form_vdf.cleaned_data['designation']
new_staff.contact= staff_form_vdf.cleaned_data['contact']
new_staff.join_date= staff_form_vdf.cleaned_data['join_date']
new_staff.s_id= staff_data.s_id
new_staff.save()
return redirect("Show_Staff_Details")
else:
return render(request, 'AddStaff.html', {"vdf": staff_form_vdf, 'success': "Staff Data is not "
"saved"})
staff_vdf= StaffForm( instance= staff_data)
return render(request, 'AddStaff.html', {"vdf": staff_vdf})
def deleteStaff(request,id):
staff_data= Staff.objects.get(pk= id)
staff_data.delete()
return redirect('Show_Staff_Details')
def showStaff(request):
staff_list_all= Staff.objects.all()
page_wise= Paginator(staff_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
staff_list= page_wise.get_page(current_page)
return render(request,'ShowStaff.html',{'s_list': staff_list })
@login_required
def medicalStore(request):
if request.method == "POST":
m_store_vdf= MedicalStoreForm(request.POST)
if m_store_vdf.is_valid():
ms= MedicalStore()
ms.name= m_store_vdf.cleaned_data['name']
ms.type= m_store_vdf.cleaned_data['type']
ms.mfd_date= m_store_vdf.cleaned_data['mfd_date']
ms.exp_date= m_store_vdf.cleaned_data['exp_date']
ms.quantity= m_store_vdf.cleaned_data['quantity']
ms.buy_cost= m_store_vdf.cleaned_data['buy_cost']
ms.sell_cost= m_store_vdf.cleaned_data['sell_cost']
ms.save()
ms_vdf= MedicalStoreForm()
return render(request, 'AddMedicalStore.html',{'vdf':ms_vdf,'success':'New store item data has '
'successfully saved'})
else:
return render(request, 'AddMedicalStore.html', {'vdf': m_store_vdf, 'success': 'New store item data has '
'not saved'})
m_vdf= MedicalStoreForm()
return render(request, 'AddMedicalStore.html', {'vdf': m_vdf, 'success': ''})
def updateMedicalStore(request,id):
ms_data= MedicalStore.objects.get(pk= id)
if request.method == "POST":
ms_form_vdf= MedicalStoreForm(request.POST)
if ms_form_vdf.is_valid():
new_ms= MedicalStore()
new_ms.name= ms_form_vdf.cleaned_data['name']
new_ms.type= ms_form_vdf.cleaned_data['type']
new_ms.mfd_date= ms_form_vdf.cleaned_data['mfd_date']
new_ms.exp_date= ms_form_vdf.cleaned_data['exp_date']
new_ms.quantity= ms_form_vdf.cleaned_data['quantity']
new_ms.buy_cost= ms_form_vdf.cleaned_data['buy_cost']
new_ms.sell_cost= ms_form_vdf.cleaned_data['sell_cost']
new_ms.id= ms_data.id
new_ms.save()
return redirect("Show_Medical_Store_Details")
else:
return render(request, 'AddMedicalStore.html', {"vdf": ms_form_vdf, 'success': "MedicalStore Data is not "
"saved"})
ms_vdf= MedicalStoreForm( instance= ms_data)
return render(request, 'AddMedicalStore.html', {"vdf": ms_vdf})
def deleteMedicalStore(request,id):
ms_data= MedicalStore.objects.get(pk= id)
ms_data.delete()
return redirect('Show_Medical_Store_Details')
def showMedicalStore(request):
medical_store_list_all= MedicalStore.objects.all()
page_wise= Paginator(medical_store_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
medical_store_list= page_wise.get_page(current_page)
return render(request,'ShowMedicalStore.html',{'ms_list': medical_store_list })
@login_required
def orderMedicine(request):
if request.method == "POST":
order_vdf= OrderMedicineForm(request.POST)
if order_vdf.is_valid():
order_vdf.save()
latest_payment= OrderMedicine.objects.latest('id')
request.session['payer_id']= latest_payment.id
request.session['first_name']= str(order_vdf.cleaned_data['name'])
request.session['contact_phone']= order_vdf.cleaned_data['phone']
request.session['amount']= str(order_vdf.cleaned_data['buy_cost'])
return redirect('Order_Payment_Process')
else:
return render(request, 'OrderMedicine.html', {'vdf': order_vdf,'success' :'Your order is not '
'successfully save'})
order= OrderMedicineForm()
return render(request, 'OrderMedicine.html',{'vdf': order})
def showOrderMedicine(request):
order_list_all= OrderMedicine.objects.all()
page= Paginator(order_list_all, per_page= 10, orphans= 3)
current_page= request.GET.get('page')
order_list= page.get_page(current_page)
return render(request,'ShowOrderMedicine.html',{"order_list": order_list})
def orderPaymentProcess(request):
host= request.get_host()
paypal_dict= {
'business': settings.PAYPAL_RECEIVER_EMAIL,
'name': str(request.session['first_name']),
'amount': request.session['amount'],
'phone': request.session['contact_phone'],
'receipt_no': str(request.session['payer_id']),
'item_name': 'Checkout #' + str(request.session['payer_id']),
'invoice': str(request.session['payer_id']),
'currency_code': 'INR',
'notify_url': 'http://{}{}'.format(host, reverse('paypal-ipn')),
'done_return': 'http://{}{}'.format(host, reverse('Payment_Done')),
'cancel_return': 'http://{}{}'.format(host, reverse('Payment_Cancelled')),
}
form= PayPalPaymentsForm(initial= paypal_dict)
return render(request, 'OrderPaymentProcess.html', {'form': form, 'paypal_dic': paypal_dict})
@csrf_exempt
def orderPaymentDone(request):
# get the details of the paypal
order_obj= OrderMedicine.objects.get(pk= request.session['payer_id'])
order_obj.payment_status= "Confirm"
order_obj.save()
del request.session['payer_id']
return render(request, 'OrderPaymentDone.html',{'pay': "Payment Successfully done"} )
@csrf_exempt
def orderPaymentCancelled(request):
# Delete the order details
order_obj= OrderMedicine.objects.get(pk= request.session['payer_id'])
# order_obj.delete()
order_obj.payment_status= "Pending"
order_obj.save()
del request.session['payer_id']
return render(request, 'OrderPaymentCancelled.html',{'pay': "payment not done"} )
@login_required
def showHome(request):
return render(request, "Home.html", {'title': "Hospital Management System- Home page"})
def showContactDetails(request):
return render(request,'ContactDetails.html',{})
def adminForm(request):
if request.method == 'POST':
admin_form_vdf= AdminForm(request.POST)
if admin_form_vdf.is_valid():
# create aan object for User model for authentications
# this function save dataa to the database automatically . there is no need to use save() function
User.objects.create_user(
username= admin_form_vdf.cleaned_data['email_id'],
first_name= admin_form_vdf.cleaned_data['name'],
email= admin_form_vdf.cleaned_data['email_id'],
password= admin_form_vdf.cleaned_data['password']
)
admin_vdf= AdminForm()
return render(request, "AdminForm.html", {'vdf': admin_vdf,'success':"you are successfully register"})
else:
return render(request, "AdminForm.html", {'vdf': admin_form_vdf,'success':"you are not successfully register"})
admin_vdf= AdminForm()
return render(request, "AdminForm.html", {'vdf': admin_vdf,'title':'registration Page'})
@login_required
def showAdmin(request):
admin_list_all= Admin.objects.all()
pages= Paginator(admin_list_all,per_page= 10,orphans= 3)
current_page= request.GET.get('page')
admin_list= pages.get_page(current_page)
return render(request,'ShowAdmin.html',{'admin_list': admin_list, 'title' :'Admin list'})
def bloodBank(request):
if request.method == "POST":
blood_bank_vdf = BloodBankForm(request.POST)
if blood_bank_vdf.is_valid():
bb = BloodBankDetails()
bb.name = blood_bank_vdf.cleaned_data['name']
bb.d_o_b = blood_bank_vdf.cleaned_data['d_o_b']
bb.blood_group = blood_bank_vdf.cleaned_data['blood_group']
bb.gender = blood_bank_vdf.cleaned_data['gender']
bb.patient_type = blood_bank_vdf.cleaned_data['patient_type']
bb.phone = blood_bank_vdf.cleaned_data['phone']
bb.email = blood_bank_vdf.cleaned_data['email']
bb.address = blood_bank_vdf.cleaned_data['address']
bb.date = blood_bank_vdf.cleaned_data['date']
bb.feedback = blood_bank_vdf.cleaned_data['feedback']
bb.save()
blood_vdf = BloodBankForm()
return render(request, 'BloodBankForm.html', {'vdf': blood_vdf,'success':"Your data has successfully "
"save"})
else:
return render(request, 'BloodBankForm.html',{'vdf': blood_bank_vdf,'success':'Your data has not saved'})
bb_vdf = BloodBankForm()
return render(request, 'BloodBankForm.html',{'vdf': bb_vdf})
def updateBloodBank(request,id):
blood_bank_date = BloodBankDetails.objects.get(pk = id)
if request.method == "POST":
blood_bank_vdf = BloodBankForm(request.POST)
if blood_bank_vdf.is_valid():
new_bb = BloodBankDetails()
new_bb.name = blood_bank_vdf.cleaned_data['name']
new_bb.d_o_b = blood_bank_vdf.cleaned_data['d_o_b']
new_bb.blood_group = blood_bank_vdf.cleaned_data['blood_group']
new_bb.gender = blood_bank_vdf.cleaned_data['gender']
new_bb.patient_type = blood_bank_vdf.cleaned_data['patient_type']
new_bb.phone = blood_bank_vdf.cleaned_data['phone']
new_bb.email = blood_bank_vdf.cleaned_data['email']
new_bb.address = blood_bank_vdf.cleaned_data['address']
new_bb.date = blood_bank_vdf.cleaned_data['date']
new_bb.feedback = blood_bank_vdf.cleaned_data['feedback']
new_bb.id = blood_bank_date.id
new_bb.save()
return redirect('Show_Blood_Bank_Details')
else:
return render(request, 'BloodBankForm.html',{'vdf': blood_bank_vdf,'success':'Your data has not '
'updated'})
bb_vdf = BloodBankForm(instance=blood_bank_date)
return render(request, 'BloodBankForm.html',{'vdf': bb_vdf})
def deleteBloodBank(request,id):
bb_data= BloodBankDetails.objects.get(pk= id)
bb_data.delete()
return redirect('Show_Blood_Bank_Details')
def showBloodBank(request):
blood_bank_list_all = BloodBankDetails.objects.all()
page_wise= Paginator(blood_bank_list_all, per_page= 5, orphans= 2)
current_page= request.GET.get('page')
blood_bank_list= page_wise.get_page(current_page)
return render(request,'ShowBloodBank.html',{'blood_bank_list': blood_bank_list })
| [
"noreply@github.com"
] | ashishchauhan1234.noreply@github.com |
48cebb7680cd04cef78846c1efb655e8e84e8dbb | 37433418676b2f1cd0551b5abb27de44d36ca883 | /invoice_module.py | dee8e65a246ac09dd560a0fe271727a89053e7cb | [] | no_license | aarcha123/pyplayground | 3c00c2cfb3952cd28f8bda64e1eb2496d80128c9 | 7f93735c2114c5f4508cb0f3d506531ddc09811d | refs/heads/master | 2020-03-19T05:54:13.871139 | 2018-06-27T19:14:36 | 2018-06-27T19:14:36 | 135,972,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,273 | py | import sys,getopt
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import RFE
import math
import pickle
lr_model_file = 'lr_model.sav'
model = None
X_test = None
Y_test = None
cust_dict = None
cust_avg_settled = None
def main(argv):
trainfile=''
testfile=''
datafile=''
try:
opts, args = getopt.getopt(argv, "ht:e:p", ["train=", "test=","predict="])
except getopt.GetoptError:
print('test.py -t <traindata> -v <validatedata> -p <predictdata>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -t <traindata> -v <validatedata> -p <predictdata>')
sys.exit()
elif opt in ("-t", "--train"):
train(arg)
elif opt in ("-e", "--test"):
testfile = arg
elif opt in ("-p", "--predict"):
predict(arg)
def train(trainfile):
global model
dataset = pd.read_csv(trainfile)
dataset_new = extract_features(dataset)
array = dataset_new.values
n = len(dataset_new.columns)
X = array[:, 0:n - 1]
Y = array[:, n - 1]
seed = 7
X_train, X_rest, Y_train, Y_rest = model_selection.train_test_split(X, Y, test_size=0.40, random_state=seed)
X_validation, X_test, Y_validation, Y_test = model_selection.train_test_split(X_rest, Y_rest, test_size=0.50,random_state=seed)
lm = LinearRegression()
model = lm
lm.fit(X_train, Y_train)
print("done training...")
model_stats(lm,X_validation,Y_validation)
pickle.dump(lm, open(lr_model_file, 'wb'))
print("model saved")
return lm
def model_stats(lm,X_validation, Y_validation):
print("score= ", lm.score(X_validation, Y_validation))
y_predict = lm.predict(X_validation)
regression_model_mse = mean_squared_error(y_predict, Y_validation)
print("regression rmse:", math.sqrt(regression_model_mse))
def extract_features(dataset):
global cust_avg_settled
global cust_dict
grouped = dataset.groupby('customerID', as_index=False)
invoice_count = grouped.agg({"invoiceNumber": "count"})
invoice_count.columns = ['customerID', 'total']
custlist = invoice_count['customerID'].tolist()
cust_dict = {x: custlist.index(x) for x in custlist}
df = pd.DataFrame(list(cust_dict.items()), columns=['customerID', 'code'])
df.to_csv("customer_map.csv", index=0)
settled_days_avg = grouped.agg({'DaysToSettle': 'mean'})
settled_days_avg.columns = ['customerID', 'avgDaysToSettle']
settled_days_avg.to_csv("avg_days.csv", index=0)
cust_avg_settled = pd.Series(settled_days_avg.avgDaysToSettle.values, index=settled_days_avg.customerID).to_dict()
dataset_enriched = calc_features(dataset)
return dataset_enriched
def calc_features(dataset):
    """Derive the model feature columns from *dataset* (mutated in place).

    Adds date-derived columns, maps PaperlessBill to 0/1, and attaches the
    per-customer lookups built by extract_features(), lazily reloading them
    from the CSV snapshots when the module globals are None.  Returns a new
    frame holding only the model columns, coerced to numeric.
    """
    global cust_avg_settled
    global cust_dict
    # Parse the invoice date once instead of three times.
    invoice_dt = pd.to_datetime(dataset['InvoiceDate'])
    dataset['invoicemonth'] = invoice_dt.dt.month
    dataset['invoicedate'] = invoice_dt.dt.day
    dataset['invoiceday'] = invoice_dt.dt.weekday
    dataset['monthend'] = np.where(dataset['invoicedate'] > 27, 1, 0)
    dataset['firsthalfmonth'] = np.where(dataset['invoicedate'] < 16, 1, 0)
    paperless = {'Paper': 0, 'Electronic': 1}
    dataset['paperless'] = dataset['PaperlessBill'].map(paperless)
    if cust_avg_settled is None:
        # Lazy-load the per-customer average settle days saved by extract_features().
        cust_avg_df = pd.read_csv('avg_days.csv')
        cust_avg_settled = pd.Series(
            cust_avg_df.avgDaysToSettle.values, index=cust_avg_df.customerID).to_dict()
    dataset['avgDaysToSettle'] = dataset['customerID'].map(cust_avg_settled)
    if cust_dict is None:
        # Lazy-load the customerID -> integer code mapping.
        cust_map_df = pd.read_csv('customer_map.csv')
        cust_dict = pd.Series(
            cust_map_df.code.values, index=cust_map_df.customerID).to_dict()
    dataset['cust'] = dataset['customerID'].map(cust_dict)
    # .copy() avoids pandas' SettingWithCopyWarning on the coercion below;
    # the selected values are unchanged.
    dataset_final = dataset[['cust', 'InvoiceAmount', 'invoicemonth', 'monthend',
                             'firsthalfmonth', 'paperless', 'avgDaysToSettle',
                             'DaysToSettle']].copy()
    cols = dataset_final.columns
    dataset_final[cols] = dataset_final[cols].apply(pd.to_numeric)
    return dataset_final
def auto_extract_feature(X_train, Y_train):
    """Run RFE (keep 4 features) against the global model and print the result."""
    selector = RFE(model, 4)
    fitted = selector.fit(X_train, Y_train)
    for template, value in (("Num Features: %d", fitted.n_features_),
                            ("Selected Features: %s", fitted.support_),
                            ("Feature Ranking: %s", fitted.ranking_)):
        print(template % value)
def file_to_array(filename):
    """Load *filename*, enrich it via calc_features(), and return the
    feature matrix (every column except the last/target one)."""
    enriched = calc_features(pd.read_csv(filename))
    values = enriched.values
    last_col = len(enriched.columns) - 1
    return values[:, 0:last_col]
def predict(datafile):
    """Predict the target for every invoice in *datafile* and print results.

    Loads the pickled model from ``lr_model_file``; predictions are also
    attached to the raw frame as a 'predicted' column before printing the
    first row.
    """
    invoice_data = pd.read_csv(datafile)
    invoice_data_enriched = calc_features(invoice_data)
    array = invoice_data_enriched.values
    n = len(invoice_data_enriched.columns)
    x_value = array[:, 0:n - 1]  # drop the target column
    # Context manager closes the model file; the original open() leaked
    # the handle.
    with open(lr_model_file, 'rb') as fh:
        loaded_model = pickle.load(fh)
    y_value = loaded_model.predict(x_value)
    print("prediction: ")
    print(y_value)
    invoice_data['predicted'] = y_value
    print(invoice_data.head(1))
def to_json():
    """Placeholder export hook; currently only reports 'json'."""
    message = 'json'
    print(message)
if __name__ == "__main__":
main(sys.argv[1:]) | [
"aarcha.nair@gmail.com"
] | aarcha.nair@gmail.com |
6e741735016928b167410e4c77d3bc6b0f51ca10 | c86c05e79075ef9bfb37f99d94adc1f706a1569c | /tesdloo/src/KMP.py | f8bb4e5a9c766136619c811fe9a37315a4e54147 | [] | no_license | arfaghif/Word-Searching-on-file---flask | a258ab032f96165ed19a2a035319dd4cfadcb541 | 06b34fde5a4e623df9acb8af78d7cfdf489fc11f | refs/heads/master | 2022-04-21T17:21:06.171273 | 2020-04-22T05:19:36 | 2020-04-22T05:19:36 | 257,787,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | def KMPSearch(pat, txt):
M = len(pat)
N = len(txt)
lps = findArrayLPS(pat) #cai lpsnya terlebih dahulu
j = 0 #index penggeseran pat
i = 0 # index penggeseran txt
while i < N:
if pat[j].lower() == txt[i].lower():
if(j==M-1):#sudah menemukan yang sesuai
return True
i += 1
j += 1
elif(j>0):
j = lps[j-1]
else:
i +=1
return False
def findArrayLPS(pat):
    """Build the KMP longest-proper-prefix-suffix (failure) table for *pat*.

    Comparison is case-insensitive, matching KMPSearch above.  Entry i is
    the length of the longest proper prefix of pat[:i+1] that is also a
    suffix of it.
    """
    n = len(pat)
    table = [0] * n
    prefix_len = 0  # length of the currently matched prefix
    pos = 1
    while pos < n:
        if pat[pos].lower() == pat[prefix_len].lower():
            # Extend the previous prefix match by one character.
            prefix_len += 1
            table[pos] = prefix_len
            pos += 1
        elif prefix_len > 0:
            # Fall back to the next shorter candidate prefix.
            prefix_len = table[prefix_len - 1]
        else:
            pos += 1
    return table
# txt = "ABABDABACDABABCABAB"
# pat = "AAACCAAACA"
# pat1 = "ababcaBaB"
# KMPSearch(pat, txt)
# KMPSearch(pat1, txt)
| [
"52845090+arfaghif@users.noreply.github.com"
] | 52845090+arfaghif@users.noreply.github.com |
ecc765eb41a1d612d5995a7ce9f399159e82aa23 | 7c5fb33929116bb77b438de3ead93b3978b5af71 | /alf/examples/ppo_rnd_mrevenge_conf.py | 0bc0a4ca8c4e71621119b9eb61da912539881928 | [
"Apache-2.0"
] | permissive | HorizonRobotics/alf | d6dac891322a81ccb7e2a9749139627b1eda28cb | b00ff2fa5e660de31020338ba340263183fbeaa4 | refs/heads/pytorch | 2023-08-21T18:51:41.370566 | 2023-08-16T00:07:22 | 2023-08-16T00:07:22 | 178,459,453 | 288 | 57 | Apache-2.0 | 2023-09-14T20:40:20 | 2019-03-29T18:44:07 | Python | UTF-8 | Python | false | false | 3,875 | py | # Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration to train on Atari Game Montezuma's Revenge with PPO + RND exploration.
NOTE: Currently this configuration only achieves around 3000 (reward) instead of
around 6000 as it used to be.
TODO: Tune the paramters to make it achieving 6000 or better reward again.
"""
import alf
import torch
import functools
# from alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm
from alf.algorithms.rnd_algorithm import RNDAlgorithm
from alf.networks import EncodingNetwork, ActorDistributionNetwork, CategoricalProjectionNetwork, ValueNetwork
from alf.tensor_specs import TensorSpec
from alf.examples import ppo_conf
from alf.examples import atari_conf
# Disable random no-op starts in the Atari preprocessor.
alf.config('DMAtariPreprocessing', noop_max=0)
# From OpenAI gym wiki:
#
# "v0 vs v4: v0 has repeat_action_probability of 0.25
# (meaning 25% of the time the previous action will be used instead of the new action),
# while v4 has 0 (always follow your issued action)
# Because we already implements frame_skip in AtariPreprocessing, we should always
# use 'NoFrameSkip' Atari environments from OpenAI gym
alf.config(
    'create_environment',
    env_name='MontezumaRevengeNoFrameskip-v0',
    num_parallel_environments=128)
# RND config
# Random Network Distillation: a predictor net is trained to match a fixed
# randomly-initialized target net; the prediction error on a state is used
# as the intrinsic (exploration) reward.
KEEP_STACKED_FRAMES = 1
EMBEDDING_DIM = 1000
alf.config(
    'RNDAlgorithm',
    encoder_net=EncodingNetwork(
        activation=torch.tanh,
        input_tensor_spec=TensorSpec(shape=(KEEP_STACKED_FRAMES, 84, 84)),
        conv_layer_params=((64, 5, 5), (64, 2, 2), (64, 2, 2))),
    target_net=EncodingNetwork(
        activation=torch.tanh,
        input_tensor_spec=TensorSpec(shape=(1024, )),
        fc_layer_params=(300, 400, 500, EMBEDDING_DIM)),
    predictor_net=EncodingNetwork(
        activation=torch.tanh,
        input_tensor_spec=TensorSpec(shape=(1024, )),
        fc_layer_params=(300, 400, 500, EMBEDDING_DIM)),
    optimizer=alf.optimizers.AdamTF(lr=4e-5),
    keep_stacked_frames=KEEP_STACKED_FRAMES)
# Combine extrinsic reward with the (small-weighted) RND intrinsic reward.
alf.config(
    'Agent',
    extrinsic_reward_coef=1.0,
    intrinsic_reward_module=RNDAlgorithm(),
    intrinsic_reward_coef=1e-3,
    optimizer=alf.optimizers.AdamTF(lr=1e-4))
alf.config('PPOLoss', entropy_regularization=0.01)
# Neural Network Configuration
CONV_LAYER_PARAMS = ((32, 8, 4), (64, 4, 2), (64, 3, 1))
FC_LAYER_PARAMS = (512, 512)
actor_network_cls = functools.partial(
    ActorDistributionNetwork,
    fc_layer_params=FC_LAYER_PARAMS,
    conv_layer_params=CONV_LAYER_PARAMS,
    discrete_projection_net_ctor=CategoricalProjectionNetwork)
# Near-zero init keeps the initial policy close to uniform over actions.
alf.config('CategoricalProjectionNetwork', logits_init_output_factor=1e-10)
value_network_cls = functools.partial(
    ValueNetwork,
    fc_layer_params=FC_LAYER_PARAMS,
    conv_layer_params=CONV_LAYER_PARAMS)
alf.config(
    'ActorCriticAlgorithm',
    actor_network_ctor=actor_network_cls,
    value_network_ctor=value_network_cls)
# Training schedule: stop on num_env_steps (num_iterations=0 disables the
# iteration-count stop condition).
alf.config(
    'TrainerConfig',
    num_updates_per_train_iter=6,
    unroll_length=32,
    mini_batch_length=1,
    mini_batch_size=1024,
    num_iterations=0,
    num_env_steps=50000000,  # = 200M frames / 4 (frame_skip)
    debug_summaries=True,
    summarize_grads_and_vars=False,
    summary_interval=100,
    num_checkpoints=10,
    use_rollout_state=True,
    update_counter_every_mini_batch=True)
| [
"noreply@github.com"
] | HorizonRobotics.noreply@github.com |
9ae571a71c56c78f4543371dd21be8418092fe0a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/P/pannaahmed/ddd-localharvest.py | 4885b26e593534bda9b9eaa3fe5349672326a96e | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,276 | py | import re;
def remove_html(string):
    """Strip every HTML/XML tag (non-greedy <...> spans) from *string*."""
    tag_pattern = "<.*?>"
    return re.sub(tag_pattern, "", string)
def clean(string):
    """Full cleanup pipeline: trim, drop tags, turn markup noise into
    commas, squeeze separator runs, then coerce to a safe str."""
    no_tags = remove_html(string.strip())
    collapsed = final_clean(strip_non_text(no_tags))
    return safestr(collapsed)
def strip_non_text(string):
    """Replace newlines, CRs, 3-letter HTML entities and tags with commas."""
    noise_pattern = "\n|\r|&\w{3};|<.*?>"
    return re.sub(noise_pattern, ",", string)
def final_clean(string):
    """Squeeze any run of 2-10 commas/spaces down to a single comma."""
    separator_run = "[, ]{2,10}"
    return re.sub(separator_run, ",", string)
def split_and_clean(string, delim):
    """Split *string* on *delim*, clean() each non-blank piece, keep only
    letters and spaces, and rejoin the pieces with commas.

    Bug fix: re.I was being passed as re.sub's positional *count* argument
    (silently limiting it to 2 substitutions); it is now passed as *flags*.
    """
    cleaned = [clean(rec) for rec in string.split(delim) if rec.strip() != ""]
    return ",".join(re.sub("[^a-zA-Z ]", "", rec, flags=re.I) for rec in cleaned)
# Matches a Python 2 unicode repr like u'...' (group 1 = inner text);
# consumed by safestr() below.
SAFESTR_RX = re.compile("^u\'(.+)\'$")
def safestr(string):
    """Best-effort coercion of *string* to a plain printable string.

    Tries english_string() first (english_string is not defined anywhere in
    this file, so this path normally raises NameError and falls through),
    then falls back to repr() with any Python 2 u'...' wrapper stripped.
    """
    try:
        return english_string(string).encode('utf-8', 'replace')
    except Exception:  # narrowed from a bare except; still covers the NameError
        # r'\1' keeps the captured inner text; the original '\1' was the
        # octal escape \x01 and inserted a literal control character.
        return re.sub(SAFESTR_RX, r'\1', repr(string))
import scraperwiki
import lxml.html
import re
# Scrape target URLs plus provenance recorded in the local scraperwiki store.
domaine = 'http://www.localharvest.org'
s_url = 'http://www.localharvest.org/store'
scraperwiki.sqlite.save_var("source", "localharvest.org")
scraperwiki.sqlite.save_var("author", "Panna Ahmed")
def scrape_site(start_url, domaine):
    """Fetch the site root and parse it (Python 2 code; mostly a stub).

    Only prints the start URL and the parsed tree; the unused p_num/r_num
    and the commented-out loop suggest pagination was planned but not done.
    """
    print start_url
    html_content = scraperwiki.scrape(domaine, None, 'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20100101 Firefox/15.0.1')
    p_num=1
    r_num=0
    #while True:
    root = lxml.html.fromstring(html_content)
    print root
    #data_list = root.cssselect('div[id="col1_content"] div')
def scrape_info(comp_link, num):
    """Scrape one LocalHarvest store page and save it to the sqlite store.

    Python 2 code (print statements).  Parses the col1_content block into
    company name, address, contacts and categories; rows are keyed by
    companyname on save.
    """
    my_data = []
    my_data.append(('id', num))
    my_data.append(('sourceurl', comp_link))
    # Retry the fetch up to 5 times, swallowing any error between attempts.
    for attempt in range(5):
        try:
            html_content = scraperwiki.scrape(comp_link)
            break
        except:
            pass
    root = lxml.html.fromstring(html_content)
    div = root.cssselect('div[id="col1_content"] ')[0]
    data_list = div.text_content().split('\r\n')
    print data_list
    # Fixed positions: line 3 = company name, lines 4-6 = address parts.
    adress = data_list[4]+data_list[5]+data_list[6]
    my_data.append(('companyname', data_list[3]))
    my_data.append(('address', adress))
    for data in data_list[6:]:
        if re.search('Phone:',data):
            #print data
            # NOTE(review): assumes a 'country ... Phone:... Fax:... name/email'
            # layout; the magic [:-N] slices trim trailing labels.  Best effort:
            # any surprise falls into the silent except below.
            contacts = data.split(':')
            try:
                my_data.append(('country', contacts[0][:-5]))
                my_data.append(('phonenumber', contacts[1][:-3]))
                my_data.append(('faxnumber', contacts[2][:-7]))
                if contacts[3].split()[0] != 'Products':
                    if '@' in contacts[3].split()[0]:
                        my_data.append(('emails', contacts[3].split()[1][:-8]))
                    else:
                        my_data.append(('contact1first', contacts[3].split()[0]))
                        my_data.append(('contact1last', contacts[3].split()[1][:-8]))
            except:
                pass
    # Top-level categories, then the full category list; [2:] strips the
    # leading ', ' that the accumulation loop prepends.
    m_cats=''
    m_categories = div.cssselect('li span.toplevelcategory')
    for m_cat in m_categories:
        m_cats=m_cats+', '+m_cat.text
    my_data.append(('maincategory', m_cats[2:]))
    categories = div.cssselect('li span')
    cats=''
    for cat in categories:
        cats=cats+', '+cat.text
    print cats
    my_data.append(('categories', cats[2:]))
    print my_data
    scraperwiki.sqlite.save(unique_keys=['companyname'], data=dict(my_data))
# Kick off the scrape when the module is executed.
scrape_site(s_url, domaine)
import re;
def remove_html(string):
    """Strip every HTML/XML tag (non-greedy <...> spans) from *string*."""
    tag_pattern = "<.*?>"
    return re.sub(tag_pattern, "", string)
def clean(string):
    """Full cleanup pipeline: trim, drop tags, turn markup noise into
    commas, squeeze separator runs, then coerce to a safe str."""
    no_tags = remove_html(string.strip())
    collapsed = final_clean(strip_non_text(no_tags))
    return safestr(collapsed)
def strip_non_text(string):
    """Replace newlines, CRs, 3-letter HTML entities and tags with commas."""
    noise_pattern = "\n|\r|&\w{3};|<.*?>"
    return re.sub(noise_pattern, ",", string)
def final_clean(string):
    """Squeeze any run of 2-10 commas/spaces down to a single comma."""
    separator_run = "[, ]{2,10}"
    return re.sub(separator_run, ",", string)
def split_and_clean(string, delim):
    """Split *string* on *delim*, clean() each non-blank piece, keep only
    letters and spaces, and rejoin the pieces with commas.

    Bug fix: re.I was being passed as re.sub's positional *count* argument
    (silently limiting it to 2 substitutions); it is now passed as *flags*.
    """
    cleaned = [clean(rec) for rec in string.split(delim) if rec.strip() != ""]
    return ",".join(re.sub("[^a-zA-Z ]", "", rec, flags=re.I) for rec in cleaned)
# Matches a Python 2 unicode repr like u'...' (group 1 = inner text);
# consumed by safestr() below.
SAFESTR_RX = re.compile("^u\'(.+)\'$")
def safestr(string):
    """Best-effort coercion of *string* to a plain printable string.

    Tries english_string() first (english_string is not defined anywhere in
    this file, so this path normally raises NameError and falls through),
    then falls back to repr() with any Python 2 u'...' wrapper stripped.
    """
    try:
        return english_string(string).encode('utf-8', 'replace')
    except Exception:  # narrowed from a bare except; still covers the NameError
        # r'\1' keeps the captured inner text; the original '\1' was the
        # octal escape \x01 and inserted a literal control character.
        return re.sub(SAFESTR_RX, r'\1', repr(string))
import scraperwiki
import lxml.html
import re
# Duplicated copy of the scrape configuration (this file's content appears
# twice); these simply rebind the same values.
domaine = 'http://www.localharvest.org'
s_url = 'http://www.localharvest.org/store'
scraperwiki.sqlite.save_var("source", "localharvest.org")
scraperwiki.sqlite.save_var("author", "Panna Ahmed")
def scrape_site(start_url, domaine):
    """Duplicate of the scrape_site defined earlier in this file; being the
    later definition, this is the one in effect at runtime.

    Fetches the site root and parses it (Python 2 code; mostly a stub) -
    only the start URL and the parsed tree are printed.
    """
    print start_url
    html_content = scraperwiki.scrape(domaine, None, 'Mozilla/5.0 (Windows NT 6.1; rv:15.0) Gecko/20100101 Firefox/15.0.1')
    p_num=1
    r_num=0
    #while True:
    root = lxml.html.fromstring(html_content)
    print root
    #data_list = root.cssselect('div[id="col1_content"] div')
def scrape_info(comp_link, num):
    """Duplicate of the scrape_info defined earlier in this file; being the
    later definition, this is the one in effect at runtime.

    Scrapes one LocalHarvest store page (Python 2) and saves the parsed
    company name/address/contacts/categories keyed by companyname.
    """
    my_data = []
    my_data.append(('id', num))
    my_data.append(('sourceurl', comp_link))
    # Retry the fetch up to 5 times, swallowing any error between attempts.
    for attempt in range(5):
        try:
            html_content = scraperwiki.scrape(comp_link)
            break
        except:
            pass
    root = lxml.html.fromstring(html_content)
    div = root.cssselect('div[id="col1_content"] ')[0]
    data_list = div.text_content().split('\r\n')
    print data_list
    # Fixed positions: line 3 = company name, lines 4-6 = address parts.
    adress = data_list[4]+data_list[5]+data_list[6]
    my_data.append(('companyname', data_list[3]))
    my_data.append(('address', adress))
    for data in data_list[6:]:
        if re.search('Phone:',data):
            #print data
            # NOTE(review): assumes a 'country ... Phone:... Fax:... name/email'
            # layout; the magic [:-N] slices trim trailing labels.  Best effort:
            # any surprise falls into the silent except below.
            contacts = data.split(':')
            try:
                my_data.append(('country', contacts[0][:-5]))
                my_data.append(('phonenumber', contacts[1][:-3]))
                my_data.append(('faxnumber', contacts[2][:-7]))
                if contacts[3].split()[0] != 'Products':
                    if '@' in contacts[3].split()[0]:
                        my_data.append(('emails', contacts[3].split()[1][:-8]))
                    else:
                        my_data.append(('contact1first', contacts[3].split()[0]))
                        my_data.append(('contact1last', contacts[3].split()[1][:-8]))
            except:
                pass
    # Top-level categories, then the full category list; [2:] strips the
    # leading ', ' that the accumulation loop prepends.
    m_cats=''
    m_categories = div.cssselect('li span.toplevelcategory')
    for m_cat in m_categories:
        m_cats=m_cats+', '+m_cat.text
    my_data.append(('maincategory', m_cats[2:]))
    categories = div.cssselect('li span')
    cats=''
    for cat in categories:
        cats=cats+', '+cat.text
    print cats
    my_data.append(('categories', cats[2:]))
    print my_data
    scraperwiki.sqlite.save(unique_keys=['companyname'], data=dict(my_data))
# Second kick-off call (the file's content is duplicated).
scrape_site(s_url, domaine)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
223113bd3170bdf050e52260e3ebcd3425d47b41 | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays23CstmLiqO2.py | f0d1e9e2b4629bdff6dd6da191e1906279a26d37 | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | def LiqO2(P,T,x_N2):
x = (P-5.50184878e+02)/3.71707400e-01
y = (T--1.77763832e+02)/1.81029000e-02
z = (x_N2-9.82399475e-01)/2.41594433e-03
output = \
1*-8.96045078e+00+\
z*9.17331761e+00+\
y*9.40865318e+00+\
x*-4.59810291e+00
x_O2 = output*9.91253363e-04+3.46966193e-03
return x_O2 | [
"1052632241@qq.com"
] | 1052632241@qq.com |
00697132bb7c42e21763aab73b74b5b170bfc996 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03221/s034094237.py | b644de8c875a571cb205b5a1bf2d7b195b08966a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from collections import defaultdict
# Read N (number of prefectures; unused below) and M (number of cities),
# then one (prefecture, founding year) pair per city, remembering the
# 1-based input position i.
N, M = map(int, input().split())
IPY = []
for i in range(1, M + 1):
    p, y = map(int, input().split())
    IPY.append((i, p, y))
# Process cities in chronological order so that, within each prefecture,
# the oldest city gets ordinal 1, the next 2, and so on.
IPY.sort(key=lambda ipy: ipy[2])
ids = {}
nums = defaultdict(int)
for i, p, y in IPY:
    nums[p] += 1
    # ID = zero-padded prefecture code followed by zero-padded ordinal.
    ids[i] = '{:06d}{:06d}'.format(p, nums[p])
for i in range(1, M + 1):
print(ids[i]) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6b45be762da585386e265b8716c3bad5e87ae5fe | 39a9bfc535f798602a83ea73df7eb61f4508a433 | /Simple_file_encryption.py | 0158e70e27b9980fb23aa75aaacafc49b852c735 | [] | no_license | jalaj-pathak/FIle_encryptor_and_hash_calculator | 6f2707c49bacf1b73b08ca5329b0b118955d1057 | 6ef4a3891a0dc82fb6043ac52efab8032d9c72a4 | refs/heads/main | 2023-07-19T00:11:12.220231 | 2021-09-11T17:26:16 | 2021-09-11T17:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,389 | py | import os
import os.path
import hashlib
def encrypt_file(filename=None, key=None):
    """Caesar-encrypt a text file and write the result to 'encryptedfile.txt'.

    *filename* and *key* may be passed directly; when omitted (the original,
    interactive behaviour) they are prompted for.  Only ASCII letters are
    shifted within their case; every other character is dropped, matching
    encrypt_text().  If the file does not exist, re-prompts recursively.
    """
    if filename is None:
        filename = input("Enter the path to the file: ")
    if key is None:
        key = int(input("Enter the key: "))
    if os.path.isfile(filename):
        with open(filename, 'r') as file:
            content = file.read()
        result = ''
        # The original nested loops walked these same characters one at a
        # time ('line' was in fact a single character).
        for letter in content:
            if letter.isupper():
                result += chr((ord(letter) + int(key) - 65) % 26 + 65)
            elif letter.islower():
                result += chr((ord(letter) + int(key) - 97) % 26 + 97)
        with open('encryptedfile.txt', 'w') as f:
            f.write(result)
        print("[+] File encrypted\n")
    else:
        print("[-] File not found. Enter the correct path and try again.\n")
        encrypt_file()
def encrypt_text(plaintext=None, key=None):
    """Caesar-encrypt *plaintext* with shift *key* and return the result.

    Parameters default to None, in which case they are prompted for
    interactively (the original behaviour, preserved for existing callers).
    Letters are shifted within their case; any non-letter character is
    silently dropped from the output.
    """
    if plaintext is None:
        plaintext = input("[+] Enter the plaintext: ")
    if key is None:
        key = int(input("Enter the key: "))
    result = ''
    for letter in plaintext:
        try:
            if letter.isupper():
                result += chr((ord(letter) + int(key) - 65) % 26 + 65)
            elif letter.islower():
                result += chr((ord(letter) + int(key) - 97) % 26 + 97)
        except Exception:
            # Non-string elements (e.g. a list passed as plaintext) land here.
            print ("[-] Could not encrypt. Invalid plaintext!!\n")
    return result
def hash_file(filename=None, choice=None):
    """Print the hex digest of a file's text content.

    *filename* and *choice* ('md5' | 'sha1' | 'sha256') may be passed
    directly; when omitted they are prompted for, as in the original
    interactive flow.  An unknown algorithm name prints nothing (matching
    the old match/case falling through); on any error, reports and
    re-prompts recursively.
    """
    if filename is None:
        filename = input("[+] Enter the path to the file: ")
    if choice is None:
        choice = input("[+] Enter the hashing algorithm: ")
    try:
        if choice in ('md5', 'sha1', 'sha256'):
            # One code path replaces the three copy-pasted match arms.
            with open(filename, 'r') as file:
                content = file.read()
            digest = hashlib.new(choice, content.encode())
            print(digest.hexdigest())
    except Exception:
        print("[-] Something went wrong. Please try again")
        hash_file()
# Interactive menu loop: repeats until the user answers 'n' to the
# "Do something else" prompt.  An unrecognised choice re-prompts without
# asking that question (the `continue`).
if __name__ == '__main__':
    while True:
        print(
            '''
    [1] Encrypt file
    [2] Encrypt text
    [3] Calculate hash of a file
            ''')
        Choice = input()
        match Choice:
            case '1':
                encrypt_file()
            case '2':
                # encrypt_text returns the ciphertext; show it.
                print(encrypt_text())
            case '3':
                hash_file()
            case _:
                print("Something went wrong")
                continue
        is_end = input("Do something else (y/n): ")
        if (is_end == 'n'):
            break
| [
"noreply@github.com"
] | jalaj-pathak.noreply@github.com |
ed9ad77c25f12d502802ce77047e6aa0de6946c7 | 07539ecbcee0488ce4a0eb779583da3149cfac7b | /amonone/web/apps/settings/views/servers.py | de80a5b486c46ee332a81ed6ad1aa596bd63e64c | [
"MIT"
] | permissive | outbounder/amonone | e151584ac38222b40c314d586ebadc4e0f43fce1 | 985fa147c1d98a4f57ff33ebd37ca0d938fe674d | refs/heads/master | 2020-12-25T13:33:46.425826 | 2013-07-11T08:54:32 | 2013-07-11T08:54:32 | 11,389,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,224 | py | from formencode.validators import Invalid as InvalidForm
from tornado.web import authenticated
from amonone.web.apps.core.baseview import BaseView
from amonone.web.apps.alerts.models import alerts_model, alerts_group_model
from amonone.web.apps.core.models import server_model
from amonone.web.apps.settings.forms import ServerForm
class ServersBaseView(BaseView):
    """Shared base for the servers settings pages; tags the current section."""
    def initialize(self):
        # current_page marks this settings section (presumably used for
        # navigation highlighting - TODO confirm against BaseView/templates);
        # set it before the shared initialization runs.
        self.current_page = 'settings:servers'
        super(ServersBaseView, self).initialize()
class ServersView(ServersBaseView):
    """Lists all servers, each with its alert group resolved to a record."""
    @authenticated
    def get(self):
        # (The original also read self.session['errors'] into a local that
        # was never used; that dead read has been dropped.)
        all_servers = server_model.get_all()
        servers = []
        if all_servers:
            # clone() presumably yields a fresh iterable copy of the result
            # cursor - confirm against server_model's backend.
            for server in all_servers.clone():
                alert_group = server.get('alert_group', None)
                server['alert_group'] = alerts_group_model.get_by_id(alert_group)
                servers.append(server)
        self.render('settings/servers/view.html',
                servers=servers)
class ServersDeleteView(ServersBaseView):
    """Deletes a server and all alerts attached to it.

    NOTE(review): destructive action performed on a GET request, and the
    fetched ``server`` record is never used afterwards.
    """
    @authenticated
    def get(self, param=None):
        server = server_model.get_by_id(param)
        # Remove dependent alerts before the server record itself.
        alerts_model.delete_server_alerts(param)
        server_model.delete(param)
        self.redirect(self.reverse_url('settings_servers'))
class ServersUpdateView(ServersBaseView):
    """Edit form (GET) and save handler (POST) for a single server.

    NOTE: the ``except InvalidForm, e`` syntax below is Python 2 only.
    """
    @authenticated
    def get(self, param=None):
        # Render the edit form; validation errors stashed in the session by
        # a failed POST are shown once and then cleared.
        errors = self.session.get('errors',None)
        server = server_model.get_by_id(param)
        groups = alerts_group_model.get_all()
        self.delete_session_key('errors')
        self.render('settings/servers/edit.html',
                server=server,
                groups=groups,
                errors=errors)
    @authenticated
    def post(self, param=None):
        self.check_xsrf_cookie()
        form_data = {
            "name": self.get_argument('name', ''),
            "notes": self.get_argument('notes', ''),
            "alert_group": self.get_argument('alert_group', ''),
        }
        try:
            # Validate, persist, clear any stale session state, back to list.
            valid_data = ServerForm.to_python(form_data)
            server_model.update(valid_data, param)
            self.delete_session_key('errors')
            self.delete_session_key('form_data')
            self.redirect(self.reverse_url('settings_servers'))
        except InvalidForm, e:
            # Stash errors + submitted values in the session and re-show the form.
            self.session['errors'] = e.unpack_errors()
            self.session['form_data'] = form_data
            self.redirect(self.reverse_url('update_server', param))
class ServersAddView(ServersBaseView):
@authenticated
def get(self):
errors = self.session.get('errors',None)
form_data = self.session.get('form_data',None)
groups = alerts_group_model.get_all()
self.delete_session_key('errors')
self.render('settings/servers/add.html',
groups=groups,
errors=errors,
form_data=form_data)
@authenticated
def post(self):
self.check_xsrf_cookie()
form_data = {
"name": self.get_argument('name', ''),
"notes": self.get_argument('notes', ''),
"alert_group": self.get_argument('alert_group', ''),
}
try:
valid_data = ServerForm.to_python(form_data)
server_model.add(valid_data['name'],
valid_data['notes'],
valid_data['alert_group'])
self.delete_session_key('errors')
self.delete_session_key('form_data')
self.redirect(self.reverse_url('settings_servers'))
except InvalidForm, e:
self.session['errors'] = e.unpack_errors()
self.session['form_data'] = form_data
self.redirect(self.reverse_url('settings_servers_add')) | [
"martinrusev@zoho.com"
] | martinrusev@zoho.com |
1a5d2b15ad5333a6df92322bd5e48acfc8d40367 | 598b71f99a13a6a124d30617157e1a84dacacaf3 | /deploy/urls.py | ada8e0ee3d2aa8452b76cb9ce3883971c1747fd3 | [] | no_license | Ashutosh1997/iris-flower-reg | 1cada2f547278c1df677777d4e88a502b5777749 | c2c2f0acefc135011383fcfd95fdcd3d27a14913 | refs/heads/main | 2023-03-30T17:31:02.172808 | 2021-03-24T15:47:33 | 2021-03-24T15:47:33 | 351,132,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | """deploy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from deploy import views
# URL routes: Django admin plus the two app views (input form and result).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('result/', views.result, name='result'),
]
| [
"Ritikmshr1@gmail.com"
] | Ritikmshr1@gmail.com |
10c688c854546fc06fce743d8e6fa86f451a8964 | 4a8bfa3407aa98a04ede3162f85467b1b5012fe7 | /tests/test_api/test_methods/test_send_document.py | 69fbd458e86e5b98614e54eb0e7f81a108d9f7d2 | [] | no_license | aiogram/tg-codegen | 07ec80814eec46f464d2490fd27b7b6b27257f1b | ba3c2f893591d45dda418dd16e0646e260afdf14 | refs/heads/master | 2022-12-09T10:44:10.781570 | 2021-11-07T23:33:25 | 2021-11-07T23:33:25 | 218,523,371 | 24 | 5 | null | 2022-12-08T08:47:43 | 2019-10-30T12:33:21 | Python | UTF-8 | Python | false | false | 1,021 | py | import pytest
from aiogram.api.methods import Request, SendDocument
from tests.mocked_bot import MockedBot
# NOTE(review): generated scaffold, currently skipped - the `...` call
# arguments and the unimported `Message` annotation must be filled in
# before unskipping.  (Local variable annotations are not evaluated at
# runtime, so the missing Message import does not raise while skipped.)
@pytest.mark.skip
class TestSendDocument:
    @pytest.mark.asyncio
    async def test_method(self, bot: MockedBot):
        # Queue a successful sendDocument response on the mocked bot.
        prepare_result = bot.add_result_for(SendDocument, ok=True, result=None)
        response: Message = await SendDocument(
            chat_id=..., document=...,
        )
        request: Request = bot.get_request()
        assert request.method == "sendDocument"
        # assert request.data == {}
        assert response == prepare_result.result
    @pytest.mark.asyncio
    async def test_bot_method(self, bot: MockedBot):
        # Same check via the bot facade method instead of the method class.
        prepare_result = bot.add_result_for(SendDocument, ok=True, result=None)
        response: Message = await bot.send_document(
            chat_id=..., document=...,
        )
        request: Request = bot.get_request()
        assert request.method == "sendDocument"
        # assert request.data == {}
        assert response == prepare_result.result
| [
"jroot.junior@gmail.com"
] | jroot.junior@gmail.com |
2b85533b105f783711b789a2daccbab5b3cc2db7 | 813cd4c92fd1257bb26d214d49cf3083ddd57ebe | /dashboard/model/flask_utils.py | 4e2bc23bc62381f779dcb03d1b380fa2bf272396 | [] | no_license | vkous/simple_dashboard | 7d204aba10d8714ba5cc2e49e15cb17b047376b9 | 9e2718c98af2a7449eb0d39b8b469e2d78d2e244 | refs/heads/main | 2023-02-12T01:34:03.716322 | 2021-01-17T23:10:28 | 2021-01-17T23:10:28 | 326,380,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,285 | py | from flask import(
current_app as app,
request,
url_for,
redirect,
render_template,
session,
g
)
import os
import json
def redirect_to_last_page():
last_page_url = request.referrer
if request.referrer is not None :
if (len(request.referrer.split('?')) == 1) | (request.referrer is None) :
return redirect(url_for('index'))
else:
return redirect(request.referrer)
else:
print('redirect to last page : no last page')
return redirect(url_for('index'))
def force_update():
session['delta_mins'] = 1
return redirect_to_last_page()
def reinitialize_session_delta_mins():
session['delta_mins'] = 15
def check_delta_mins():
if session.get('delta_mins') is None:
reinitialize_session_delta_mins()
if request.args.get('update') is not None:
return force_update()
def return_template_index_page():
if not ('latitude' in session) & ('longitude' in session):
_latitude = None
_longitude = None
else:
_latitude = session['latitude']
_longitude = session['longitude']
return render_template(
'dashboard.html',
basic_content_boolean=True,
latitude = _latitude,
longitude= _longitude) | [
"vk@vk.com"
] | vk@vk.com |
74649ca75f11649222b2dec6cad27c8abea4cc22 | 8feba89f89258b51d53914d060b384aceaa4f6d3 | /09.ml/svm_mnist_hog_train.py | 22bdbc3936f787911175fddbb473657e8cb6536b | [] | no_license | dltpdn/insightbook.opencv_project_python | e35f2738da5ea07d4b8efe0bbb759e4e4f4bcd6c | 4f62bc4d084acf575c9f6bfa6d429772072a09bc | refs/heads/master | 2022-08-20T14:54:51.296781 | 2022-07-31T04:24:56 | 2022-07-31T04:24:56 | 174,243,667 | 64 | 65 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | import cv2
import numpy as np
import mnist
import time
# Function that straightens a skewed digit image ---(1)
affine_flags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR
def deskew(img):
    """Straighten a slanted 20x20 digit image using its image moments.

    If the variance moment mu02 is ~0 the image is returned unchanged (as a
    copy); otherwise the skew mu11/mu02 is undone with an affine warp.
    """
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    # Shear matrix; -0.5*20*skew re-centers the 20-pixel-wide patch.
    M = np.float32([[1, skew, -0.5*20*skew], [0, 1, 0]])
    img = cv2.warpAffine(img,M,(20, 20),flags=affine_flags)
    return img
# Parameter setup and creation of the HOGDescriptor ---(2)
# HOG parameters for a 20x20 digit patch: 10x10 blocks sliding by 5 pixels,
# 5x5 cells, 9 orientation bins.
winSize = (20,20)
blockSize = (10,10)
blockStride = (5,5)
cellSize = (5,5)
nbins = 9
hogDesc = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins)
if __name__ =='__main__':
    # Load the training and test images from MNIST ---(3)
    train_data, train_label = mnist.getTrain(reshape=False)
    test_data, test_label = mnist.getTest(reshape=False)
    # Deskew the training images ---(4)
    deskewed = [list(map(deskew,row)) for row in train_data]
    # Compute HOG descriptors for the training images ---(5)
    hogdata = [list(map(hogDesc.compute,row)) for row in deskewed]
    train_data = np.float32(hogdata)
    print('SVM training started...train data:', train_data.shape)
    # Reshape the training HOG data to (samples, features) ---(6)
    train_data = train_data.reshape(-1,train_data.shape[2])
    # Create the SVM object and train it ---(7)
    svm = cv2.ml.SVM_create()
    startT = time.time()
    svm.trainAuto(train_data, cv2.ml.ROW_SAMPLE, train_label)
    endT = time.time() - startT
    print('SVM training complete. %.2f Min'%(endT/60))
    # Save the trained model ---(8)
    svm.save('svm_mnist.xml')
    # Deskew the test images and compute their HOG descriptors ---(9)
    deskewed = [list(map(deskew,row)) for row in test_data]
    hogdata = [list(map(hogDesc.compute,row)) for row in deskewed]
    test_data = np.float32(hogdata)
    # Reshape the test HOG data to (samples, features) ---(10)
    test_data = test_data.reshape(-1,test_data.shape[2])
    # Predict on the test data ---(11)
    ret, result = svm.predict(test_data)
    # Count predictions matching the test labels and print accuracy ---(12)
    correct = (result==test_label).sum()
    print('Accuracy: %.2f%%'%(correct*100.0/result.size))
"rainer@swMBA.local"
] | rainer@swMBA.local |
096d5b2af4ddd18259a0e430d08101383c5666d6 | 8dcd3ee098b4f5b80879c37a62292f42f6b2ae17 | /venv/Lib/site-packages/pandas/tests/indexing/multiindex/test_ix.py | 76d710dfe9a915da57ca20003f1a5f095129c32d | [] | no_license | GregVargas1999/InfinityAreaInfo | 53fdfefc11c4af8f5d2b8f511f7461d11a3f7533 | 2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a | refs/heads/master | 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,067 | py | import numpy as np
import pandas._testing as tm
import pytest
from pandas import DataFrame, MultiIndex
from pandas.errors import PerformanceWarning
class TestMultiIndex:
    """MultiIndex .loc indexing tests (setitem, general lookup, KeyError,
    fancy list-of-tuples selection).  The multiindex_* arguments are pytest
    fixtures supplied by the surrounding test suite."""
    def test_frame_setitem_loc(self, multiindex_dataframe_random_data):
        frame = multiindex_dataframe_random_data
        frame.loc[("bar", "two"), "B"] = 5
        assert frame.loc[("bar", "two"), "B"] == 5
        # with integer labels
        df = frame.copy()
        df.columns = list(range(3))
        df.loc[("bar", "two"), 1] = 7
        assert df.loc[("bar", "two"), 1] == 7
    def test_loc_general(self):
        # GH 2817
        data = {
            "amount": {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
            "col": {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
            "year": {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012},
        }
        df = DataFrame(data).set_index(keys=["col", "year"])
        key = 4.0, 2012
        # emits a PerformanceWarning, ok
        with tm.assert_produces_warning(PerformanceWarning):
            tm.assert_frame_equal(df.loc[key], df.iloc[2:])
        # this is ok
        df.sort_index(inplace=True)
        res = df.loc[key]
        # col has float dtype, result should be Float64Index
        index = MultiIndex.from_arrays([[4.0] * 3, [2012] * 3], names=["col", "year"])
        expected = DataFrame({"amount": [222, 333, 444]}, index=index)
        tm.assert_frame_equal(res, expected)
    def test_loc_multiindex_missing_label_raises(self):
        # GH 21593
        df = DataFrame(
            np.random.randn(3, 3),
            columns=[[2, 2, 4], [6, 8, 10]],
            index=[[4, 4, 8], [8, 10, 12]],
        )
        with pytest.raises(KeyError, match=r"^2$"):
            df.loc[2]
    def test_series_loc_getitem_fancy(
        self, multiindex_year_month_day_dataframe_random_data
    ):
        # Selecting a list of full index tuples should match a reindex over
        # the same labels.
        s = multiindex_year_month_day_dataframe_random_data["A"]
        expected = s.reindex(s.index[49:51])
        result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
        tm.assert_series_equal(result, expected)
| [
"44142880+GregVargas1999@users.noreply.github.com"
] | 44142880+GregVargas1999@users.noreply.github.com |
435b28f935245fd044ae7e200217e9f90ae3bf8e | 34bb078fef4919c19e064d67919860f572eb42eb | /Chapter 3/3-3 MNIST without CNN.py | e146048a7927db49ebde30e7b825ba20dc36fa4d | [] | no_license | JYang17/MyTensorflow | 9d0388067ff02b7f62d2f740cc27ef2744fe8454 | ab9f61fee952e7b3d8b88c98568726cfe016c825 | refs/heads/master | 2020-03-17T22:56:49.243342 | 2018-06-05T10:14:05 | 2018-06-05T10:14:05 | 134,024,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | #https://www.bilibili.com/video/av20034351/?p=9
#MNIST without a convolutional neural network
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
#import numpy as np
# Load the dataset
# one-hot: encode each label as a vector with a single 1 and 0s elsewhere
# "MNIST_data" creates a folder under the current script path to download and
# store the MNIST images; an absolute path also works
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)
# Size of each batch
batch_size = 100
# Total number of batches (floor division)
n_batch = mnist.train.num_examples//batch_size
# placeholders: x holds the 0-9 handwritten image data (images), y the actual digit (labels)
# for [None,784], think of feeding one 784-column row at a time, many such rows
x = tf.placeholder(tf.float32,[None,784])# any number of rows, 784 columns
y = tf.placeholder(tf.float32,[None,10])# any number of rows, 10 columns (10 output labels)
# Build a neural network with no hidden layer
W = tf.Variable(tf.zeros([784,10]))
# [10,10] would be wrong; tf.zeros([1,10]) and tf.zeros([10]) are equivalent -
# each of the 10 outputs represents one of the digits 0-9
b = tf.Variable(tf.zeros([1,10]))
# softmax as the activation function computes a probability for each digit 0-9
prediction = tf.nn.softmax(tf.matmul(x,W)+b)
# Quadratic cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# Gradient-descent optimizer
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
init = tf.global_variables_initializer()
# tf.argmax(y,1) gives the position of the largest probability;
# if prediction and label agree on that position, the digit was recognised correctly
correction_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
# Compute the accuracy
# tf.cast converts the booleans to floats; their mean is the accuracy
# this mean is over the whole set of input images fed in (train or test)
accuracy = tf.reduce_mean(tf.cast(correction_prediction,tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for step in range(21):# run over the full data 21 times
        for batch in range(n_batch):# note the range(); the data is split into batches
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train,feed_dict={x:batch_xs,y:batch_ys})
        # evaluate on the test set
        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print("Step:"+str(step)+" Accuracy:"+str(acc))
| [
"yangzhe1122@sina.com.cn"
] | yangzhe1122@sina.com.cn |
7b687e9b543e4caf4e638199a82a10f413f00b95 | 10608dc311d410e86884a652dd8ff7e100891144 | /CW/CW2015.py | 5385db5e748b145580b64094444435307e123215 | [] | no_license | Chabo17/LegacyCode | da995abd2e835495dec0866a54269690f6760280 | 3455a65e4f8b31e1c9f0d7260e55d71c3c148999 | refs/heads/main | 2023-08-18T05:24:49.376947 | 2021-09-20T15:39:40 | 2021-09-20T15:39:40 | 408,500,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import sys
from collections import Counter
from matplotlib import pyplot as plt
def Prob7(text):
pval=[]
rval=[]
for ln in text:
if float(ln) != 0:#finds the p val and r val
p=float(ln)
pval.append(p)
rval.append(p**(2/3))
plt.plot(pval,rval,marker='o',color='blue',linestyle='')#plots all of the dots
for i in range (0,35):
plt.plot([i,i+1],[i**(2/3),(i+1)**(2/3)])#plots the line
plt.title("Kepler's Third Law")#title
plt.ylabel("R values")#y axis title
plt.xlabel("P values")#x axis
plt.show()
def Prob8(text):
dup=0
e=0
dict={}
for i in range (2,len(text),2):
if text[i-1]=='NA':
e+=1
else:
if(text[i] not in dict):
dict[text[i]]=1
else:
dict[text[i]]+=1
for d in dict:
if(dict[d]>1 and d!='0'):
dup+=1
x=["Empty Cubes","Duplicate Cubes","Employees without Cube"]#names of the bars
y=[e,dup,dict['0']]#value of the barss
xs = [i+.1 for i, _ in enumerate(x)]
plt.xticks([i + .5 for i, _ in enumerate(x)],x)
plt.xlabel("Number of cubes")
plt.bar(xs,y)
plt.title("Problem 8")
plt.show()
def Prob9(text):
c=Counter()
for i in range (0,len(text)-1,2):#makes the counter and adds the values
c[text[i]]+=int(text[i+1])
print(c.most_common(5))
def main(filename):
f = open(filename, 'rU')
text=f.read().split()
if sys.argv[2]=='-prob7':
Prob7(text)
elif sys.argv[2]=='-prob8':
Prob8(text)
else:
Prob9(text)
if __name__ == '__main__':
main(sys.argv[1])
| [
"noreply@github.com"
] | Chabo17.noreply@github.com |
f5b3b649e526059fc48bdfb670b92c747302094c | 080c05a5f2fe455d8b8227d71f514f647f847799 | /functions/reduced_oa_blocks.py | b6df3399b45b7396060b8eb294135895444c4e3a | [] | no_license | nevenaf/OAs | b5554e2ad3ef744b7cc54019ee46baf4ab065c6d | a1c29dee72067a231548f693d6d6b98925a4ea3d | refs/heads/master | 2021-01-10T07:09:22.039019 | 2015-09-30T09:37:35 | 2015-09-30T09:37:35 | 43,422,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | from itertools import combinations, permutations
def is_non_decreasing(seq):
n = len(seq)
for i in range(n):
for j in range(i+1,n):
if seq[i]>seq[j]:
return False
return True
def get_rows(k,n):
order_of_row = {}
row_of_order =[]
idx = 0
row = [0 for _ in range(k)]
for comb in combinations(range(n), k-1): # no row of OA has any symbol twice
for perm in permutations(range(k-1)): # order of symbols in a row
for a in range(1,n): # first symbol in a row is not 0
row[0] = a
for i in range(k-1):
row[i+1] = comb[perm[i]]
good = True
# the first column of the first latin square is sorted
if row[1]==0:
good = good and row[0]==row[2]
if row[0]==row[2]:
good = good and row[1]==0
# order the latin squares by the second values in the first column
if good and (row[0]==1 and row[1]==0 and row[2]==1):
good = is_non_decreasing(row[3:k])
if good:
rowt = tuple(row)
order_of_row.update({rowt : idx})
row_of_order.append(rowt)
idx += 1
return [order_of_row,row_of_order]
def get_pairs(k,n):
''' assumes t=2 '''
order_of_pair = {}
pair_of_order =[]
idx = 0
c1 = 0
for a1 in range(1,n):
for c2 in range(1,k):
for a2 in range(n):
pair = (c1,a1,c2,a2)
order_of_pair.update({pair : idx})
pair_of_order.append(pair)
idx += 1
for c1 in range(1,k):
for c2 in range(c1+1, k):
for a1 in range(n):
for a2 in range(a1+1,n):
pair = (c1,a1,c2,a2)
order_of_pair.update({pair : idx})
pair_of_order.append(pair)
idx += 1
pair = (c1,a2,c2,a1)
order_of_pair.update({pair : idx})
pair_of_order.append(pair)
idx += 1
return [order_of_pair,pair_of_order]
| [
"nfrancetic@gmail.com"
] | nfrancetic@gmail.com |
62ddbd35dc33aa67364519ced4c59b35d415f98f | 7d2d96dab56c03bf99cb8de0712eb9bc2a67d9cc | /genetic-alg-sampling/load_cflux_masked.py | a467d3b474c709a90260f7fe33f2571162b90f48 | [] | no_license | nicholaschris/masters_thesis | abe892077f692a532e50514427d7b27f8bd65196 | 15cf8e7e2a83a7642ba6672a877dbb65ec8279f5 | refs/heads/master | 2020-05-27T03:21:16.142351 | 2013-04-03T10:52:43 | 2013-04-03T10:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | #!usr/bin/python
# ~/Desktop/ga-session-2012-11-12/load_cflux_masked.py
'''This module loads the cflux netcdf file
or any netcdf file that needs to be masked.
'''
import numpy as np
from numpy import ma
from scipy.io.netcdf import netcdf_file
import os
cflux_5day = ''
cflux_daily = ''
dpco2_daily = ''
pco2_daily = ''
cflux_5day = '/data/netcdf_files/CFLX_2000_2009.nc'
cflux_dpco2_daily = '/data/netcdf_files/cflx_dpco2_1998_2007.nc'
### default values (These can be altered in make_gene_map.py)
file_name = os.getcwd() + '/../' + cflux_5day # cflux_dpco2_daily
time_start = 0
time_end = 730 #730 for 5day #3650 for daily
lat_start = 0
lat_end = 40 # for southern Ocean
lon_start = 0
lon_end = 182
masked_value = 1e+20
unit_changer = 60*60*24 * 1000 # micromol carbon per m per day (86400000)
def load_file(file_name = file_name, time_start = time_start,
time_end = time_end, lat_start = lat_start, lat_end = lat_end,
lon_start = lon_start, lon_end = lon_end, masked_value = masked_value):
nc = netcdf_file(file_name, 'r')
new_array = nc.variables['Cflx'][time_start:time_end, lat_start:lat_end, lon_start:lon_end]
nc.close()
new_array = ma.masked_values(new_array, masked_value)
new_array = ma.array(new_array, dtype=np.float32)
new_array = new_array * unit_changer
return new_array
if __name__ == '__main__':
array = load_file()
| [
"nicholaschristopherpringle@gmail.com"
] | nicholaschristopherpringle@gmail.com |
b71f3d3ff286e0d4311404d1ab8a0fde533ae2e0 | 44d4a051663179ddf4a87ef607070e8abed1b94f | /crab/run_getLumis.py | 717f627277f6a30fc4e8d83eaf7b711b471d14a5 | [] | no_license | mbandrews/h2aa | 458471abd90dc2a80d13ad2d2428cc0567d58c89 | de1549d472f0b815236598c9676c27719b949e67 | refs/heads/master | 2022-12-09T22:08:21.235484 | 2022-12-08T03:44:58 | 2022-12-08T03:44:58 | 232,382,628 | 0 | 1 | null | 2022-12-08T03:44:59 | 2020-01-07T17:49:35 | Jupyter Notebook | UTF-8 | Python | false | false | 7,017 | py | from __future__ import print_function
from Utilities.General.cmssw_das_client import get_data as das_query
from FWCore.PythonUtilities.LumiList import LumiList
import os, re, glob, shutil
import numpy as np
import subprocess
input_campaign = '06Sep2020_AODslim-ecal_v1' # data,h4g,hgg, DY2016
#input_campaign = '28Dec2020_AODslim-ecal_v1' # data-2016H
#input_campaign = '06Sep2020_AODslim-ecal_v2' # DY2017
samples = {
#'data2018-Run2018Cminiaod': '/EGamma/mandrews-data2018-Run2018C_Era2018_06Sep2020_MINIAOD-skimv2-6e67610c0756643cd1efca7b7fd48fa1/USER',
#'data2018-Run2018A': '/EGamma/mandrews-EGamma_2018A_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
#'data2018-Run2018B': '/EGamma/mandrews-EGamma_2018B_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
#'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
#'data2018-Run2018D': '/EGamma/mandrews-EGamma_2018D_Era2018_06Sep2020_AODslim-ecal_v1-306144291bb2d755797972fd22d33d6d/USER'
#'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_AODslim-ecal_TESTv2-6e67610c0756643cd1efca7b7fd48fa1/USER'
#'data2018-Run2018C': '/EGamma/mandrews-data2018-Run2018C_Era2018_06Sep2020_MINIAOD-skimv2-6e67610c0756643cd1efca7b7fd48fa1/USER'
#'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_Era2018_06Sep2020_AODslim-ecal_v2-6e67610c0756643cd1efca7b7fd48fa1/USER'
#'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_AODslim-ecal_TESTv4-6e67610c0756643cd1efca7b7fd48fa1/USER'
#'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_Era2018_06Sep2020_AODslim-ecal_v3-6e67610c0756643cd1efca7b7fd48fa1/USER'
#'data2018-Run2018C': '/EGamma/Run2018C-17Sep2018-v1/AOD'
#'data2016-Run2016H': '/DoubleEG/mandrews-DoubleEG_2016H_Era2016_28Dec2020_AODslim-ecal_v1-2427c69bd126da1063d393ec79219651/USER'
#'data2016-Run2016H': '/DoubleEG/mandrews-DoubleEG_2016H_Era2016_06Sep2020_AODslim-ecal_v1-2427c69bd126da1063d393ec79219651/USER'
# AOD-slims:
'data2016-Run2016B': '/DoubleEG/mandrews-DoubleEG_2016B_Era2016_06Sep2020_AODslim-ecal_v1-81ba725143e8ad84d6b47c9ab0eb90c4/USER',
'data2016-Run2016C': '/DoubleEG/mandrews-DoubleEG_2016C_Era2016_06Sep2020_AODslim-ecal_v1-81ba725143e8ad84d6b47c9ab0eb90c4/USER',
'data2016-Run2016D': '/DoubleEG/mandrews-DoubleEG_2016D_Era2016_06Sep2020_AODslim-ecal_v1-81ba725143e8ad84d6b47c9ab0eb90c4/USER',
'data2016-Run2016E': '/DoubleEG/mandrews-DoubleEG_2016E_Era2016_06Sep2020_AODslim-ecal_v1-81ba725143e8ad84d6b47c9ab0eb90c4/USER',
'data2016-Run2016F': '/DoubleEG/mandrews-DoubleEG_2016F_Era2016_06Sep2020_AODslim-ecal_v1-81ba725143e8ad84d6b47c9ab0eb90c4/USER',
'data2016-Run2016G': '/DoubleEG/mandrews-DoubleEG_2016G_Era2016_06Sep2020_AODslim-ecal_v1-2427c69bd126da1063d393ec79219651/USER',
'data2016-Run2016H': '/DoubleEG/mandrews-DoubleEG_2016H_Era2016_06Sep2020_AODslim-ecal_v1-2427c69bd126da1063d393ec79219651/USER',
'data2017-Run2017B': '/DoubleEG/mandrews-DoubleEG_2017B_Era2017_18May2020_AODslim-ecal_v1-3bfee02a0afb4bfd03fd5261a90623cd/USER',
'data2017-Run2017C': '/DoubleEG/mandrews-DoubleEG_2017C_Era2017_18May2020_AODslim-ecal_v1-3bfee02a0afb4bfd03fd5261a90623cd/USER',
'data2017-Run2017D': '/DoubleEG/mandrews-DoubleEG_2017D_Era2017_18May2020_AODslim-ecal_v1-3bfee02a0afb4bfd03fd5261a90623cd/USER',
'data2017-Run2017E': '/DoubleEG/mandrews-DoubleEG_2017E_Era2017_18May2020_AODslim-ecal_v1-3bfee02a0afb4bfd03fd5261a90623cd/USER',
'data2017-Run2017F': '/DoubleEG/mandrews-DoubleEG_2017F_Era2017_18May2020_AODslim-ecal_v1-964eedbb4080135606054ba835f474dc/USER',
'data2018-Run2018A': '/EGamma/mandrews-EGamma_2018A_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
'data2018-Run2018B': '/EGamma/mandrews-EGamma_2018B_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
'data2018-Run2018C': '/EGamma/mandrews-EGamma_2018C_Era2018_06Sep2020_AODslim-ecal_v1-6e67610c0756643cd1efca7b7fd48fa1/USER',
'data2018-Run2018D': '/EGamma/mandrews-EGamma_2018D_Era2018_06Sep2020_AODslim-ecal_v1-306144291bb2d755797972fd22d33d6d/USER'
# MINIAOD-skims:
#'data2016-Run2016B': '/DoubleEG/mandrews-data2016-Run2016B_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016C': '/DoubleEG/mandrews-data2016-Run2016C_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016D': '/DoubleEG/mandrews-data2016-Run2016D_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016E': '/DoubleEG/mandrews-data2016-Run2016E_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016F': '/DoubleEG/mandrews-data2016-Run2016F_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016G': '/DoubleEG/mandrews-data2016-Run2016G_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
#'data2016-Run2016H': '/DoubleEG/mandrews-data2016-Run2016H_Era2016_06Sep2020_MINIAOD-skimv2-da20b4dc2a59d4df854398f842269346/USER',
}
for s,dset in samples.iteritems(): #python3: samples.items()
print('For sample:',s)
assert os.environ['CMSSW_BASE'] != ''
assert os.environ['CMSSW_BASE'] in os.getcwd()
#assert input_campaign in dset, 'Input campaign does not match sample dset: %s vs %s'%(input_campaign, dset.split('/')[1])
year = re.findall('(201[6-8])', s.split('-')[0])[0]
#base_dir = '%s/src/h2aa/aodSkims/Era%s'%(os.environ['CMSSW_BASE'], input_campaign)
base_dir = '%s/src/h2aa/json/Era%s'%(os.environ['CMSSW_BASE'], input_campaign)
if not os.path.isdir(base_dir):
os.makedirs(base_dir)
print(' >> Doing:',s)
# Query DAS for run,lumi list
# das query dict if successful: [u'status', u'mongo_query', u'ctime', u'nresults', u'timestamp', u'ecode', u'data']
print(' .. querying: %s'%dset)
#q = das_query('run,lumi dataset=%s instance=prod/phys03'%dset)
q = das_query('run,lumi dataset=%s %s'%(dset, 'instance=prod/phys03' if 'USER' in dset else ''))
print(' .. status: %s'%q['status'])
if q['status'] != 'ok':
print(' !! Query failed, skipping...')
continue
# Get run,lumi data
lumi_dict = {}
qdata = q['data'] # len(qdata) == N runs
print(' .. N runs: %d'%len(qdata))
for d in qdata:
# d is a dict with keys: [u'run', u'lumi', u'qhash', u'das']
run = d['run'][0]['run_number'] # gets actual run number as int
lumi = d['lumi'][0]['number'] # gets actual lumis as list
#print(run, lumi)
lumi_dict[run] = lumi
#break
assert len(qdata) == len(lumi_dict)
# Convert dict to json
#base_dir = '.'
lumi_list = '%s/%s_lumi_list.json'%(base_dir, s)
print(' .. writing to: %s'%lumi_list)
lumi_json = LumiList(runsAndLumis=lumi_dict)
lumi_json.writeJSON(lumi_list)
#'''
| [
"michael.andrews@cern.ch"
] | michael.andrews@cern.ch |
5b4a97a204e541bf334a7db0e8b19738b970caa2 | 44e2977cd3205a17c7a861f034dcb1ab828f9ba0 | /arret.py | bed0d44b5222662975bd796ea78681cfe87c0475 | [] | no_license | bedaigiraneza/Algorithme-d-itineraire | 33ff041f9311902e6531ba1a56c9efad9d4b4026 | 2aa92aee2d51987849b1fd35ba5941173e182c52 | refs/heads/master | 2020-04-23T04:08:03.826137 | 2019-02-15T17:06:01 | 2019-02-15T17:06:01 | 170,899,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 25 12:08:05 2019
@author: igiraneb
"""
class Arret:
def __init__(self, nomArret):
self.name = nomArret
# ================================================
def getName(self):
return self.name
| [
"noreply@github.com"
] | bedaigiraneza.noreply@github.com |
64a6d3b2e5aa8f4603538b235f00b632dbf05da5 | 491cde2db8a04f8c83c358ac09464412e7744c97 | /experiments/utilities.py | 3b4f35842ba944ec6d6d8991b7b2096056f1895f | [
"MIT"
] | permissive | ashah044/poisoning-clustering | 4811823a0c606d46f7e8dbba99398012fb63e758 | 288608f87f02d43589e0983b31ce6f2bd18b8814 | refs/heads/main | 2023-01-06T00:02:37.553596 | 2020-10-23T14:22:52 | 2020-10-23T14:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,899 | py | import numpy as np
import torch, random, os
from sklearn.metrics import (
adjusted_rand_score,
normalized_mutual_info_score,
adjusted_mutual_info_score,
)
class ClusteringWrapper3Dto2D:
def __init__(self, model):
self.model = model
def fit_predict(self, X):
"""
Since X has shape nxmxk (with k=1 for MNIST) we use only the first two dimensions
:param X:
:return:
"""
return self.model.fit_predict(X.squeeze(2))
def to_string(self):
return self.model.to_string()
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def attack_scores(Y, Ypred):
Y = Y.cpu()
Ypred = Ypred.cpu()
ARI, AMI, NMI = (
adjusted_rand_score(Y, Ypred),
adjusted_mutual_info_score(Y, Ypred),
normalized_mutual_info_score(Y, Ypred),
)
return ARI, AMI, NMI
def power_noise(X, Xadv):
l0 = torch.dist(X, Xadv, p=0)
l2 = torch.dist(X, Xadv, p=2)
linf = torch.dist(X, Xadv, p=float("inf"))
return l0, l2, linf
def store_tensors(name, Xadv, Yadv):
os.makedirs(os.path.dirname(name + "tensors/"))
torch.save(Xadv, name + "tensors/Xadv.pt")
torch.save(Yadv, name + "tensors/Yadv.pt")
def store_parameters(name, model, delta, n_samples):
with open(name + "parameters.csv", "w+") as writer:
writer.write("model;delta;n_samples\n")
writer.write("{};{};{}".format(model, delta, n_samples))
writer.write("\n")
writer.close()
def samples_in_cluster(X, Y, lb):
nc, mc, kc = X[Y == lb].shape
return nc
def relabel(y, y_hat):
k = len(y_hat.unique())
y_hat_rl = y_hat.clone()
for i in range(k):
l = torch.mode(y[y_hat == i])[0]
y_hat_rl[y_hat == i] = l
return y_hat_rl | [
"cina.antonio1995@gmail.com"
] | cina.antonio1995@gmail.com |
682a53ec7edd1b3b8745643e293763068ef80790 | 3bea1fd18eaa26d13225d30b28f6ef9794827c12 | /mysite/polls/apps.py | 3a3d341da90c0824d7c835c6c7b8e23007c0490d | [] | no_license | norlyakov/django_learning | e6887d491a6a4fe2424b5e0cc8c99109b8df7e0a | 57d2881c8805a4199520fe1fc123c0d6289f8b68 | refs/heads/master | 2023-04-30T07:33:30.635955 | 2019-11-26T15:07:42 | 2019-11-26T15:07:42 | 223,581,346 | 0 | 1 | null | 2022-04-22T22:48:22 | 2019-11-23T11:58:24 | Python | UTF-8 | Python | false | false | 155 | py | # -*- coding: utf-8 -*-
"""
:Authors: norlyakov
:Date: 23.11.2019
"""
from django.apps import AppConfig
class PollsConfig(AppConfig):
name = 'polls'
| [
"norlyakov@asdco.ru"
] | norlyakov@asdco.ru |
86b4ae02b26ad4834670f72d71b737c1ab20447f | 669b788991f28ee6949432642ba5d85096a6f3e5 | /app.py | a5703d9f0888cb99bc16f97b2d6c792c257fb6a1 | [] | no_license | Adrianna87/flask-survey | 56f10902c02dc236847f64c3cf9cb0a01d28434e | 60325e7b074c3bb3e00927b5df95ca31f80b436e | refs/heads/main | 2023-06-24T12:13:39.739277 | 2021-07-22T14:39:24 | 2021-07-22T14:39:24 | 388,494,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | from flask import Flask, request, render_template, redirect, flash, session
from surveys import satisfaction_survey as survey
RESPONSES_KEY = "responses"
app = Flask(__name__)
@app.route("/")
def show_survey_start():
return render_template("survey_start.html", survey=survey)
@app.route("/begin", methods=["POST"])
def start_survey():
session[RESPONSES_KEY] = []
return redirect("/questions/0")
@app.route("/answer", methods=["POST"])
def handle_question():
choice = request.form['answer']
responses = session[RESPONSES_KEY]
responses.append(choice)
session[RESPONSES_KEY] = responses
if (len(responses) == len(survey.questions)):
return redirect("/complete")
else:
return redirect(f"/questions/{len(responses)}")
@app.route("/questions/<int:qid>")
def show_question(qid):
responses = session.get(RESPONSES_KEY)
if (responses is None):
return redirect("/")
if (len(responses) == len(survey.questions)):
return redirect("/complete")
if (len(responses) != qid):
flash(f"Invalid question id: {qid}.")
return redirect(f"/questions/{len(responses)}")
question = survey.questions[qid]
return render_template("question.html", question_num=qid, question=question)
@app.route("/complete")
def complete():
return render_template("completion.html")
| [
"adriannam@workmail.com"
] | adriannam@workmail.com |
1b9d368787014856bd07466ca12d10c3526d8068 | e5869ffd5cef2465c1cb85f69e8f194a98333f7b | /dbrb6.py | 62c03c00afcc0a53aca841a2da513fa37617cef3 | [] | no_license | samikabir/JointOptimizedDisjBRB | a71665f3027750540b2692ea3ebaf86a842a7d80 | 055a858942a6739df42ccb9f736e142c02790597 | refs/heads/master | 2020-05-29T21:45:21.007389 | 2019-05-30T10:14:34 | 2019-05-30T10:14:34 | 189,391,281 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,746 | py | #PM_H = 500.4
#PM_M = 35.5
#PM_L = 0.0
AQI_H = 500.0
AQI_M = 101.0
AQI_L = 0.0
numberOfAntAttributes = 2
#relativeWeight = 1.0
cbd_0 = 1.0
cbd_1 = 0.0
cbd_2 = 0.0
cbd_3 = 0.0
cbd_4 = 1.0
cbd_5 = 0.0
cbd_6 = 0.0
cbd_7 = 0.0
cbd_8 = 1.0
aqi1 = 1.0
aqi2 = 1.0
aqi3 = 1.0
aqi4 = 1.0
aqi5 = 1.0
def ruleBase(s, c, x):
    """Run one inference pass of the disjunctive BRB for two antecedent inputs.

    Parameters
    ----------
    s : float
        First antecedent input (PM2.5-scale reading).
    c : float
        Second antecedent input (PM2.5-scale reading).
    x : sequence of float
        Learned parameter vector laid out as:
        x[0:18]  -- raw consequent belief degrees (6 rules x 3 consequents),
        x[18:20] -- antecedent attribute weights,
        x[20:26] -- initial rule weights,
        x[26:32] -- referential points, scaled onto [0, 499.4] below.

    Returns
    -------
    Whatever aggregateER_BrbCnn() returns (the aggregated AQI assessment).

    Side effects: rebinds the module-level ``consequentBeliefDegree`` and
    triggers the full pipeline of module-level transformations.
    """
    global consequentBeliefDegree
    # Normalise each rule's three belief degrees so every triple sums to 1.
    # (Replaces 18 hand-unrolled de0..de17 variables and drops the unused
    # temp_consequentBeliefDegree local of the original.)
    consequentBeliefDegree = []
    for base in range(0, 18, 3):
        triple_sum = x[base] + x[base + 1] + x[base + 2]
        for k in range(3):
            consequentBeliefDegree.append(x[base + k] / triple_sum)
    attrw1 = x[18]
    attrw2 = x[19]
    print("Inside ruleBase(x) relativeWeight1 ",attrw1,"relativeWeight2 ",attrw2)
    for u in range(18):
        print(consequentBeliefDegree[u])
    # Map the six normalised referential points onto the PM2.5 scale [0, 499.4].
    PMH = x[26] * 499.4
    PMUM = x[27] * 499.4
    PMM = x[28] * 499.4
    PMLM = x[29] * 499.4
    PMSL = x[30] * 499.4
    PML = x[31] * 499.4
    transformInput1(s, PMH, PMUM, PMM, PMLM, PMSL, PML)
    transformInput2(c, PMH, PMUM, PMM, PMLM, PMSL, PML)
    calculateMatchingDegreeBrbCnn(attrw1, attrw2, x[20], x[21], x[22], x[23], x[24], x[25])
    showActivationWeight()
    updateBeliefDegree()
    result = aggregateER_BrbCnn()
    return result
def transformInput1(i,j,k,l,m,n,p):
global H1
global UM1
global M1
global LM1
global SL1
global L1
PM_H = j
PM_UM = k
PM_M = l
PM_LM = m
PM_SL = n
PM_L = p
print("Inside transformInput1() Input is ",i,"PM_H ", PM_H,"PM_UM",PM_UM, "PM_M ",PM_M,"PM_LM", PM_LM,"PM_SL", PM_SL, "PM_L ",PM_L)
if (i >= PM_H):
H1 = 1
UM1 = 0
M1 = 0
LM1 = 0
SL1 = 0
L1 = 0
elif (i == PM_UM):
H1 = 0
UM1 = 1
M1 = 0
LM1 = 0
SL1 = 0
L1 = 0
elif (i == PM_M):
H1 = 0
UM1 = 0
M1 = 1
LM1 = 0
SL1 = 0
L1 = 0
elif (i == PM_LM):
H1 = 0
UM1 = 0
M1 = 0
LM1 = 1
SL1 = 0
L1 = 0
elif (i == PM_SL):
H1 = 0
UM1 = 0
M1 = 0
LM1 = 0
SL1 = 1
L1 = 0
elif (i <= PM_L):
H1 = 0
UM1 = 0
M1 = 0
LM1 = 0
SL1 = 0
L1 = 1
elif (i < PM_H) and (i > PM_UM):
UM1 = (PM_H-i)/(PM_H-PM_UM)
H1 = 1 - UM1
M1 = 0.0
L1 = 0.0
LM1 = 0.0
SL1 = 0.0
elif (i < PM_UM) and (i > PM_M):
M1 = (PM_UM-i)/(PM_UM-PM_M)
UM1 = 1 - M1
H1 = 0.0
L1 = 0.0
LM1 = 0.0
SL1 = 0
elif (i < PM_M) and (i > PM_LM):
LM1 = (PM_M-i)/(PM_M-PM_LM)
M1 = 1 - LM1
H1 = 0.0
UM1 = 0.0
L1 = 0.0
SL1 = 0
elif (i < PM_LM) and (i > PM_SL):
SL1 = (PM_LM-i)/(PM_LM-PM_SL)
LM1 = 1 - SL1
H1 = 0.0
UM1 = 0.0
M1 = 0.0
L1 = 0.0
elif (i < PM_SL) and (i > PM_L):
L1 = (PM_SL-i)/(PM_SL-PM_L)
SL1 = 1 - L1
H1 = 0.0
UM1 = 0.0
M1 = 0.0
LM1 = 0.0
print("Inside transformInput1(), H1", H1, "UM1 ", UM1, "M1 ",M1,"LM1 ", LM1, "SL1 ", SL1, "L1 ", L1)
def transformInput2(i, j, k, l, m, n, p):
    """Fuzzify the second antecedent input against six referential values.

    Publishes the membership grades H2, UM2, M2, LM2, SL2 and L2 as module
    globals, computed from input ``i`` and the descending referential points
    ``j > k > l > m > n > p`` (High .. Low).

    Fix: the final trace line of the original wrongly reported
    "transformInput1" -- it now correctly identifies this function.
    """
    global H2
    global UM2
    global M2
    global LM2
    global SL2
    global L2
    PM_H = j
    PM_UM = k
    PM_M = l
    PM_LM = m
    PM_SL = n
    PM_L = p
    print("Inside transformInput2() Input is ",i,"PM_H ", PM_H,"PM_UM",PM_UM, "PM_M ",PM_M,"PM_LM", PM_LM,"PM_SL", PM_SL, "PM_L ",PM_L)
    refs = [PM_H, PM_UM, PM_M, PM_LM, PM_SL, PM_L]  # descending referential points
    grades = None
    if i >= refs[0]:
        grades = [1, 0, 0, 0, 0, 0]
    elif i <= refs[5]:
        grades = [0, 0, 0, 0, 0, 1]
    else:
        # Exact hit on an interior referential point -> crisp membership.
        for idx in (1, 2, 3, 4):
            if i == refs[idx]:
                grades = [0, 0, 0, 0, 0, 0]
                grades[idx] = 1
                break
    if grades is None:
        # i lies strictly between two neighbouring referential points:
        # split the membership linearly between them.
        grades = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        for idx in range(5):
            if refs[idx] > i > refs[idx + 1]:
                lower_share = (refs[idx] - i) / (refs[idx] - refs[idx + 1])
                grades[idx + 1] = lower_share
                grades[idx] = 1 - lower_share
                break
    H2, UM2, M2, LM2, SL2, L2 = grades
    print("Inside transformInput2(), H2", H2, "UM2 ", UM2, "M2 ",M2,"LM2 ", LM2, "SL2 ", SL2, "L2 ", L2)
def takeInput():
    """Prompt the user for a PM2.5 reading and store it in the module global a1."""
    global a1
    a1 = float(input("Insert value for PM2.5 (between 0 and 500.4 µg/m3): "))
def calculateMatchingDegreeBrbCnn(aw1, aw2, irw1, irw2, irw3, irw4, irw5, irw6):
    """Compute per-rule matching degrees from the fuzzified antecedents.

    Reads the module-level membership grades (H1..L1 and H2..L2) set by the
    transformInput* functions and publishes three module globals:
    initialRuleWeight, matchingDegree (rule-weighted product form) and
    trainedMatchingDegree (additive form used by showActivationWeight()).
    aw1/aw2 are the antecedent attribute weights; irw1..irw6 the initial
    rule weights.
    """
    global initialRuleWeight
    global matchingDegree
    global trainedMatchingDegree
    initialRuleWeight = [irw1, irw2, irw3, irw4, irw5, irw6]
    grades_one = [H1, UM1, M1, LM1, SL1, L1]
    grades_two = [H2, UM2, M2, LM2, SL2, L2]
    matchingDegree = []
    trainedMatchingDegree = []
    for rule_weight, g1, g2 in zip(initialRuleWeight, grades_one, grades_two):
        print("Inside calculateMatchingDegreeBrbCnn() initialRuleWeight[increment] is ", rule_weight)
        weighted_one = g1 ** aw1
        weighted_two = g2 ** aw2
        matchingDegree.append(rule_weight * weighted_one * weighted_two)
        trainedMatchingDegree.append(weighted_one + weighted_two)
    print("Inside calculateMatchingDegreeBrbCnn() relativeWeight1 ", aw1, "relativeWeight2 ", aw2)
def showMatchingDegree():
    """Placeholder: advances a local counter nine times but has no output or
    side effects (kept for interface compatibility with existing callers)."""
    tally = 1
    for _ in range(9):
        tally = tally + 1
def showActivationWeight():
    """Derive normalised rule activation weights from the matching degrees.

    Reads the module-level initialRuleWeight, matchingDegree and
    trainedMatchingDegree lists (as published by
    calculateMatchingDegreeBrbCnn()) and rebinds the module global
    activationWeight to a six-element list that sums to one.
    """
    global activationWeight
    matching_total = sum(matchingDegree[idx] for idx in range(6))
    scaled = []
    for counter in range(6):
        print("Inside showActivationWeight() initialRuleWeight[counter] is ",initialRuleWeight[counter])
        # Weight each rule's additive matching degree by its initial rule
        # weight, relative to the total product-form matching degree.
        scaled.append(initialRuleWeight[counter] * trainedMatchingDegree[counter] / matching_total)
    scaled_total = sum(scaled)
    activationWeight = [weight / scaled_total for weight in scaled]
def takeCnnOutput():
    """Read the CNN class scores from cnn_prediction1.txt and normalise them.

    The file holds one percentage per line in the order: mild, nominal,
    severe (any lines past the second overwrite the severe score, matching
    the original parser).  Publishes normalized_cnn_{mild,nominal,severe}_degree
    as module globals and prints the PM2.5 value implied by the dominant class.

    Fixes vs. original: the file handle is managed with a context manager
    (the original leaked it on a parse error), the "unable to open" message
    is now reachable (open() raises before the old ``f.mode`` check ever
    ran), and missing lines default to "0" instead of raising NameError.
    """
    global normalized_cnn_severe_degree
    global normalized_cnn_mild_degree
    global normalized_cnn_nominal_degree
    try:
        with open("cnn_prediction1.txt", "r") as f:
            f1 = f.readlines()
    except OSError:
        print("Unable to open the file.")
        return
    cnn_mild = cnn_nominal = cnn_severe = "0"
    for parser, line in enumerate(f1):
        if parser == 0:
            cnn_mild = line
        elif parser == 1:
            cnn_nominal = line
        else:
            cnn_severe = line
    # Convert percentage strings to degrees in [0, 1] and normalise them.
    mild_degree = float(cnn_mild) / 100
    nominal_degree = float(cnn_nominal) / 100
    severe_degree = float(cnn_severe) / 100
    sum_degree = severe_degree + mild_degree + nominal_degree
    normalized_cnn_severe_degree = severe_degree / sum_degree
    normalized_cnn_mild_degree = mild_degree / sum_degree
    normalized_cnn_nominal_degree = nominal_degree / sum_degree
    # Map the dominant class onto a crude PM2.5 estimate; the adjacent class
    # contributes half of its band width.
    if ((normalized_cnn_severe_degree > normalized_cnn_mild_degree) and (normalized_cnn_severe_degree > normalized_cnn_nominal_degree)):
        cnn_pm25 = (150.5 + 349.9*normalized_cnn_severe_degree) + ((150.4*normalized_cnn_mild_degree)/2)
        print ("PM2.5 computed by CNN: ",cnn_pm25," µg/m3")
    elif ((normalized_cnn_nominal_degree > normalized_cnn_mild_degree) and (normalized_cnn_nominal_degree > normalized_cnn_severe_degree)):
        cnn_pm25 = (35.4*(1 - normalized_cnn_nominal_degree)) + ((150.4*normalized_cnn_mild_degree)/2)
        print ("PM2.5 computed by CNN: ",cnn_pm25," µg/m3")
    elif ((normalized_cnn_mild_degree > normalized_cnn_severe_degree) and (normalized_cnn_mild_degree > normalized_cnn_nominal_degree)):
        if normalized_cnn_severe_degree > normalized_cnn_nominal_degree:
            cnn_pm25 = (35.5 + 114.9*normalized_cnn_mild_degree) + ((500.4*normalized_cnn_severe_degree)/2)
            print ("PM2.5 computed by CNN: ",cnn_pm25," µg/m3")
        elif (normalized_cnn_nominal_degree > normalized_cnn_severe_degree):
            cnn_pm25 = (35.5 + 114.9*normalized_cnn_mild_degree) + ((35.4*normalized_cnn_nominal_degree)/2)
            print ("PM2.5 computed by CNN: ",cnn_pm25," µg/m3")
def updateBeliefDegree():
    """Discount the consequent belief degrees when either antecedent's
    membership grades sum to less than one (incomplete input transformation).

    Reads the module-level grades H1..L1 and H2..L2 and mutates the shared
    consequentBeliefDegree list in place; prints a notice when no
    discounting is needed.
    """
    attr1_total = H1 + UM1 + M1 + LM1 + SL1 + L1
    attr2_total = H2 + UM2 + M2 + LM2 + SL2 + L2
    if attr1_total < 1 or attr2_total < 1:
        # Average the completeness of both antecedents (each clamped to at
        # most 1, mirroring the original's defaults) and scale every degree.
        discount = (min(attr1_total, 1) + min(attr2_total, 1)) / numberOfAntAttributes
        consequentBeliefDegree[:] = [discount * degree for degree in consequentBeliefDegree]
    else:
        print ("No upgradation of belief degree required.")
def aggregateER_BrbCnn():
parse = 0
move1 = 0
move2 = 1
move3 = 2
action1 = 0
action2 = 1
action3 = 2
global ruleWiseBeliefDegreeSum
ruleWiseBeliefDegreeSum = [1.51, 1.51, 1.51, 1.51, 1.51, 1.51]
part11 = 1.51
part12 = 1.51
part13 = 1.51
part1 = 1.0
part2 = 1.0
value = 1.0
meu = 1.0
numeratorH1 = 1.0
numeratorH2 = 1.0
numeratorH = 1.0
denominatorH1 = 1.0
denominatorH = 1.0
numeratorM1 = 1.0
numeratorM = 1.0
numeratorL1 = 1.0
numeratorL = 1.0
utilityScoreH = 1.0
utilityScoreM = 0.5
utilityScoreL = 0.0
crispValue = 1.0
degreeOfIncompleteness = 1.0
utilityMax = 1.0
utilityMin = 1.0
utilityAvg = 1.0
global aqi
for s in range(18):
print("Inside aggregateER)BrbCNN() consequentBeliefDegree: ",consequentBeliefDegree[s])
for t in range(6):
parse = t * 3
ruleWiseBeliefDegreeSum[t] = consequentBeliefDegree[parse] + consequentBeliefDegree[parse+1] + consequentBeliefDegree[parse+2]
for rule in range(6):
part11 *= (activationWeight[rule] * consequentBeliefDegree[move1] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
move1 += 3
for rule in range(6):
part12 *= (activationWeight[rule] * consequentBeliefDegree[move2] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
move2 += 3
for rule in range(6):
part13 *= (activationWeight[rule] * consequentBeliefDegree[move3] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
move3 += 3
part1 = (part11 + part12 + part13)
for rule in range(6):
part2 *= (1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
value = part1 - part2
meu = 1/value
for rule in range(6):
numeratorH1 *= (activationWeight[rule] * consequentBeliefDegree[action1] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
action1 += 3
for rule in range(6):
numeratorH2 *= (1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
numeratorH = meu * (numeratorH1 - numeratorH2)
for rule in range(6):
denominatorH1 *= (1 - activationWeight[rule])
denominatorH = 1 - (meu * denominatorH1)
aggregatedBeliefDegreeH = (numeratorH/denominatorH)
for rule in range(6):
numeratorM1 *= (activationWeight[rule] * consequentBeliefDegree[action2] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
action2 += 3
numeratorM = meu * (numeratorM1 - numeratorH2)
aggregatedBeliefDegreeM = (numeratorM/denominatorH)
for rule in range(6):
numeratorL1 *= (activationWeight[rule] * consequentBeliefDegree[action3] + 1 - (activationWeight[rule] * ruleWiseBeliefDegreeSum[rule]))
action3 += 3
numeratorL = meu * (numeratorL1 - numeratorH2)
aggregatedBeliefDegreeL = (numeratorL/denominatorH)
if (aggregatedBeliefDegreeH + aggregatedBeliefDegreeM + aggregatedBeliefDegreeL) == 1:
crispValue = (aggregatedBeliefDegreeH * utilityScoreH) + (aggregatedBeliefDegreeM * utilityScoreM) + (aggregatedBeliefDegreeL * utilityScoreL)
brbH = aggregatedBeliefDegreeH
brbM = aggregatedBeliefDegreeM
brbL = aggregatedBeliefDegreeL
print ("\n BRB-CNN integrated Belief Degree for Hazardous AQI: ",aggregatedBeliefDegreeH,"\n")
print ("\n BRB-CNN integrated Belief Degree for Unhealthy AQI: ",aggregatedBeliefDegreeM,"\n")
print ("\n BRB-CNN integrated Belief Degree for Good AQI: ",aggregatedBeliefDegreeL,"\n")
#cout << "brbH: " << brbH << " brbM: " << brbM << " brbL: " << brbL <<endl;
else:
degreeOfIncompleteness = 1 - (aggregatedBeliefDegreeH + aggregatedBeliefDegreeM + aggregatedBeliefDegreeL)
utilityMax = ((aggregatedBeliefDegreeH + degreeOfIncompleteness) * utilityScoreH + (aggregatedBeliefDegreeM*utilityScoreM) + (aggregatedBeliefDegreeL*utilityScoreL))
utilityMin = (aggregatedBeliefDegreeH*utilityScoreH) + (aggregatedBeliefDegreeM*utilityScoreM) + (aggregatedBeliefDegreeL + degreeOfIncompleteness) * utilityScoreL
utilityAvg = (utilityMax + utilityMin)/2
print ("BRB-CNN integrated Belief Degrees considering degree of Incompleteness: ")
finalAggregatedBeliefDegreeH = aggregatedBeliefDegreeH/(aggregatedBeliefDegreeH + aggregatedBeliefDegreeM + aggregatedBeliefDegreeL)
finalAggregatedBeliefDegreeM = aggregatedBeliefDegreeM/(aggregatedBeliefDegreeH + aggregatedBeliefDegreeM + aggregatedBeliefDegreeL)
finalAggregatedBeliefDegreeL = aggregatedBeliefDegreeL/(aggregatedBeliefDegreeH + aggregatedBeliefDegreeM + aggregatedBeliefDegreeL)
brbH = finalAggregatedBeliefDegreeH
brbM = finalAggregatedBeliefDegreeM
brbL = finalAggregatedBeliefDegreeL
if (finalAggregatedBeliefDegreeH > finalAggregatedBeliefDegreeM) and (finalAggregatedBeliefDegreeH > finalAggregatedBeliefDegreeL):
aqi = (201 + 299*finalAggregatedBeliefDegreeH) + ((200*finalAggregatedBeliefDegreeM)/2)
print ("AQI predicted by BRB-CNN:",aqi)
elif (finalAggregatedBeliefDegreeL > finalAggregatedBeliefDegreeM) and (finalAggregatedBeliefDegreeL > finalAggregatedBeliefDegreeH):
aqi = (100*(1 - finalAggregatedBeliefDegreeL)) + ((200*finalAggregatedBeliefDegreeM)/2)
print ("AQI predicted by BRB-CNN:",aqi)
elif (finalAggregatedBeliefDegreeM > finalAggregatedBeliefDegreeH) and (finalAggregatedBeliefDegreeM > finalAggregatedBeliefDegreeL):
if finalAggregatedBeliefDegreeH > finalAggregatedBeliefDegreeL:
aqi = (101 + 99*finalAggregatedBeliefDegreeM) + ((500*finalAggregatedBeliefDegreeH)/2)
print ("AQI predicted by BRB-CNN: ",aqi)
elif (finalAggregatedBeliefDegreeL > finalAggregatedBeliefDegreeH):
aqi = (101 + 99*finalAggregatedBeliefDegreeM) + ((100*finalAggregatedBeliefDegreeL)/2)
print ("AQI predicted by BRB-CNN:",aqi)
print("aqi ",aqi)
if aqi >= 301:
aqi6 = (aqi- 301)/199.0
elif (aqi >= 201)and (aqi <= 300.9999999999):
aqi6 = (aqi- 201)/99.0
elif (aqi >= 151)and (aqi <= 200.9999999999):
aqi6 = (aqi- 151)/49.0
elif((aqi >= 101)and (aqi <= 150.9999999999)):
aqi6 = (aqi- 101)/49.0
elif((aqi >= 51)and (aqi <= 100.9999999999)):
aqi6 = (aqi- 51)/49.0
elif(aqi <= 50.9999999999):
aqi6 = (aqi/49.0)
print("aqi6 ",aqi6)
print ("BRB-CNN integrated Belief Degree for Hazardous AQI:",finalAggregatedBeliefDegreeH*aqi6)
print ("BRB-CNN integrated Belief Degree for Very Unhealthy AQI:",finalAggregatedBeliefDegreeH*(1-aqi6))
print ("BRB-CNN integrated Belief Degree for Unhealthy AQI: ",finalAggregatedBeliefDegreeM*aqi6)
print ("BRB-CNN integrated Belief Degree for Unhealthy (Sensitive Groups) AQI:",finalAggregatedBeliefDegreeM*(1-aqi6))
print ("BRB-CNN integrated Belief Degree for Moderate AQI:",finalAggregatedBeliefDegreeL*aqi6)
print ("BRB-CNN integrated Belief Degree for Good AQI:",finalAggregatedBeliefDegreeL*(1-aqi6))
return aqi
#def getAQI(x):
# cbd_de0 = x[0]
# cbd_de1 = x[1]
# cbd_de2 = x[2]
# ruleBase()
#aqi = x[0] + x[1] + x[2] + x[3]
#print("Diff Evo BRB/CNN AQI is ",aqi)
# return cbd_de0 + cbd_de1 + cbd_de2
#def main():
# ruleBase()
# takeInput()
#showTransformedInput() unnecessary
# takeCnnOutput()
# calculateMatchingDegreeBrbCnn()
#showMatchingDegree() unnecessary
# showActivationWeight()
# updateBeliefDegree()
# aggregateER_BrbCnn()
#getAQI(x) unnecessary
#main() | [
"noreply@github.com"
] | samikabir.noreply@github.com |
44c4ae1504cb9383fdc583226306490178d8f514 | 08d59232fec474afe8bf20fcd6b35dc89638720c | /madird-air-quality-evaluation/code/analyze-rnn-results.py | 9ec0a2e3261f20699c182dc9e2647be7c98524c6 | [] | no_license | jamaltoutouh/urban-health-no2-experiments | d91999e69ee21a14570f91eefbf9445ca79b4f18 | a51b01ed6adafd87a4a3ac661a1270b1cdff6a31 | refs/heads/master | 2021-01-15T02:12:40.620348 | 2020-02-25T23:19:25 | 2020-02-25T23:19:25 | 242,844,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | import random
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect
import seaborn as sns
import glob
import scipy.stats
from sklearn.linear_model import LinearRegression
from scipy.stats import shapiro
from statistics import mean
from scipy.stats import ttest_ind
from scipy.stats import wilcoxon
from scipy.stats import iqr
import scikit_posthocs as sp
import os
resutls_folder = '.'
def get_folders(hiden_layer_size, loock_back):
    """Return result directories whose names end in '<hidden size>-<look back>'."""
    pattern = '{}/*{}-{}'.format(resutls_folder, hiden_layer_size, loock_back)
    return glob.glob(pattern)
def get_results_from_csv(folder, csv_file):
    """Load one results CSV and summarize its prediction errors.

    Parameters
    ----------
    folder : str
        Directory holding the CSV file.
    csv_file : str
        File name; must provide 'predicted', 'actual', 'mse' and 'mae' columns.

    Returns
    -------
    tuple or None
        (mse, mae, over_predicted_ratio, over_predicted_val) where the ratio is
        the fraction of samples predicted above the actual value and the val is
        the mean amount of over-prediction (NaN when nothing is over-predicted).
        Returns None when the file does not exist.
    """
    path = folder + '/' + csv_file
    if not os.path.exists(path):
        return None
    data = pd.read_csv(path)
    predicted = np.array(data['predicted'].tolist())
    actual = np.array(data['actual'].tolist())
    diff = predicted - actual
    # Mean over-prediction amount; 0/0 -> NaN when no sample is over-predicted.
    over_predicted_val = np.sum(diff[diff > 0]) / np.sum(diff > 0)
    # Fraction of samples where the model predicted above the actual value.
    over_predicted_ratio = np.sum(diff > 0) / len(predicted)
    mse = sum(data['mse'].tolist()) / len(predicted)
    mae = sum(data['mae'].tolist()) / len(predicted)
    return mse, mae, over_predicted_ratio, over_predicted_val
def get_results_pre_MC(folder):
    """Summarize the pre-Monte-Carlo results stored in <folder>/pre-MC.csv."""
    csv_name = 'pre-MC.csv'
    return get_results_from_csv(folder, csv_name)
def get_results_post_MC(folder):
    """Summarize the post-Monte-Carlo results stored in <folder>/post-MC.csv."""
    csv_name = 'post-MC.csv'
    return get_results_from_csv(folder, csv_name)
def get_results(hiden_layer_size, loock_back):
    """Print pre/post-MC result summaries for every matching run folder."""
    for run_dir in get_folders(hiden_layer_size, loock_back):
        pre = get_results_pre_MC(run_dir)
        if pre is not None:
            print('Pre-MC')
            print(pre)
        post = get_results_post_MC(run_dir)
        if post is not None:
            print('Post-MC')
            print(post)
hidden_layer_size = [2, 4, 6, 8, 10, 100]
loock_backs = [6, 12, 24]
for hls in hidden_layer_size:
for lb in loock_backs:
print('{} - {}'.format(hls, lb))
get_results(hls, lb) | [
"toutouh@alfad1812.csail.mit.edu"
] | toutouh@alfad1812.csail.mit.edu |
9ab0fdbb526d963d7c90bf9dd6662f3bcd222495 | 5caee0aea844d1024dadb1a3a551ed347c4ce534 | /Amazon_interview/lowest_number_removing_n_digits.py | 0a54a7cf8e2e15a1486fcaf2e5e1dbfb285e5659 | [] | no_license | Shikhar0907/Algo-and-data-structure-questions | 840c004acb93d7f97615d87fc8c69d73434b380d | 27d24cd18a9878cb59a81ef8b39aa4e31d8b8a7a | refs/heads/master | 2020-03-27T18:28:52.707843 | 2018-11-02T05:08:19 | 2018-11-02T05:08:19 | 146,924,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | def remove_n_digits(arr,n,res):
if n ==0:
res = res + arr
return
l = len(arr)
if len(arr) <= n:
return
min_point = 0
for i in range(len(arr)):
if arr[i] < arr[min_point]:
min_point = i
res = res+arr[min_point]
new_str = arr[min_point+1:l-min_point]
remove_n_digits(new_str,n-min_point,res)
def main():
    """Read a digit string from stdin and run remove_n_digits on it with n=3."""
    string = str(input("Please enter the elements: "))
    res = ""  # accumulator string handed to remove_n_digits
    n = 3  # number of digits to remove
    # NOTE(review): remove_n_digits returns None on every path (its base case
    # is a bare `return`), so this always prints None -- confirm intent.
    print(remove_n_digits(string,n,res))
    # NOTE(review): str is immutable, so the callee's `res = res + ...` cannot
    # affect this local; this always prints the empty string.
    print(res)
main()
| [
"shikharsuman59@gmail.com"
] | shikharsuman59@gmail.com |
087243b55df2646274a5808accd1290346960684 | 33ffb5ee0b9160210857a610516c6af76f4fbf7f | /expense_report/migrations/0001_initial.py | 07f820e9e5f2b732a6782b8b40ed4a08d85c0331 | [] | no_license | vince015/Memorial-Park-Management-System | 7d366a05ff4dee1e0e37e394f704e23d89a2f66c | 5ef02df56a87457d49254da241735e08cb4d1484 | refs/heads/master | 2021-08-10T15:29:02.666898 | 2018-11-13T14:40:26 | 2018-11-13T14:40:26 | 143,298,254 | 1 | 0 | null | 2021-06-10T20:42:43 | 2018-08-02T13:26:20 | HTML | UTF-8 | Python | false | false | 2,508 | py | # Generated by Django 2.0 on 2018-11-03 07:40
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial schema for the expense_report app.

    Creates the Expense table and the petty-cash Transaction table.
    Generated by Django; avoid hand-editing except to keep it importable.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Expense: one row per recorded expense, categorized for reporting.
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('date', models.DateField(default=django.utils.timezone.now)),
                ('reference_number', models.CharField(blank=True, max_length=256, null=True)),
                ('payee', models.CharField(max_length=256)),
                ('amount', models.FloatField(default=0.0)),
                # NOTE(review): 'Electicity' is misspelled in the stored choice
                # value; fixing it needs a follow-up data migration, not an edit here.
                ('category', models.CharField(choices=[('Salaries', 'Salaries'), ('Commissions', 'Commissions'), ('Petty Cash Replenishment', 'Petty Cash Replenishment'), ('Labor Fees', 'Labor Fees'), ('Professional Fees', 'Professional Fees'), ('Allowance', 'Allowance'), ('Repair & Maintenance', 'Repair and Maintenance'), ('Office Supplies', 'Office Supplies'), ('Transportation', 'Transportation'), ('Electicity', 'Electicity'), ('Utilities', 'Utilities'), ('Miscellaneous', 'Miscellaneous')], default='Salaries', max_length=128)),
                ('description', models.CharField(blank=True, max_length=512, null=True)),
            ],
        ),
        # Transaction: petty-cash ledger entries; transaction_type encodes
        # CREDIT (+1) / DEBIT (-1).
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
                ('payee', models.CharField(max_length=256)),
                ('amount', models.FloatField(default=0.0)),
                ('description', models.CharField(blank=True, max_length=512, null=True)),
                ('transaction_type', models.SmallIntegerField(choices=[(1, 'CREDIT'), (-1, 'DEBIT')], default=-1)),
                ('value', models.FloatField(default=0.0)),
            ],
            options={
                'verbose_name': 'Petty Cash Transaction',
                'verbose_name_plural': 'Petty Cash Transactions',
            },
        ),
    ]
| [
"vincetapang@gmail.com"
] | vincetapang@gmail.com |
63b3c6d7dce8fd65cd3e00c6622f9d68091d0507 | 0e3d7a350a2b1b962f7308ba369398f3e4c539a0 | /HaxballParser/__init__.py | 995f4704f37b39ebe0cf89ef36fd2eefcf6f2f63 | [] | no_license | jalm10/HaxballParser | 370d4fbf1754a79a2a5f17e00b474286a805154a | c53ff6677de97c2bb34f088827dc0bd734a714de | refs/heads/master | 2020-06-04T22:39:36.682511 | 2016-01-08T17:25:05 | 2016-01-08T17:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from .dumper import dump
from .utils import * | [
"ivanfelicianox@gmail.com"
] | ivanfelicianox@gmail.com |
8c07529f651c18ec4baeaf8196c9089dc669994a | 2b9dfbb89d197f0318edc9f78e9d0e6cbcf3e68f | /supervisor/resolution/evaluations/base.py | 30d14714d93ca280cdf87440cb6bb7683263bd38 | [
"Apache-2.0"
] | permissive | fredrike/hassio | ea26a9a8e5c321ee5368fa75c1ce183cad499c29 | 1df447272eb8f1d891530acf91f43320361d6811 | refs/heads/dev | 2023-03-20T12:22:07.248308 | 2020-12-18T20:42:42 | 2020-12-18T20:42:42 | 183,160,796 | 1 | 0 | Apache-2.0 | 2023-03-06T06:58:11 | 2019-04-24T06:16:38 | Python | UTF-8 | Python | false | false | 1,780 | py | """Baseclass for system evaluations."""
from abc import ABC, abstractmethod, abstractproperty
import logging
from typing import List
from ...const import CoreState
from ...coresys import CoreSys, CoreSysAttributes
from ..const import UnsupportedReason
_LOGGER: logging.Logger = logging.getLogger(__name__)
class EvaluateBase(ABC, CoreSysAttributes):
    """Baseclass for system evaluations.

    Subclasses implement ``evaluate`` plus the ``reason``/``on_failure``
    properties; calling the instance records or clears the unsupported
    reason on the resolution manager.
    """

    def __init__(self, coresys: CoreSys) -> None:
        """Initialize the evaluation class with the shared CoreSys."""
        self.coresys = coresys

    async def __call__(self) -> None:
        """Execute the evaluation and sync the unsupported-reason list."""
        # Only run while the core is in one of the states this check supports.
        if self.sys_core.state not in self.states:
            return
        if await self.evaluate():
            if self.reason not in self.sys_resolution.unsupported:
                self.sys_resolution.unsupported = self.reason
                _LOGGER.warning(
                    "%s (more-info: https://www.home-assistant.io/more-info/unsupported/%s)",
                    self.on_failure,
                    self.reason.value,
                )
        else:
            if self.reason in self.sys_resolution.unsupported:
                _LOGGER.info("Clearing %s as reason for unsupported", self.reason)
                self.sys_resolution.dismiss_unsupported(self.reason)

    @abstractmethod
    async def evaluate(self):
        """Run evaluation; truthy result marks the system unsupported."""

    # abc.abstractproperty is deprecated; the supported spelling is stacking
    # @property over @abstractmethod, which keeps the same abstract contract.
    @property
    @abstractmethod
    def reason(self) -> UnsupportedReason:
        """Return a UnsupportedReason enum."""

    @property
    @abstractmethod
    def on_failure(self) -> str:
        """Return a string that is printed when self.evaluate is False."""

    @property
    def states(self) -> List[CoreState]:
        """Return a list of valid states when this evaluation can run."""
        return []
| [
"noreply@github.com"
] | fredrike.noreply@github.com |
a5e3b8c4b09c96202874b0d917b89981cd95ae34 | 8dd926fcbff258a5f03b414581bad5848e5e8ad7 | /service/app/models.py | 39f53e2409f38fccf8c07e5567c26c7db19a833b | [] | no_license | xxl4tomxu98/Benchmarking_Engineers_Full_Stack | ab1d4c74430607b30d259a941af76298e16b6e0c | d633f2ea1ec63336e4d1de2778c170be07a71c07 | refs/heads/master | 2023-02-28T04:27:56.279633 | 2021-02-06T19:31:02 | 2021-02-06T19:31:02 | 334,595,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Company(db.Model):
    """One company row (table 'companies') with its interview records."""
    __tablename__ = 'companies'
    id = db.Column(db.Integer, primary_key=True)
    # Fixed-point value, precision 4 / scale 3 (e.g. 0.873); required.
    fractal_index = db.Column(db.Numeric(4, 3), nullable=False)
    # One-to-many: all Record rows whose company_id points here.
    records = db.relationship("Record", back_populates="company")
class Record(db.Model):
    """One candidate score record (table 'score_records') for a company."""
    __tablename__ = "score_records"
    id = db.Column(db.Integer, primary_key=True)
    candidate_id = db.Column(db.Integer, nullable=False)
    communication_score = db.Column(db.Integer, nullable=False)
    coding_score = db.Column(db.Integer, nullable=False)
    # Job title the candidate interviewed for.
    title = db.Column(db.Text, nullable=False)
    company_id = db.Column(db.Integer, db.ForeignKey('companies.id'), nullable=False)
    # Many-to-one back-reference paired with Company.records.
    company = db.relationship("Company", back_populates="records")
| [
"tomxu@udel.edu"
] | tomxu@udel.edu |
5d7b482c75e3fba9a743f187bf5e02f5cc8bddf2 | b80a6aa13a1928dc320c1edd6b186bcb2dcc63ce | /core/nodeLibrary.py | 1e9853b9bcae8f668c16af28a65cecd4f383943e | [] | no_license | walternate/meShaderEd | 225e6998f8b5ccd03311ed750bbf08482fed6651 | faf8e5ba028cbb12c9cb3a107263469999049905 | refs/heads/master | 2021-01-24T01:00:30.265664 | 2012-07-19T16:22:20 | 2012-07-19T16:22:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,867 | py | #===============================================================================
# nodeLibrary.py
#
#
#
#===============================================================================
import os, sys
from PyQt4 import QtCore, QtGui, QtXml
from PyQt4.QtCore import QDir, QFile, QVariant
from PyQt4.QtGui import QStandardItemModel, QStandardItem
from global_vars import app_global_vars
from core.node import Node
from core.rslNode import RSLNode
from core.ribNode import RIBNode
from core.imageNode import ImageNode
from core.nodeParam import *
#from core.nodeParam import FloatNodeParam
#from core.nodeParam import ColorNodeParam
#
# NodeLibrary
#
class NodeLibrary (): # QtCore.QObject
  """In-memory model of the node library (Python 2 / PyQt4).

  Recursively walks a library directory and fills a QStandardItemModel:
  sub-directories become bold 'folder' items, and each *.xml file with a
  'node' or 'nodenet' root element becomes a leaf item.
  """
  #
  #
  def __init__ ( self, dirName ):
    """Build the library model by scanning dirName recursively."""
    self.dirName = dirName
    self.libdir = QDir( dirName );
    self.model = QStandardItemModel()
    # parentItem is the model item the scan currently attaches children to;
    # scanLibDir temporarily reassigns it while descending and restores it.
    self.parentItem = self.model.invisibleRootItem()
    print '>> NodeLibrary: libdir = %s' % dirName
    # liblevel is the sub-path relative to dirName ('' at the root; each
    # directory level appends '<name>/').
    self.liblevel = ''
    self.scanLibDir()
  #
  #
  def scanLibDir ( self ):
    """Recursively scan self.libdir: directories first, then *.xml files."""
    # process directories
    sortFlags = QDir.Name
    filterFlags = ( QDir.AllDirs | QDir.NoDotAndDotDot )
    fileList = self.libdir.entryInfoList ( filterFlags, sortFlags )
    for f in fileList :
      item = QStandardItem ( f.fileName() )
      item.setEditable ( False )
      item.setDragEnabled ( False )
      # set bold font for folder items
      font = item.font()
      font.setBold ( True )
      item.setFont ( font )
      item.setWhatsThis ( 'folder' )
      currparent = self.parentItem
      self.parentItem.appendRow ( item )
      self.parentItem = item
      currlevel = self.liblevel # store current level
      self.liblevel = self.liblevel + f.fileName () + '/'
      self.libdir.cd( f.fileName () )
      self.scanLibDir() # recursive call into the sub-directory
      self.liblevel = currlevel # restore current level
      self.libdir.cdUp()
      self.parentItem = currparent
    # process XML files
    filterFlags = QDir.Files
    fileList = self.libdir.entryInfoList ( ["*.xml"], filterFlags, sortFlags )
    for f in fileList :
      self.scanXmlNodes ( f.fileName () )
  #
  #
  def scanXmlNodes ( self, filename ):
    """Parse one node XML file and append a model item for its root element.

    Only 'node' and 'nodenet' root tags are accepted. Item data roles:
    UserRole+1 author, +2 type, +3 help text, +4 full file name, +5 icon.
    """
    dom = QtXml.QDomDocument('')
    nodeFilename = self.dirName + '/' + self.liblevel + filename
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    file = QFile ( self.libdir.filePath ( filename ) )
    if file.open ( QtCore.QIODevice.ReadOnly ) :
      if dom.setContent ( file ) :
        node = dom.documentElement()
        if node.nodeName() == 'nodenet' or node.nodeName() == 'node' :
          nodeName = node.attributes().namedItem('name').nodeValue()
          nodeType = node.attributes().namedItem('type').nodeValue()
          nodeAuthor = node.attributes().namedItem('author').nodeValue()
          nodeIcon = node.attributes().namedItem('icon').nodeValue()
          nodeHelp = ''
          help_tag = node.namedItem ('help')
          if not help_tag.isNull() : nodeHelp = help_tag.toElement().text()
          item = QStandardItem( nodeName )
          item.setEditable( False )
          item.setData( QVariant( nodeAuthor ), QtCore.Qt.UserRole + 1 )
          item.setData( QVariant( nodeType ), QtCore.Qt.UserRole + 2 )
          item.setData( QVariant( nodeHelp ), QtCore.Qt.UserRole + 3 )
          item.setData( QVariant( nodeFilename ), QtCore.Qt.UserRole + 4 )
          item.setData( QVariant( nodeIcon ), QtCore.Qt.UserRole + 5 )
          if node.nodeName() == 'nodenet' :
            # nodenet items are drawn in blue to distinguish them from nodes
            brush = QtGui.QBrush ()
            brush.setColor ( QtCore.Qt.blue )
            item.setForeground ( brush )
            item.setWhatsThis ( 'nodenet' )
          else:
            item.setWhatsThis ( 'node' )
          self.parentItem.appendRow ( item )
    file.close()
| [
"Yuri.Meshalkin@gmail.com"
] | Yuri.Meshalkin@gmail.com |
47083e5f84b754fc1c05b38d3675fee1b760ae9f | a5669790b3809e10ec7016d66981f915b47a166c | /scripts/collect_lang_stats.py | 33064bb2fe3e96764cb14480e3c8c3b262b4f8b9 | [
"MIT"
] | permissive | jxhe/cross-lingual-struct-flow | ca9c45983316a83b0ac1bd88b9ed76e47ce976dc | 8fb90fef4f6fcd79c42fc6aefec5418ca5e54833 | refs/heads/master | 2023-06-28T14:53:45.503938 | 2020-12-03T03:28:53 | 2020-12-03T03:28:53 | 158,045,710 | 25 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | """
This script collects language statistics from UD treebank dataset
"""
import os
from io import open
from conllu import parse_incr
lang_stats = []
# Walk every UD treebank directory; each directory holds one language's
# .conllu files.
for root, subdirs, files in os.walk("ud-treebanks-v2.2"):
    train_flag = False
    valid_dir = False
    # A directory is a treebank if it contains at least one .conllu file;
    # the language code is the file-name prefix before '_'.
    for fname in files:
        if fname.strip().split('.')[-1] == "conllu":
            valid_dir = True
            lang = fname.strip().split('.')[0].split('_')[0]
            break
    if valid_dir:
        for fname in files:
            if fname.strip().split('.')[-1] == "conllu":
                train = fname.strip().split('.')[0].split('-')[-1]
                if train != "train":
                    continue
                train_flag = True
                # Count the training sentences; the with-block guarantees
                # the file handle is closed (previously it leaked).
                with open(os.path.join(root, fname), "r", encoding="utf-8") as fin:
                    sents = list(parse_incr(fin))
                lang_stats.append((lang, root, len(sents)))
                break
        # no training file
        if not train_flag:
            lang_stats.append((lang, root, 0))
# Write one "<lang> <treebank dir> <#train sentences>" line per treebank,
# sorted by sentence count ascending.
with open("lang_stats.txt", "w") as fout:
    for name_root_value in sorted(lang_stats, key=lambda name_root_value: name_root_value[2]):
        name, root, value = name_root_value
        fout.write("{} {} {}\n".format(name, root, value))
| [
"junxianh2@gmail.com"
] | junxianh2@gmail.com |
a0149391f5831cb229256d024b17784f97c94ccb | 740c363ddf8ea6e9a37862503db076ace7783664 | /testing/test_calc.py | 800e3db007b0f6dca8698cdd0337e00ffaddc8f3 | [] | no_license | gaoxue12350/pytest_excise | a864a5ceded97f6e09ddbee1b4b8050af8ee7e7b | 59dd0236246b429e97a715bd34386191ad8cbdc8 | refs/heads/master | 2023-01-04T17:36:49.866637 | 2020-10-29T11:12:28 | 2020-10-29T11:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | import pytest
import yaml
def get_datas():
    """Load the parametrize rows for all four operations from datas/calc.yml.

    Returns
    -------
    list
        Flat list laid out as [add_datas, add_ids, sub_datas, sub_ids,
        mul_datas, mul_ids, div_datas, div_ids], preserving the index-based
        access pattern used by the test decorators.
    """
    with open("./datas/calc.yml") as f:
        datas = yaml.safe_load(f)
    result = []
    # Keep the historical add/sub/mul/div ordering so get_datas()[0..7]
    # keeps meaning the same thing for existing callers.
    for op in ('add', 'sub', 'mul', 'div'):
        result.append(datas[op]['datas'])
        result.append(datas[op]['ids'])
    return result
class TestCalc:
    """Parametrized arithmetic tests for the calculator under test.

    Each test is parametrized with (a, b, expect) rows and human-readable
    ids loaded from datas/calc.yml by get_datas() at collection time.
    `get_calc` is a pytest fixture defined elsewhere (presumably in
    conftest.py -- TODO confirm) that provides the calculator object.
    `pytest.mark.run(order=...)` relies on the pytest-ordering plugin.
    """
    # NOTE(review): get_datas() is called twice per decorator, so the YAML
    # file is parsed eight times during collection; consider caching.
    @pytest.mark.run(order=0)
    @pytest.mark.parametrize('a,b,expect', get_datas()[0], ids=get_datas()[1])
    def test_add(self, get_calc, a, b, expect):
        # calc=Calculator()
        result = get_calc.add(a, b)
        assert result == expect
    @pytest.mark.run(order=2)
    @pytest.mark.parametrize('a,b,expect', get_datas()[2], ids=get_datas()[3])
    def test_sub(self, get_calc, a, b, expect):
        # calc=Calculator()
        result = get_calc.sub(a, b)
        assert result == expect
    @pytest.mark.run(order=-1)
    @pytest.mark.parametrize('a,b,expect', get_datas()[4], ids=get_datas()[5])
    def test_mul(self, get_calc, a, b, expect):
        # calc=Calculator()
        result = get_calc.mul(a, b)
        assert result == expect
    @pytest.mark.run(order=1)
    @pytest.mark.parametrize('a,b,expect', get_datas()[6], ids=get_datas()[7])
    def test_div(self, get_calc, a, b, expect):
        result = get_calc.div(a, b)
        assert result == expect
| [
"973140551@qq.com"
] | 973140551@qq.com |
754e900ec3ae5f4d33dddc2eb01eedef2b442d05 | a53532e28a8c7ecc3b683e66cc2af0a5fcf135f4 | /Code/bookmanager/bookmanager/settings.py | 52d51cd8168d9d78917fd27ceeb25b2bdc30f3a0 | [] | no_license | 1015600087/cuitingting | b4b754e2f311a67469e6b6db6911c1107ecf8b84 | 097341675561e965256bd7c5fd47870d476715c1 | refs/heads/master | 2023-02-09T06:20:52.995122 | 2020-12-30T14:18:01 | 2020-12-30T14:18:01 | 323,898,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | """
Django settings for bookmanager project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed here; load it from an
# environment variable before any non-local deployment.
SECRET_KEY = 'm0-l$!ed&eg4_x7gq%@^qqe6hnhs6jaq&q4^!af6pa667+v5fp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Requests are only accepted for the loopback host.
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'book.apps.BookConfig'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmanager.urls'
# Templates are looked up in the project-level templates/ dir first, then
# inside each installed app (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bookmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Single SQLite file next to the project root (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
# Simplified Chinese UI with Shanghai time zone.
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"1015600087@qq.com"
] | 1015600087@qq.com |
d55e42d699b32c5fd39e7695c09027af0919048a | 741568a4e5a97478a80acbafb0ea03f2c9c5494b | /labtest/urls.py | 5170c93d469f03528bcb09d7052373606b2db618 | [] | no_license | Viswa1999/EAD-Lab-Test2 | 9d34155583f479b512dadcff9cba901d9036a8fa | 5c964bf688a5d47ab6874e436d395d80d6580c9f | refs/heads/main | 2023-06-26T17:27:39.840864 | 2021-07-20T12:59:20 | 2021-07-20T12:59:20 | 387,791,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | """labtest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from labapp import views
from django.conf.urls import url
"""from labapp.views import VoterView
from labapp import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'voters', views.VoterView,'voters')"""
# URL routes: Django admin plus the CRUD views provided by labapp.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.index),
    # Legacy regex routes (django.conf.urls.url); the numeric `id` capture
    # is passed to the view as a keyword argument.
    url(r'^create$', views.create, name='create'),
    url(r'^edit/(?P<id>\d+)$', views.edit, name='edit'),
    url(r'^edit/update/(?P<id>\d+)$', views.update, name='update'),
    url(r'^delete/(?P<id>\d+)$', views.delete, name='delete'),
]
#path('',include('labapp.urls')),
#path('api/',include(router.urls)),
| [
"noreply@github.com"
] | Viswa1999.noreply@github.com |
3a8d9df201c08824ce372d8b9123d0ab54a6afbd | db4ecd3108f742fe5944efcb9cce6481088147ff | /keisan2/37.py | 168571e11abd459179c49f32acb59f5c31701fc2 | [] | no_license | Moemoe2358/Data-contest-old | cb7a9d84fa76e7e161be7eb433c79adb16f16683 | ccc00a567bc4a97f2756c4f38d2a6bce612b54bc | refs/heads/master | 2021-06-17T18:38:35.375775 | 2017-04-29T15:06:12 | 2017-04-29T15:06:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,992 | py | # -*- coding: utf-8 -*-
# Length distribution (history length per store): [3, 42, 316, 3, 71, 177, 5, 1, 169, 33, 7, 32, 1504]
import numpy as np
import pandas as pd
from sklearn import linear_model
import time
start_time = time.time()  # wall-clock timer for the elapsed-time report at the end
train = pd.read_csv("data3/train.tsv", delimiter = "\t")
test = pd.read_csv("data3/test.tsv", delimiter = "\t")
submit = pd.read_csv("sample_submit.csv", header = None)
# Rename the headerless submission columns; column "b" receives the
# predictions computed in the loop below.
submit.columns = ["a", "b"]
# Hand-tuned smoothing weights keyed as
# weight[natural_lawson_store flag][location type][region]; the Japanese
# keys must match the category strings in the data files exactly.
weight = {0:{"観光立地":{"関東":0.97, "近畿":0.96, "東北":0.92, "四国":1, "北海道":0.68, "九州":0.96, "中部":1, "中国":1}, \
            "住宅立地":{"関東":0.97, "近畿":0.96, "東北":0.76, "四国":0.5, "北海道":0.95, "九州":0.94, "中部":1, "中国":0.78}, \
            "ビジネス立地":{"関東":0.96, "近畿":0.97, "東北":0.88, "四国":0.75, "北海道":0.8, "九州":0.99, "中部":0.94, "中国":0.88}, \
            "学校立地":{"関東":1, "近畿":0.88, "東北":0.63, "四国":0.5, "北海道":0.5, "九州":0.72, "中部":0.82, "中国":0.62}}, \
        1:{"観光立地":{"関東":0.87}, "住宅立地":{"関東":0.9}, "ビジネス立地":{"関東":0.88}}}
# lel, ael, alel, llel = [], [], [], []
# p = 0
# Predict column "b" of the submission row by row from each store's history.
for i in range(len(test)):
    pid = test['pid'][i]
    area = test['area'][i]
    location = test['location'][i]
    natural = test['natural_lawson_store'][i]
    # All historical rows that match this store's full profile.
    temp = train[(train['pid'] == pid) & (train['area'] == area) & (train['location'] == location) & (train['natural_lawson_store'] == natural)]
    temp = temp.reset_index(drop = True)
    length = len(temp)
    # Write via DataFrame.at instead of chained `submit["b"][i] = ...`:
    # chained indexing can assign to a temporary copy and be silently lost
    # (SettingWithCopyWarning) on newer pandas versions.
    if length == 0:
        # No history at all: fall back to a constant prediction.
        submit.at[i, "b"] = 2.0
    if length == 1:
        submit.at[i, "b"] = temp["y"][length - 1] * 1.4
    if length == 2:
        submit.at[i, "b"] = temp["y"][length - 1]
    if length >= 3:
        # Weighted blend of the last two observations; the weight depends on
        # the store type, location category and region.
        submit.at[i, "b"] = weight[natural][location][area] * temp["y"][length - 1] + (1 - weight[natural][location][area]) * temp["y"][length - 2]
    # Natural Lawson stores get a 7% uplift on the prediction.
    if natural == 1:
        submit.at[i, "b"] = submit.at[i, "b"] * 1.07
# if length < 12:
# continue
# length -= p
# le = (np.log(temp['y'][length - 1] + 1) - np.log(temp['y'][length - 2] + 1)) ** 2
# lel.append(le)
# ale = (np.log(temp['y'][length - 1] + 1) - np.log(0.5 * temp['y'][length - 2] + 0.25 * temp['y'][length - 3] + 0.25 * temp['y'][length - 4] + 1)) ** 2
# alel.append(ale)
# lle = (np.log(temp['y'][length - 1] + 1) - np.log(temp['y'][length - 3] + 1)) ** 2
# llel.append(lle)
# ae = (np.log(temp['y'][length - 1] + 1) - np.log(temp['y'][1:length - 1].mean() + 1)) ** 2
# ael.append(ae)
# print ""
# print "Len:", 12 - p
# print "Last", round(np.sqrt(np.mean(lel)), 3)
# print "Moving Average", round(np.sqrt(np.mean(alel)), 3)
# print "2rd Last", round(np.sqrt(np.mean(llel)), 3)
# print "Average", round(np.sqrt(np.mean(ael)), 3)
# print ""
# Write the predictions without header or index, as the contest expects.
submit.to_csv("result.csv", header = None, index = None)
elapsed_time = time.time() - start_time
# Keep the "[sec]" suffix inside the call: the previous form
# `print(...) + "[sec]"` added a string to print()'s None return value,
# which raises TypeError on Python 3. This form prints the same text on
# both Python 2 and Python 3.
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")
# for those length < 3: * 1.4
# 78 0.192254608911
# 79 0.192254599957
# 80 0.192271166456
# only length >= 4
# 78 0.16905541744
# 79 0.169055405648
# 80 0.169077223264
| [
"ryuhouketsu@ryuhouketsu-no-MacBook-Air.local"
] | ryuhouketsu@ryuhouketsu-no-MacBook-Air.local |
e687ecab21b61945bf62fd92120d0df127f08282 | 9fa64b4bcdb1061d08a1898cbccfac979035acb8 | /elit/component/dep/parser/biaffine_parser.py | 3cc50eca4c8964c366953731b73cb5640f2e0d4c | [
"Apache-2.0"
] | permissive | elitcloud/elit | bf11d1acfa235440f1826c27d9dad07ac397c2c2 | 78c00ec098d7626fd29ca49a9aef28950fabfed9 | refs/heads/develop | 2022-07-11T16:40:33.623416 | 2020-02-19T19:39:55 | 2020-02-19T19:40:53 | 72,214,112 | 38 | 6 | NOASSERTION | 2022-06-21T21:06:56 | 2016-10-28T14:23:47 | Python | UTF-8 | Python | false | false | 13,914 | py | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from mxnet import nd, ndarray, autograd
from mxnet.gluon import nn
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from elit.component.dep.common.utils import orthonormal_VanillaLSTMBuilder, bilinear, reshape_fortran, arc_argmax, \
rel_argmax, leaky_relu, biLSTM, orthonormal_initializer, arc_mst
from gluonnlp.model import apply_weight_drop
class BiaffineParser(nn.Block):
    """Deep biaffine attention dependency parser (Dozat & Manning 2016).

    Embeds words and POS tags, runs a stacked bidirectional LSTM, projects
    the top recurrent states into arc/rel MLP spaces, and scores head-dependent
    arcs and relation labels with biaffine bilinear transforms.
    """

    def __init__(self, vocab,
                 word_dims,
                 tag_dims,
                 dropout_dim,
                 lstm_layers,
                 lstm_hiddens,
                 dropout_lstm_input,
                 dropout_lstm_hidden,
                 mlp_arc_size,
                 mlp_rel_size,
                 dropout_mlp,
                 debug=False
                 ):
        """A MXNet replicate of biaffine parser, see following paper
        Dozat, T., & Manning, C. D. (2016). Deep biaffine attention for neural dependency parsing. arXiv:1611.01734.
        It's a re-implementation of DyNet version https://github.com/jcyk/Dynet-Biaffine-dependency-parser

        Parameters
        ----------
        vocab : ParserVocabulary
            built from a data set
        word_dims : int
            word vector dimension
        tag_dims : int
            tag vector dimension
        dropout_dim : float
            keep rate of word dropout (drop out entire embedding)
        lstm_layers : int
            number of lstm layers
        lstm_hiddens : int
            size of lstm hidden states
        dropout_lstm_input : float
            dropout on x in variational RNN
        dropout_lstm_hidden : float
            dropout on h in variational RNN
        mlp_arc_size : int
            output size of MLP for arc feature extraction
        mlp_rel_size : int
            output size of MLP for rel feature extraction
        dropout_mlp : float
            dropout on the output of LSTM
        debug : bool
            debug mode
        """
        super(BiaffineParser, self).__init__()

        def embedding_from_numpy(_we, trainable=True):
            # Build an Embedding initialised from a numpy matrix; word-level
            # dropout (axes=(1,)) drops entire embedding vectors at once.
            word_embs = nn.Embedding(_we.shape[0], _we.shape[1], weight_initializer=mx.init.Constant(_we))
            apply_weight_drop(word_embs, 'weight', dropout_dim, axes=(1,))
            if not trainable:
                # Frozen embeddings (used for pretrained vectors below).
                word_embs.collect_params().setattr('grad_req', 'null')
            return word_embs

        self._vocab = vocab
        self.word_embs = embedding_from_numpy(vocab.get_word_embs(word_dims))
        # Optional frozen pretrained word embeddings, added to the trainable ones.
        self.pret_word_embs = embedding_from_numpy(vocab.get_pret_embs(),
                                                   trainable=False) if vocab.has_pret_embs() else None
        self.tag_embs = embedding_from_numpy(vocab.get_tag_embs(tag_dims))

        # Separate forward/backward LSTM stacks; biLSTM() combines them.
        self.f_lstm = nn.Sequential()
        self.b_lstm = nn.Sequential()
        self.f_lstm.add(orthonormal_VanillaLSTMBuilder(1, word_dims + tag_dims, lstm_hiddens, dropout_lstm_input,
                                                       dropout_lstm_hidden, debug))
        self.b_lstm.add(orthonormal_VanillaLSTMBuilder(1, word_dims + tag_dims, lstm_hiddens, dropout_lstm_input,
                                                       dropout_lstm_hidden, debug))
        for i in range(lstm_layers - 1):
            # Higher layers consume the concatenated fwd+bwd states (2 * hiddens).
            self.f_lstm.add(orthonormal_VanillaLSTMBuilder(1, 2 * lstm_hiddens, lstm_hiddens, dropout_lstm_input,
                                                           dropout_lstm_hidden, debug))
            self.b_lstm.add(orthonormal_VanillaLSTMBuilder(1, 2 * lstm_hiddens, lstm_hiddens, dropout_lstm_input,
                                                           dropout_lstm_hidden, debug))
        self.dropout_lstm_input = dropout_lstm_input
        self.dropout_lstm_hidden = dropout_lstm_hidden

        # One shared orthonormal matrix initialises both dep and head MLPs;
        # the first mlp_arc_size rows feed arc scoring, the rest rel scoring.
        mlp_size = mlp_arc_size + mlp_rel_size
        W = orthonormal_initializer(mlp_size, 2 * lstm_hiddens, debug)
        self.mlp_dep_W = self.parameter_from_numpy('mlp_dep_W', W)
        self.mlp_head_W = self.parameter_from_numpy('mlp_head_W', W)
        self.mlp_dep_b = self.parameter_init('mlp_dep_b', (mlp_size,), mx.init.Zero())
        self.mlp_head_b = self.parameter_init('mlp_head_b', (mlp_size,), mx.init.Zero())
        self.mlp_arc_size = mlp_arc_size
        self.mlp_rel_size = mlp_rel_size
        self.dropout_mlp = dropout_mlp

        # Biaffine weights: arc scorer adds a bias column on x; rel scorer on both.
        self.arc_W = self.parameter_init('arc_W', (mlp_arc_size, mlp_arc_size + 1), init=mx.init.Zero())
        self.rel_W = self.parameter_init('rel_W', (vocab.rel_size * (mlp_rel_size + 1), mlp_rel_size + 1),
                                         init=mx.init.Zero())
        # axis=0: class scores are along the first axis of the flattened logits.
        self.softmax_loss = SoftmaxCrossEntropyLoss(axis=0, batch_axis=-1)

        self.initialize()

    def parameter_from_numpy(self, name, array):
        """ Create parameter with its value initialized according to a numpy tensor

        Parameters
        ----------
        name : str
            parameter name
        array : np.ndarray
            initiation value

        Returns
        -------
        mxnet.gluon.parameter
            a parameter object
        """
        p = self.params.get(name, shape=array.shape, init=mx.init.Constant(array))
        return p

    def parameter_init(self, name, shape, init):
        """Create parameter given name, shape and initiator

        Parameters
        ----------
        name : str
            parameter name
        shape : tuple
            parameter shape
        init : mxnet.initializer
            an initializer

        Returns
        -------
        mxnet.gluon.parameter
            a parameter object
        """
        p = self.params.get(name, shape=shape, init=init)
        return p

    def forward(self, word_inputs, tag_inputs, arc_targets=None, rel_targets=None):
        """Run decoding

        Parameters
        ----------
        word_inputs : mxnet.ndarray.NDArray
            word indices of seq_len x batch_size
        tag_inputs : mxnet.ndarray.NDArray
            tag indices of seq_len x batch_size
        arc_targets : mxnet.ndarray.NDArray
            gold arc indices of seq_len x batch_size
        rel_targets : mxnet.ndarray.NDArray
            gold rel indices of seq_len x batch_size

        Returns
        -------
        tuple
            (arc_accuracy, rel_accuracy, overall_accuracy, loss) when training, else if given gold target
            then return arc_accuracy, rel_accuracy, overall_accuracy, outputs, otherwise return outputs, where outputs is a
            list of (arcs, rels).
        """
        is_train = autograd.is_training()

        def flatten_numpy(ndarray):
            """Flatten nd-array to 1-d column vector

            Parameters
            ----------
            ndarray : numpy.ndarray
                input tensor

            Returns
            -------
            numpy.ndarray
                A column vector
            """
            # Fortran (column-major) order to match reshape_fortran() below.
            return np.reshape(ndarray, (-1,), 'F')

        batch_size = word_inputs.shape[1]
        seq_len = word_inputs.shape[0]
        # Mask out padding and the artificial ROOT token.
        mask = np.greater(word_inputs, self._vocab.ROOT).astype(np.float32)
        num_tokens = int(np.sum(mask))  # non padding, non root token number

        if is_train or arc_targets is not None:
            mask_1D = flatten_numpy(mask)
            mask_1D_tensor = nd.array(mask_1D)

        # Map words unseen in training to UNK before the trainable embedding.
        unked_words = np.where(word_inputs < self._vocab.words_in_train, word_inputs, self._vocab.UNK)
        word_embs = self.word_embs(nd.array(unked_words, dtype='int'))
        if self.pret_word_embs:
            # Pretrained vectors are looked up with the original (un-UNKed) ids.
            word_embs = word_embs + self.pret_word_embs(nd.array(word_inputs))
        tag_embs = self.tag_embs(nd.array(tag_inputs))

        # Dropout
        emb_inputs = nd.concat(word_embs, tag_embs, dim=2)  # seq_len x batch_size

        top_recur = biLSTM(self.f_lstm, self.b_lstm, emb_inputs, batch_size,
                           dropout_x=self.dropout_lstm_input if is_train else 0)
        top_recur = nd.Dropout(data=top_recur, axes=[0], p=self.dropout_mlp)

        W_dep, b_dep = self.mlp_dep_W.data(), self.mlp_dep_b.data()
        W_head, b_head = self.mlp_head_W.data(), self.mlp_head_b.data()
        dep, head = leaky_relu(nd.dot(top_recur, W_dep.T) + b_dep), leaky_relu(nd.dot(top_recur, W_head.T) + b_head)
        dep, head = nd.Dropout(data=dep, axes=[0], p=self.dropout_mlp), nd.Dropout(data=head, axes=[0],
                                                                                  p=self.dropout_mlp)
        dep, head = nd.transpose(dep, axes=[2, 0, 1]), nd.transpose(head, axes=[2, 0, 1])
        # Split the shared MLP output into arc and rel feature slices.
        dep_arc, dep_rel = dep[:self.mlp_arc_size], dep[self.mlp_arc_size:]
        head_arc, head_rel = head[:self.mlp_arc_size], head[self.mlp_arc_size:]

        W_arc = self.arc_W.data()
        arc_logits = bilinear(dep_arc, W_arc, head_arc, self.mlp_arc_size, seq_len, batch_size, num_outputs=1,
                              bias_x=True, bias_y=False)
        # (#head x #dep) x batch_size
        flat_arc_logits = reshape_fortran(arc_logits, (seq_len, seq_len * batch_size))
        # (#head ) x (#dep x batch_size)

        arc_preds = arc_logits.argmax(0)
        # seq_len x batch_size

        if is_train or arc_targets is not None:
            correct = np.equal(arc_preds.asnumpy(), arc_targets)
            arc_correct = correct.astype(np.float32) * mask
            arc_accuracy = np.sum(arc_correct) / num_tokens
            targets_1D = flatten_numpy(arc_targets)
            losses = self.softmax_loss(flat_arc_logits, nd.array(targets_1D))
            arc_loss = nd.sum(losses * mask_1D_tensor) / num_tokens

        if not is_train:
            arc_probs = np.transpose(
                np.reshape(nd.softmax(flat_arc_logits, axis=0).asnumpy(), (seq_len, seq_len, batch_size), 'F'))
        # #batch_size x #dep x #head

        W_rel = self.rel_W.data()
        rel_logits = bilinear(dep_rel, W_rel, head_rel, self.mlp_rel_size, seq_len, batch_size,
                              num_outputs=self._vocab.rel_size, bias_x=True, bias_y=True)
        # (#head x rel_size x #dep) x batch_size
        flat_rel_logits = reshape_fortran(rel_logits, (seq_len, self._vocab.rel_size, seq_len * batch_size))
        # (#head x rel_size) x (#dep x batch_size)

        # Select rel logits for the gold heads in training, predicted heads otherwise.
        _target_vec = nd.array(targets_1D if is_train else flatten_numpy(arc_preds.asnumpy())).reshape(
            seq_len * batch_size, 1)
        _target_mat = _target_vec * nd.ones((1, self._vocab.rel_size))

        partial_rel_logits = nd.pick(flat_rel_logits, _target_mat.T, axis=0)
        # (rel_size) x (#dep x batch_size)

        if is_train or arc_targets is not None:
            rel_preds = partial_rel_logits.argmax(0)
            targets_1D = flatten_numpy(rel_targets)
            rel_correct = np.equal(rel_preds.asnumpy(), targets_1D).astype(np.float32) * mask_1D
            rel_accuracy = np.sum(rel_correct) / num_tokens
            losses = self.softmax_loss(partial_rel_logits, nd.array(targets_1D))
            rel_loss = nd.sum(losses * mask_1D_tensor) / num_tokens

        if not is_train:
            rel_probs = np.transpose(np.reshape(nd.softmax(flat_rel_logits.transpose([1, 0, 2]), axis=0).asnumpy(),
                                                (self._vocab.rel_size, seq_len, seq_len, batch_size), 'F'))
        # batch_size x #dep x #head x #nclasses

        if is_train or arc_targets is not None:
            loss = arc_loss + rel_loss
            # A token is fully correct only if both its arc and rel are correct.
            correct = rel_correct * flatten_numpy(arc_correct)
            overall_accuracy = np.sum(correct) / num_tokens

        if is_train:
            return arc_accuracy, rel_accuracy, overall_accuracy, loss

        outputs = []

        for msk, arc_prob, rel_prob in zip(np.transpose(mask), arc_probs, rel_probs):
            # parse sentences one by one
            msk[0] = 1.  # force ROOT into the mask so MST decoding can use it
            sent_len = int(np.sum(msk))
            arc_pred = arc_mst(arc_prob, sent_len, msk)
            rel_prob = rel_prob[np.arange(len(arc_pred)), arc_pred]
            rel_pred = rel_argmax(rel_prob, sent_len)
            # Index 0 is ROOT; report only real tokens.
            outputs.append((arc_pred[1:sent_len], rel_pred[1:sent_len]))

        if arc_targets is not None:
            return arc_accuracy, rel_accuracy, overall_accuracy, outputs
        return outputs

    def save_parameters(self, filename):
        """Save model

        Parameters
        ----------
        filename : str
            path to model file
        """
        params = self._collect_params_with_prefix()
        if self.pret_word_embs:  # don't save word embeddings inside model
            params.pop('pret_word_embs.weight', None)
        arg_dict = {key: val._reduce() for key, val in params.items()}
        ndarray.save(filename, arg_dict)

    def save(self, save_path):
        """Save model parameters to disk.

        Parameters
        ----------
        save_path : str
            path to model file
        """
        self.save_parameters(save_path)

    def load(self, load_path):
        """Load model

        Parameters
        ----------
        load_path : str
            path to model file
        """
        # allow_missing: pretrained embeddings were deliberately excluded on save.
        self.load_parameters(load_path, allow_missing=True)
| [
"jfservice@126.com"
] | jfservice@126.com |
ba166e925f28217c4618acca4463a058bf51fb87 | 456c7ca7c0f8d7056abb99c0af659472166896aa | /shorty/core/migrations/0006_auto_20201127_0852.py | 7bdf4e439b5946a6e3bfee854872d6e697e480cf | [] | no_license | MrAmbiG/shorty | 5988842651b551be4f77716bbdfb46f8f3c8e62d | b3da39ab73b6eea16dde25a485980736a25f9789 | refs/heads/main | 2023-02-22T04:42:53.168745 | 2021-01-21T23:17:51 | 2021-01-21T23:17:51 | 316,470,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.1.3 on 2020-11-27 08:52
from django.db import migrations, models
import uuid


class Migration(migrations.Migration):
    """Auto-generated migration: make ``Shorties.alias`` a unique UUID field."""

    dependencies = [
        ('core', '0005_shorties_https'),
    ]

    operations = [
        migrations.AlterField(
            model_name='shorties',
            name='alias',
            # uuid4 default gives each new row a random, effectively unique alias.
            field=models.UUIDField(default=uuid.uuid4, unique=True),
        ),
    ]
| [
"amd@ubuntuserver-18-x8664.local.lan"
] | amd@ubuntuserver-18-x8664.local.lan |
f5f0aa235873cd336744f8fca21242d7eb9573e4 | 3a14dc7e73b8ab69734f078a8810977250868882 | /catkin_ws/build/bl_group_a/catkin_generated/pkg.develspace.context.pc.py | ce809a93d0cfe7f6c47c8353bbfd6716a9f9e46f | [] | no_license | TianmingQiu/Biologically_Inspired_Learning_for_Humanoid_Robots | a9e6562ae002753fbac107fcbace2b95717e3fc0 | 43691273cb7b5713c51b3af6c0eebd4d2a21b9c7 | refs/heads/master | 2021-01-22T06:23:16.765943 | 2017-09-28T18:20:32 | 2017-09-28T18:20:32 | 92,548,642 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (devel space) for "bl_group_a";
# values were substituted by CMake from pkg.context.pc.in -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Include directories for this package ( ';'-separated in the template ).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/biolearning_a/catkin_ws/src/bl_group_a/include".split(';') if "/home/biolearning_a/catkin_ws/src/bl_group_a/include" != "" else []
# Catkin run dependencies, converted to a space-separated list for pkg-config.
PROJECT_CATKIN_DEPENDS = "robot_specific_msgs;roscpp;rospy;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "bl_group_a"
PROJECT_SPACE_DIR = "/home/biolearning_a/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
"qtianming@hotmail.com"
] | qtianming@hotmail.com |
430b684a53b630a0c16ed80771c9c00fa5114ed3 | 18d5f2166427567401c170584fc0698344d142ca | /server/flaskr/login.py | 6ef9d1f495646a1dfcf6248c9abedcd5cae25be0 | [] | no_license | Jayuses/RPVdb | 01741575da914efe257ce4c458ed3c430325a581 | 9b41073025497c618e7bad5180cefbc214057aba | refs/heads/main | 2023-06-08T09:07:58.752721 | 2023-05-31T03:31:38 | 2023-05-31T03:31:38 | 352,701,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,813 | py | #!/RPVdb/server/flaskr/getdata.py
# -*- coding: utf-8 -*-
# Blueprint 'login': handles login requests from the front end.
from flask import (Blueprint, request)
from flask import Flask, jsonify
from flaskr.db import get_users
from werkzeug.security import check_password_hash, generate_password_hash

# Shared status holder polled by the front end via GET.
# Status codes used below: -1 initial, 0 wrong password, 1 success, 2 unknown user.
# NOTE(review): this is module-level mutable state shared across requests --
# concurrent logins can interleave; confirm the app runs single-threaded.
statusdata = {'status': '', 'class': -1}

bp = Blueprint('login', __name__, url_prefix='/login')
def login():
response = {}
if request.method == 'POST':
loginForm = request.get_json()
username = loginForm.get('username')
password = loginForm.get('password')
users = get_users()
cursor = users.cursor()
statusdata['status'] = -1
user_class = -1 #用户等级
user = cursor.execute(
"SELECT * FROM tbUser WHERE NAME = ?",(username,)).fetchone()
if user is None:
statusdata['status'] = 2 #用户名不存在
else:
if user[2] == 'user':
user_class = 1
elif user[2] == 'administrator':
user_class = 0
if not check_password_hash(user[1], password):
statusdata['status'] = 0 #密码错误
else:
statusdata['status'] = 1 #登录成功
statusdata['class'] = user_class
cursor.close()
else:
response = statusdata
return jsonify(response)
@bp.route('/change', methods=['POST', 'GET'])
def change():
    """Bulk user administration endpoint.

    POST body: {'delData': [{'name': ...}, ...], 'mod': [{'name', 'password',
    'level'}, ...]}.  Deletes every user in delData, then upserts every entry
    in mod: existing users are updated (a password of '******' or '' means
    "keep the current password"), unknown names are inserted.  Passwords are
    stored hashed.  GET returns the last recorded status value.
    """
    response = {}
    if request.method == 'POST':
        change_quest = request.get_json()
        del_data = change_quest.get('delData')
        mod = change_quest.get('mod')
        users = get_users()
        cursor = users.cursor()
        for item in del_data:
            cursor.execute('DELETE FROM tbUser WHERE NAME=?', (item['name'],))
        for item in mod:
            user = cursor.execute("SELECT * FROM tbUser WHERE NAME = ?", (item['name'],)).fetchone()
            if user:
                if item['password'] == '******' or item['password'] == '':
                    # Placeholder password: update the level only.
                    sql = 'UPDATE tbUser SET [LEVEL]=? WHERE NAME=?'
                    cursor.execute(sql, (item['level'], item['name']))
                else:
                    sql = 'UPDATE tbUser SET PASSWORD=?,[LEVEL]=? WHERE NAME=?'
                    cursor.execute(sql, (generate_password_hash(item['password']), item['level'], item['name']))
            else:
                sql = 'INSERT INTO tbUser (NAME,PASSWORD,[LEVEL]) VALUES (?,?,?)'
                cursor.execute(sql, (item['name'], generate_password_hash(item['password']), item['level']))
        # NOTE(review): commit() is called on the cursor, not the connection --
        # this matches pyodbc's API; confirm the DB driver in flaskr.db.
        cursor.commit()
        cursor.close()
        statusdata['status'] = 1
    else:
        response = statusdata['status']
    return jsonify(response)
"1367435459@qq.com"
] | 1367435459@qq.com |
f41c8ef0c42de09b23f2fdd1ba5d874ab2f6e3ac | eefddeb24d8eb1b0122dc3c783226a9ef23b1c48 | /vKiriroMarketing/settings.py | 9d2d8d97f1e8ad80587a5f74e64ca9bd4fbce977 | [] | no_license | monyoudom/vKiriromMartketing | 1ddfc4668b199d01c76f111e543a021d1273d74d | 3240a96b83407b43b2e4529ab2a37646d50ede74 | refs/heads/master | 2021-05-02T01:46:36.944264 | 2018-02-10T01:25:04 | 2018-02-10T01:25:04 | 120,874,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,385 | py | """
Django settings for vKiriroMarketing project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '%cz@jh=7z@sx8qi9&3j7hm#1(2*0vpv@ab7x#m$7&t@s*320-z'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'multiupload',  # third-party: multi-file upload form fields
    'Index',        # project app
    'imagekit',     # third-party: image processing helpers
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'vKiriroMarketing.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'vKiriroMarketing.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'vk.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/statics/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'statics')]

# User-uploaded media.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# NOTE(review): MEDIA_DIRS is not a Django setting; presumably project-specific.
MEDIA_DIRS = [os.path.join(BASE_DIR, 'media')]
| [
"thongmonyoudom@outlook.com"
] | thongmonyoudom@outlook.com |
afec826c58124ebffe7ce5d984202b83272b2f34 | dcca0d8581c2d9c5690137e689ffda74dda4b78e | /mysite/core/models.py | 8c2787a965265e3659edc41e0607b98bc9047e63 | [] | no_license | mamkinproger/my-first-site | b9a0d701994feeb2e2c72c6e74bfa52edaec01b7 | d4886c3c7a6b459ba38101528670cee1ce252ac1 | refs/heads/master | 2021-08-31T04:12:29.573978 | 2017-12-20T09:47:29 | 2017-12-20T09:47:29 | 114,870,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
    """Extra per-user data, created automatically by the post_save signal below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.TextField(max_length=500, blank=True)
    location = models.CharField(max_length=30, blank=True)
    email = models.EmailField(max_length=254, blank=True, verbose_name='Email', unique=True)

    def __str__(self):
        # __str__ must return a str; returning the related User instance
        # itself raised TypeError.  Use the username instead.
        return self.user.username
class DeviceStandart(models.Model):
    """A device standard, identified by an integer ``type`` code."""
    type = models.IntegerField()

    class Meta:
        managed = True
        db_table = 'device_standart'

    def __str__(self):
        # ``type`` holds an int; __str__ must return a str, so convert
        # explicitly (the original returned the raw int -> TypeError).
        return str(self.type)
class Device(models.Model):
    """A device linked to zero or more standards through DeviceStandartToDevice."""
    name = models.TextField(blank=True, null=True)
    standarts = models.ManyToManyField(DeviceStandart, through='DeviceStandartToDevice')
    state = models.IntegerField()

    class Meta:
        managed = True
        db_table = 'device'

    def __str__(self):
        # ``name`` may be NULL (null=True); fall back to '' so __str__ always
        # returns a str instead of raising TypeError for unnamed devices.
        return self.name or ''
class DeviceStandartToDevice(models.Model):
    """Explicit join table for the Device <-> DeviceStandart many-to-many."""
    deviceid = models.ForeignKey(Device, models.CASCADE,
                                 db_column='deviceid', blank=True, null=True)
    devicestandartid = models.ForeignKey(DeviceStandart, models.CASCADE,
                                         db_column='devicestandartid', blank=True, null=True, )

    class Meta:
        managed = True
        db_table = 'device_standart_to_device'
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    # Create a Profile the first time a User is saved; on every save,
    # also persist the related profile so it stays in sync.
    if created:
        Profile.objects.create(user=instance)
    instance.profile.save()
| [
"kim_chernov@mail.ru"
] | kim_chernov@mail.ru |
9a8348fbe4665c05c3f40c4871a58afb60e8de78 | 59239e9aecc4b0611cd1f82dad74a13b21c0a97e | /model/train/train_skill_update.py | a77758383594682d02c3c766480cf6a18c43666b | [] | no_license | charlesXu86/FAQ | 53a8160fef2920c1e5005ea4f55a642071986494 | d5e253f44377f63b3af9accd9eab39fc599ed7a7 | refs/heads/master | 2023-01-10T10:45:21.403238 | 2020-11-11T13:40:23 | 2020-11-11T13:40:23 | 278,859,443 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : train_skill.py
@Time : 2020/7/18 10:11 上午
@Desc : 新技能训练
"""
import requests
import json
def train_skill_up(skillId):
    """Kick off a model-training run on the remote Rasa training service.

    Posts a hard-coded English demo training payload (domain / config / nlu /
    stories, each as Rasa's YAML/markdown text) to the ``/model/train``
    endpoint and returns the server's response body.

    :param skillId: skill identifier.  Currently unused by the request; kept
        for interface compatibility with existing callers.
    :return: response text from the training service.
    """
    url = 'http://47.103.73.160:9005/model/train'

    # Training payload texts, preserved verbatim from the working demo data.
    domain_en = "intents:\n - greet\n - goodbye\n - affirm\n - deny\n - mood_great\n - mood_unhappy\n\nactions:\n - utter_greet\n - utter_cheer_up\n - utter_did_that_help\n - utter_happy\n - utter_goodbye\n\ntemplates:\n utter_greet:\n - text: \"Hey! How are you?\"\n\n utter_cheer_up:\n - text: \"Here is something to cheer you up:\"\n image: \"https://i.imgur.com/nGF1K8f.jpg\"\n\n utter_did_that_help:\n - text: \"Did that help you?\"\n\n utter_happy:\n - text: \"Great carry on!\"\n\n utter_goodbye:\n - text: \"Bye\""
    config_en = "language: en\npipeline: supervised_embeddings\npolicies:\n - name: MemoizationPolicy\n - name: KerasPolicy"
    nlu_en = "## intent:greet\n- hey\n- hello\n- hi\n- see hi hi\n- see hello\n## intent:goodbye\n- bye\n- goodbye\n- have a nice day\n- see you\n- see you later\n## intent:affirm\n- yes\n- indeed\n## intent:deny\n- no\n- never\n## intent:mood_great\n- perfect\n- very good\n- great\n## intent:mood_unhappy\n- sad\n- not good\n- unhappy"
    stories_en = "## happy path\n* greet\n\n - utter_greet\n\n* mood_great\n\n - utter_happy\n\n## sad path 1\n* greet\n\n - utter_greet\n\n* mood_unhappy\n\n - utter_cheer_up\n\n - utter_did_that_help\n\n* affirm\n\n - utter_happy\n\n## sad path 2\n* greet\n\n - utter_greet\n\n* mood_unhappy\n\n - utter_cheer_up\n\n - utter_did_that_help\n\n* deny\n\n - utter_goodbye\n\n## say goodbye\n* goodbye\n\n - utter_goodbye"

    data = {}
    data["domain"] = domain_en
    data["config"] = config_en
    data["nlu"] = nlu_en
    data["stories"] = stories_en
    data["force"] = False
    data["save_to_default_model_directory"] = False

    post_data = json.dumps(data)
    results = requests.post(url, post_data)
    return results.text
"charlesxu86@163.com"
] | charlesxu86@163.com |
4cb0b660ba917efc4f0d597f4ca7d91d516705e3 | 7f725355c8a95ea3b69b07c1e2e5a9ffc4caf17a | /manage.py | 57444202dc8c52e536b74a92f6e346903ef7fa1e | [] | no_license | taddeimania/nfl_stat_scraper | 1d62ac38b46a7562936c6150103b8eee621ecfc5 | 0b9d0eadb80fd9e7c3e1e0de1c759b2e5659cb6b | refs/heads/master | 2021-01-18T16:35:36.859869 | 2012-11-26T03:29:06 | 2012-11-26T03:29:06 | 6,858,032 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #!/usr/bin/env python
"""Django command-line entry point for the ``nfl`` project."""
import os
import sys


def main():
    """Point Django at the project settings and run the requested command."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nfl.settings")
    # Imported lazily so a missing Django install only fails when a
    # management command is actually executed.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
"jtaddei@gmail.com"
] | jtaddei@gmail.com |
1195022dbbe97772e58c428b745a30662179e059 | b3495ba937ec99453bd8f95ced8703e7d8548a95 | /leetcode/IntervalList Intersection.py | 08dc72b2b8a86148458249cad8b9474bf21dea8c | [] | no_license | luoChengwen/Hacker_rank_test | 975a47bde862a33c057883a7f394ea280efece31 | 858cef7b7c01b86c1f771e2b262e931af673cb73 | refs/heads/master | 2021-09-24T18:10:39.215407 | 2021-09-23T07:35:54 | 2021-09-23T07:35:54 | 239,445,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | class Solution:
def intervalIntersection(self, A, B): # AB are lists
output = []
ai,bi = 0, 0
while ai < len(A) and bi < len(B):
start_A, end_A = A[ai]
start_B, end_B = B[bi]
start = max(start_A, start_B)
end = min(end_A, end_B)
if start <= end: output.append([start, end])
if end_A < end_B: ai+=1
else: bi+=1
return output
# Ad-hoc fixtures left over from manual testing; the second A/B pair
# overwrites the first, so only it survives at module import time.
A = [[8, 15]]
B = [[2, 6], [8, 10], [12, 20]]
A = [[0,2],[5,10],[13,23],[24,25]]
B = [[1,5],[8,12],[15,24],[25,26]]
"lcwdaisy@gmail.com"
] | lcwdaisy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.