text stringlengths 8 6.05M |
|---|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'wjq'
from apscheduler.schedulers.blocking import BlockingScheduler
from methods.RedisInfo import *
import multiprocessing
def start():
    """Run the Redis monitor every 60 seconds, blocking the current process."""
    job_scheduler = BlockingScheduler()
    job_scheduler.add_job(monitor, 'interval', seconds=60)
    job_scheduler.start()
def task(host, port):
    """Collect and persist statistics for a single Redis instance (db 0).

    Note: the original bound the collector to a local named ``monitor``,
    shadowing the module-level ``monitor`` function; no local is needed.
    """
    RedisInfo(host, port, 0).save()
def monitor():
    """Read host/port pairs from ./redis.properties and fan out one
    collection task per Redis instance via a process pool.

    Lines starting with '#' are comments; blank lines are skipped.
    Each data line is expected to be ``<host> <port>``.
    """
    try:
        pool = multiprocessing.Pool()
        with open('./redis.properties') as f:
            for line in f:
                # strip first: the original passed the port with its
                # trailing newline still attached ('6379\n')
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                server = line.split(' ')
                pool.apply_async(task, args=(server[0], server[1]))
        pool.close()
        pool.join()
    except Exception as e:
        print(e)
if __name__ == '__main__':
    # start the blocking scheduler; runs until interrupted
    start()
import pyttsx3
import datetime
import speech_recognition
import wikipedia
import webbrowser
import os
import random
# Initialize the text-to-speech engine with the Windows SAPI5 driver.
engine = pyttsx3.init("sapi5")
voices = engine.getProperty("voices")
# use the first installed voice — NOTE(review): which voice this is depends
# on the machine's installed voice packs; confirm on the target system
engine.setProperty("voice", voices[0].id)
def speak(audio):
    """Speak the given text aloud and block until playback finishes."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current local time of day.

    The original used the inclusive ranges 0-12 and 12-18, so hour 12
    matched both branches (morning won by order); half-open ranges map
    each hour to exactly one greeting.
    """
    hour = datetime.datetime.now().hour
    if 0 <= hour < 12:
        speak("Good Morning")
    elif 12 <= hour < 18:
        speak("Good Afternoon")
    else:
        speak("Good evening")
    speak("I am Jarvis! manthan, How may I help you?")
def takeCommand():
    """Listen on the default microphone and return the recognised text.

    Returns the transcript as a string, or the literal string "None" when
    recognition fails (callers compare against that sentinel, not None).
    """
    r = speech_recognition.Recognizer()
    with speech_recognition.Microphone() as source:
        print("Listening...")
        # seconds of silence that ends a phrase
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # uses Google's free speech API with Indian English
        query = r.recognize_google(audio, language="en-in")
        print(f"User said: {query}\n")
    except Exception as e:
        # network failure or unintelligible audio
        print("Say that again please...")
        return "None"
    return query
# Playlist of YouTube Music links used by the "play music" command below.
# NOTE(review): this name shadows the builtin `list`; renaming it would also
# require updating the lookup in the main loop.
list = [
    "https://music.youtube.com/watch?v=iznXe9d79jw&list=MLCT",
    "https://music.youtube.com/watch?v=SKm_GN2LaXU&list=MLCT",
    "https://music.youtube.com/watch?v=aDkEJ-DdA0w&list=MLCT",
    "https://music.youtube.com/watch?v=8m77vBdtnAs&list=MLCT",
    "https://music.youtube.com/watch?v=fNWK3HlD0-A&list=MLCT",
    "https://music.youtube.com/watch?v=Y9Qpya1AlXM&list=MLCT",
    "https://music.youtube.com/watch?v=xOxNrZBTuP4&list=MLCT",
    "https://music.youtube.com/watch?v=VPPKfQ3adc4&list=MLCT",
    "https://music.youtube.com/watch?v=4eHABt5SIgc&list=MLCT",
    "https://music.youtube.com/watch?v=VmkNFegq7VI&list=MLCT",
    "https://music.youtube.com/watch?v=o-yueRd-k0c&list=MLCT",
    "https://music.youtube.com/watch?v=R4hDcd9fzRk&list=MLCT",
    "https://music.youtube.com/watch?v=6PL39H2B7UQ&list=MLCT",
    "https://music.youtube.com/watch?v=xBqpEpMGZe8&list=MLCT",
]
if __name__ == "__main__":
    wishMe()
    while True:
        query = takeCommand().lower()
        # Logic for executing tasks based on query
        if "what is" in query or "who is" in query:
            # both phrasings were duplicated verbatim in the original;
            # they share the same wikipedia lookup
            speak("Searching Wikipedia...")
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=100)
            speak("According to wikipedia")
            print(results)
            speak(results)
        elif "open youtube" in query:
            webbrowser.open("youtube.com")
        elif "open my website" in query:
            webbrowser.open("techfornerdz.com")
        elif "open google" in query:
            webbrowser.open("google.com")
        elif "open whatsapp" in query:
            webbrowser.open("web.whatsapp.com")
        elif "play naruto music" in query:
            webbrowser.open(
                "https://music.youtube.com/watch?v=X4fIqbUjEEI&list=RDAMVMX4fIqbUjEEI"
            )
        elif "play hindi music" in query:
            webbrowser.open(
                "https://music.youtube.com/watch?v=NeXbmEnpSz0&list=RDAMVMNeXbmEnpSz0"
            )
        elif "play music" in query:
            # randint is inclusive on both ends: 0..13 covers all 14 entries
            x = random.randint(0, 13)
            webbrowser.open(list[x])
        elif "time" in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            print(strTime)
            speak(f"Sir ji time is {strTime}")
        elif "open chrome" in query:
            filesPath = "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe"
            os.startfile(filesPath)
        elif "who are you" in query:
            speak("I am JARVIS! your e-friend; Or you can say... your robo-friend.")
        elif "quit" in query:
            speak("Bye Bye manthan, have a nice day!")
            # bug fix: the original said goodbye but never left the loop
            break
|
from bs4 import BeautifulSoup
import bs4
import requests
from operation import Operation
import pandas as pd
def mount_references(bs4_source):
    """Map each citation's note id to the first link found inside it.

    For every ``<cite>`` element, takes the ``id`` attribute of its
    grandparent (the footnote container) and pairs it with the ``href`` of
    the first ``<a>`` child, or None when the citation has no link.
    """
    references = {}
    for citation in bs4_source.find_all('cite'):
        note_id = citation.parent.parent.attrs['id']
        link = None
        for child in citation.contents:
            if child.name == 'a':
                link = child.attrs['href']
                break
        references[note_id] = link
    return references
def scrap_page():
    """Scrape the Portuguese Wikipedia list of Federal Police operations.

    Downloads the page, keeps only the ``<li>`` items that belong to the
    operation listings (filtered by their ancestors' attributes), and
    returns a list of :class:`Operation` objects mounted from them.
    """
    url = 'https://pt.wikipedia.org/wiki/Lista_de_opera%C3%A7%C3%B5es_da_Pol%C3%ADcia_Federal_do_Brasil#cite_note-245'
    raw_content = BeautifulSoup(requests.get(url).text, 'html.parser')
    operations = []
    items = raw_content.find_all('li')
    references = mount_references(raw_content)
    # Keeping only the resources i've mapped
    items = [
        i for i in items
        # attribute-less <li> only (navigation/toc items carry attributes)
        if not i.attrs
        and (
            # multi-column operation lists
            (
                'style' in i.parent.parent.attrs
                and i.parent.parent.attrs['style'].startswith('-moz-column-count')
            )
            or
            # items directly under the article body
            (
                'class' in i.parent.parent.attrs
                and i.parent.parent.attrs['class'] == ['mw-parser-output']
            )
            or
            # nested sub-lists
            (
                i.parent.name == 'ul'
                and i.parent.parent.name == 'li'
            )
        )
        # NOTE(review): bs4 exposes 'class' as a list, so the comparison with
        # the plain string 'references' may never match — confirm intent
        and not (
            'class' in i.parent.attrs
            and i.parent.attrs['class'] == 'references'
        )
    ]
    for i in items:
        operations.append(
            Operation().mount_from_bs4(i, references)
        )
    return operations
operations = scrap_page()
# json_operations aliases the same list object; enrich() below mutates the
# shared Operation instances in place before they are serialised
json_operations = operations
for o in operations:
    o.enrich()
pd.DataFrame(json_operations).to_csv('pf_operations.csv', sep=',')
|
from onegov.ballot.models.vote.mixins import DerivedAttributesMixin
from onegov.ballot.models.vote.mixins import DerivedBallotsCountMixin
from onegov.core.orm import Base
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Text
from uuid import uuid4
class BallotResult(Base, TimestampMixin, DerivedAttributesMixin,
                   DerivedBallotsCountMixin):
    """ The result of a specific ballot. Each ballot may have multiple
    results. Those results may be aggregated or not.

    """

    __tablename__ = 'ballot_results'

    #: identifies the result, may be used in the url
    id = Column(UUID, primary_key=True, default=uuid4)

    #: The entity id (e.g. BFS number).
    entity_id = Column(Integer, nullable=False)

    #: the name of the entity
    name = Column(Text, nullable=False)

    #: the district this entity belongs to
    district = Column(Text, nullable=True)

    #: True if the result has been counted and no changes will be made anymore.
    #: If the result is definite, all the values below must be specified.
    counted = Column(Boolean, nullable=False)

    #: number of yeas, in case of variants, the number of yeas for the first
    #: option of the tie breaker (counts default to 0 when unspecified)
    yeas = Column(Integer, nullable=False, default=lambda: 0)

    #: number of nays, in case of variants, the number of nays for the first
    #: option of the tie breaker (so a yay for the second option)
    nays = Column(Integer, nullable=False, default=lambda: 0)

    #: number of empty votes
    empty = Column(Integer, nullable=False, default=lambda: 0)

    #: number of invalid votes
    invalid = Column(Integer, nullable=False, default=lambda: 0)

    #: number of eligible voters
    eligible_voters = Column(Integer, nullable=False, default=lambda: 0)

    #: number of expats (nullable: may be unknown for older results)
    expats = Column(Integer, nullable=True)

    #: the ballot this result belongs to; results are removed together with
    #: their ballot (ON DELETE CASCADE)
    ballot_id = Column(
        UUID,
        ForeignKey('ballots.id', ondelete='CASCADE'),
        nullable=False
    )
|
#!/usr/bin/env python
import numpy as np
import time
import math
import csv
import sys
#from misvmio import parse_c45, bag_set
import misvm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import svm
from sklearn import datasets
def disp_results(this_algorithm, reg_range, accuracies):
    """Print the accuracy for each regularisation value of one algorithm.

    this_algorithm: base name of the classifier (e.g. "SIL").
    reg_range: iterable of regularisation constants used as key suffixes.
    accuracies: dict mapping "<algorithm><reg>" -> accuracy in [0, 1].
    """
    for reg in reg_range:
        this_algorithm_reg = this_algorithm + str(reg)
        this_accuracy = accuracies[this_algorithm_reg]
        # parenthesised print is valid under both Python 2 and Python 3
        print('\n%s Accuracy: %.1f %%' % (this_algorithm_reg, 100.0 * this_accuracy))
def assign_test_instance_labels(num_test_bags, test_labels, bags):
    """Expand bag-level labels to instance level.

    Assumes every instance carries the same label as its bag.

    num_test_bags: number of bags to process (prefix of ``bags``).
    test_labels: one label per bag.
    bags: sequence of bags, each a sequence of instances.

    Returns a flat list with one label per instance, in bag order.

    The original converted ``bags[b:b+1]`` to a numpy array just to read
    ``shape[1]`` — which crashes on ragged bags; ``len(bags[b])`` is the
    same count and works for any per-bag size.
    """
    instance_test_labels = []
    for b in range(num_test_bags):
        # every instance in bag b inherits the bag's label
        instance_test_labels.extend([test_labels[b]] * len(bags[b]))
    return instance_test_labels
def shape_csv(facetrack_filename):
    """Return the number of rows in a space-delimited CSV file."""
    with open(facetrack_filename) as csvfile:
        return sum(1 for _ in csv.reader(csvfile, delimiter=' '))
def person_to_num(person):
    """Map a speaker id to a class label.

    Persons '1' and '2' are the positive class (+1), person '3' the
    negative class (-1); any other id yields the string "error".
    """
    labels = {'1': 1, '2': 1, '3': -1}
    return labels.get(person, "error")
def get_bags_and_labels( narrative_filename, num_face_features, face_track_features ):
    # read in ground truth narrative label and form bags and instance bags and labels
    # Returns [bags, instance_bags, bag_labels, instance_labels] as one list.
    # Each narrative row is '<label> <track_id> <track_id> ...'.
    bbt_bags = []
    bbt_labels = []
    bbt_inst_bags = []
    bbt_inst_labels = []
    result = []
    with open( narrative_filename ) as csvfile:
        narrative_reader = csv.reader( csvfile, delimiter=' ' )
        for row in narrative_reader:
            narrative_len = len( row )
            #print '\nget_bags_and_labels for row of narrative length (includes label at index 0 ) is %d' % narrative_len
            #print row
            this_num_face_tracks = narrative_len - 1 # label is at index 0
            this_bag = np.zeros( ( this_num_face_tracks, num_face_features ) ) # unknown number of face tracks if skipping empty ones
            this_label = row[ 0 ]
            face_track_index = 0
            for row_index in range( 1, narrative_len ): # the label is at index 0
                this_face_track_id = int( row[ row_index ] )
                # track ids are 1-based; the slice keeps the row 2-dimensional
                this_face_track_features = face_track_features[ ( this_face_track_id - 1 ) : this_face_track_id ]
                #face_track_index = row_index - 1 # cannot use as skipping over empty face tracks
                #this_bag[ face_track_index ] = this_face_track_features
                num_non_zero = np.count_nonzero( this_face_track_features )
                if ( num_non_zero == 0 ):
                    # an all-zero feature row is treated as missing and skipped
                    print '\n******* warning ********* no features for this face track id %d' %( this_face_track_id )
                    #return
                else:
                    bbt_inst_bags.append( this_face_track_features ) # one face track per bag
                    bbt_inst_labels.append( this_label )
                    this_bag[ face_track_index ] = this_face_track_features # lots of face tracks per bag
                    face_track_index = face_track_index + 1
            this_bag_length = len( this_bag )
            #print '\n length of this_bag %d' % this_bag_length
            #print '\n face_track_index %d' % face_track_index
            # redux bag size if skipped over empty face tracks
            # NOTE(review): this trims exactly one trailing row, but more than
            # one face track may have been skipped above — confirm whether
            # this should be this_bag[ 0:face_track_index , : ]
            if ( this_bag_length > face_track_index ):
                #print this_bag
                this_bag = this_bag[ 0:-1 , : ]
                this_bag_length = len( this_bag )
                #print '\n length of adjusted this_bag %d' % this_bag_length
                #print '\n face_track_index %d' % face_track_index
                #print this_bag
            bbt_labels.append( this_label )
            bbt_bags.append( this_bag )
    #print '\ndebug bbt_inst_labels'
    #print bbt_inst_labels
    result.append( bbt_bags )
    result.append( bbt_inst_bags )
    result.append( bbt_labels )
    result.append( bbt_inst_labels )
    #print '\n number of results %d ' % len( result )
    return( result )
def get_instance_labels(persons_name, face_track_person_filename):
    """Return a numpy array of per-face-track labels for one person.

    persons_name: capitalised person name to match (e.g. 'Sheldon').
    face_track_person_filename: space-delimited file, one row per face
        track, with the person's name in column 1.

    Returns an array with +1 where the row names this person, -1 otherwise.

    The original opened the file outside a ``with`` (leaking the handle)
    and read it twice with a seek; reading the rows once fixes both.
    """
    with open(face_track_person_filename) as csvfile:
        rows = list(csv.reader(csvfile, delimiter=' '))
    face_track_labels = np.zeros(len(rows))
    for label_index, row in enumerate(rows):
        # compare case-insensitively via capitalize, as the source data does
        if row[1].capitalize() == persons_name:
            face_track_labels[label_index] = 1
        else:
            face_track_labels[label_index] = -1
    return face_track_labels
def episode_bags_and_labels( this_episode, this_person ):
    """Load face-track features and narrative labels for one episode.

    this_episode: episode number as a string ('1'..'6'), used to build
        the feature and narrative filenames.
    this_person: character name used in the narrative filename.

    Returns [bags, bag_label_nums, instance_bags, instance_label_nums],
    where labels have been mapped to +1/-1 via person_to_num.
    """
    results = []
    # read in face track features
    face_track_features = np.loadtxt( "bbt_s01e0" + this_episode + "_cnn_mean_std.txt" )
    #print '\nfirst line of face track id 1 features mean std is %%%%%%%%%%%%%%%%%%%%%%'
    #print face_track_features[:1]
    num_face_tracks = face_track_features.shape[ 0 ]
    num_face_features = face_track_features.shape[ 1 ]
    #print '\nnum_face_features %d' % num_face_features
    # collect bags, instances and labels
    # noisey labels for bags
    #narrative_filename = "bbt_s01e01_Sheldon_narrative_label_face_tracks.txt"
    narrative_filename = "bbt_s01e0" + this_episode + "_" + this_person + "_narrative_label_face_tracks.txt"
    bags_and_labels = get_bags_and_labels( narrative_filename, num_face_features, face_track_features )
    #print '\ndebug after get_bags_and_labels'
    bbt_bags = bags_and_labels[ 0 ]
    bbt_inst_bags = bags_and_labels[ 1 ]
    bbt_labels = bags_and_labels[ 2 ]
    bbt_instance_test_labels = bags_and_labels[ 3 ]
    bbt_label_nums = []
    bbt_instance_test_label_nums = []
    num_rows = shape_csv( narrative_filename )
    # convert 1st, 2nd person to +1 and 3rd person to -1
    for n in range( 0, num_rows ):
        bbt_label_nums.append( person_to_num( bbt_labels[ n ] ) )
    for m in range( 0, len( bbt_instance_test_labels ) ):
        bbt_instance_test_label_nums.append( person_to_num( bbt_instance_test_labels[ m ] ) )
    #print '\nbbt_labels are '
    #print bbt_labels
    #print '\nbbt_label_nums are '
    #print bbt_label_nums
    # instance labels for this person in s01e01
    #face_track_person_filename = "bbt_s01e0" + this_episode + "_" + this_person + "_face_track_id_person.txt"
    #bbt_instance_test_labels = get_instance_labels( this_person, face_track_person_filename )
    #print '\ndebug bbt_instance_test_labels'
    #print bbt_instance_test_labels
    #print '\ndebug bbt_instance_test_label_nums'
    #print bbt_instance_test_label_nums
    results.append( bbt_bags )
    results.append( bbt_label_nums )
    results.append( bbt_inst_bags )
    results.append( bbt_instance_test_label_nums )
    return( results )
def run_person_for_one_episode( this_episode, this_person ):
    """Train and evaluate the MIL classifiers for one person/episode pair.

    Returns [num_pos_bags, num_neg_bags], or an empty list when the episode
    has no positive or no negative bags (nothing to train on).

    NOTE(review): training and test sets are deliberately identical here
    (see the 'for debugging' section) — reported accuracies are training
    accuracies, not generalisation estimates.
    """
    results = []
    #print '\nstart of run_person_for_one_episode'
    # bbt data *************************************************************
    this_episode_bags_n_labels = episode_bags_and_labels( this_episode, this_person )
    this_episode_bags = this_episode_bags_n_labels[ 0 ]
    this_episode_label_nums = this_episode_bags_n_labels[ 1 ]
    this_episode_inst_bags = this_episode_bags_n_labels[ 2 ]
    this_episode_instance_test_labels = this_episode_bags_n_labels[ 3 ]
    bbt_bags = this_episode_bags
    bbt_label_nums = this_episode_label_nums
    bbt_inst_bags = this_episode_inst_bags
    bbt_instance_test_labels = this_episode_instance_test_labels
    print '\nnumber bags %d' % len( bbt_bags )
    #print len( bbt_bags )
    num_pos_bags = sum( x > 0 for x in bbt_label_nums )
    print '\nnumber of positive bags %d' % num_pos_bags
    #print num_pos_bags
    num_neg_bags = sum( x < 0 for x in bbt_label_nums )
    print '\nnumber of negative bags %d' % num_neg_bags
    #print num_neg_bags
    # a one-class episode cannot be trained on; signal this with an empty result
    if ( num_neg_bags == 0 ) or ( num_pos_bags == 0 ):
        return( results )
    # split test/training sets ***************************************************************************************
    #num_test_bbt_bags = 6
    #bbt_train_bags = bbt_bags[num_test_bbt_bags:]
    #bbt_train_labels = bbt_label_nums[num_test_bbt_bags:]
    #bbt_test_bags = bbt_bags[:num_test_bbt_bags]
    #bbt_test_labels = bbt_label_nums[:num_test_bbt_bags]
    # for debugging have test equal train
    bbt_train_bags = bbt_bags
    bbt_train_labels = bbt_label_nums
    bbt_test_bags = bbt_bags
    bbt_test_labels = bbt_label_nums
    #print '\nbbt_train_labels'
    #print bbt_train_labels
    #print '\nbbt_test_labels'
    #print bbt_test_labels
    # reg is regularisation ************************************************************************
    bbt_reg_range = [ 1.0 ]
    #bbt_reg_range = [ 0.001, 1.0, 1000 ]
    #reg_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
    # run bag classificaton ***********************************************************************************************
    bag_classifier_names = [ "NSK", "stMIL", "SIL", "MISVM" ]
    bbt_classifiers = {} #empty dictionary
    # one classifier instance per (algorithm, regularisation) pair
    for reg in bbt_reg_range:
        bbt_classifiers["NSK" + str(reg)] = misvm.NSK(verbose=False, C=reg)
        bbt_classifiers["stMIL" + str(reg)] = misvm.stMIL(verbose=False, C=reg)
        bbt_classifiers["SIL" + str(reg)] = misvm.SIL(verbose=False, C=reg)
        bbt_classifiers["MISVM" + str(reg)] = misvm.MISVM(kernel='linear',verbose=False, C=reg, max_iters=50)
    #bbt_accuracies = run_bag_classifiers( bbt_train_bags, bbt_train_labels, bbt_test_bags, bbt_test_labels, bbt_classifiers )
    # Train/Evaluate classifiers
    bbt_accuracies = {} #empty dictionary
    for bbt_algorithm, bbt_classifier in bbt_classifiers.items():
        bbt_classifier.fit( bbt_train_bags, bbt_train_labels )
        bbt_predictions = bbt_classifier.predict( bbt_test_bags )
        # accuracy = fraction of bags whose predicted sign matches the label
        bbt_accuracies[ bbt_algorithm ] = np.average( bbt_test_labels == np.sign( bbt_predictions ) )
    print this_person
    print this_episode
    print '\n********** bbt bag only results'
    for d in range( 0, len( bag_classifier_names ) ):
        disp_results( bag_classifier_names[ d ], bbt_reg_range, bbt_accuracies)
    # test set with a single face track in each bag ********************************************
    # instance labels for this person in s01e01
    #face_track_person_filename = "bbt_s01e0" + this_episode + "_" + this_person + "_face_track_id_person.txt"
    #bbt_instance_test_labels = get_instance_labels( this_person, face_track_person_filename )
    number_bbt_instance_test_labels = len( bbt_instance_test_labels )
    #print '\nbbt instance test labels length %d' % number_bbt_instance_test_labels
    bbt_inst_bag_accuracies = {}
    for bbt_algorithm, bbt_classifier in bbt_classifiers.items():
        number_bbt_inst_bags = len( bbt_inst_bags )
        #print '\nbbt inst bags length %d' % number_bbt_inst_bags
        # sanity check: one label per single-instance bag
        if number_bbt_instance_test_labels != number_bbt_inst_bags:
            sys.exit( "something has gone very wrong with inst bags" )
        bbt_inst_bag_predictions = bbt_classifier.predict( bbt_inst_bags )
        bbt_inst_bag_accuracies[ bbt_algorithm ] = np.average( bbt_instance_test_labels == np.sign( bbt_inst_bag_predictions ) )
    print '\n********** testing bbt one face track instances per bag results'
    for d in range( 0, len( bag_classifier_names ) ):
        disp_results( bag_classifier_names[ d ], bbt_reg_range, bbt_inst_bag_accuracies)
    # instance predictions ************************************************************
    # only SIL and MISVM support instance-level prediction
    inst_classifier_names = [ "SIL", "MISVM" ]
    bbt_inst_classifiers = {} #empty dictionary
    for reg in bbt_reg_range:
        bbt_inst_classifiers["SIL" + str(reg)] = misvm.SIL(verbose=False, C=reg)
        bbt_inst_classifiers["MISVM" + str(reg)] = misvm.MISVM(kernel='linear',verbose=False, C=reg, max_iters=50)
    # Train/Evaluate classifiers
    bbt_inst_accuracies = {} #empty dictionary
    bbt_bag_accuracies = {}
    for bbt_inst_algorithm, bbt_inst_classifier in bbt_inst_classifiers.items():
        bbt_inst_classifier.fit( bbt_train_bags, bbt_train_labels )
        bbt_bag_label_predictions, bbt_inst_label_predictions = bbt_inst_classifier.predict( bbt_test_bags, instancePrediction=True )
        bbt_bag_accuracies[ bbt_inst_algorithm ] = np.average( bbt_test_labels == np.sign( bbt_bag_label_predictions ) )
        bbt_inst_accuracies[ bbt_inst_algorithm ] = np.average( bbt_instance_test_labels == np.sign( bbt_inst_label_predictions ) )
    print '\n********** bbt instance results'
    for d in range( 0, len( inst_classifier_names ) ):
        disp_results( inst_classifier_names[ d ], bbt_reg_range, bbt_inst_accuracies)
    results.append( num_pos_bags )
    results.append( num_neg_bags )
    return( results )
def main():
    """Run every person/episode combination and total the bag counts."""
    all_people = [ 'Sheldon', 'Leonard', 'Howard', 'Penny', 'Raj' ]
    #all_people = [ 'Leonard' ]
    all_episodes = [ '1', '2', '3', '4', '5', '6' ]
    #all_episodes = [ '4', '5' ]
    num_pos_bags = 0 ;
    num_neg_bags = 0 ;
    for this_person in all_people:
        print '\n this person %s' % this_person
        for this_episode in all_episodes:
            print "\n this episode %s" % this_episode
            one_run_results = run_person_for_one_episode( this_episode, this_person )
            #print '\ndebug main after run_person_for_one_episode'
            #print len( one_run_results )
            # an empty result means the episode had only one class of bag
            if len( one_run_results ) != 0:
                # NOTE(review): the next two bare expressions have no effect
                one_run_results[0]
                one_run_results[1]
                num_pos_bags = num_pos_bags + one_run_results[ 0 ]
                num_neg_bags = num_neg_bags + one_run_results[ 1 ]
        print '\nafter this person %s' % this_person
        print '\nso far, number of positive bags '
        print num_pos_bags
        print '\nso far, number of negative bags '
        print num_neg_bags
        #keyboard_input = raw_input("Enter input")
    print '\n******************'
    print '\ntotal number of positive bags '
    print num_pos_bags
    print '\ntotal number of negative bags '
    print num_neg_bags
    # *******************************************************************************************************
    #iris=datasets.load_iris()
    #clf = svm.SVC(kernel='linear', C=1)
    #scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    #print '\n results from SVM cross val iris data '
    #print scores

if __name__ == '__main__':
    main()
|
import certifi
import morepath
import ssl
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from elasticsearch import ConnectionError # shadows a python builtin!
from elasticsearch import Elasticsearch
from elasticsearch import Transport
from elasticsearch import TransportError
from elasticsearch.connection import create_ssl_context
from more.transaction.main import transaction_tween_factory
from onegov.search import Search, log
from onegov.search.errors import SearchOfflineError
from onegov.search.indexer import Indexer
from onegov.search.indexer import ORMEventTranslator
from onegov.search.indexer import TypeMappingRegistry
from onegov.search.utils import searchable_sqlalchemy_models
from sortedcontainers import SortedSet
from sqlalchemy import inspect
from sqlalchemy.orm import undefer
from urllib3.exceptions import HTTPError
class TolerantTransport(Transport):
    """ A transport class that is less eager to rejoin connections when there's
    a failure. Additionally logs all Elasticsearch transport errors in one
    location.

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # time of the most recent failure (None until the first one)
        self.failure_time = None
        # number of consecutive failures; reset to 0 on any success
        self.failures = 0

    @property
    def skip_request(self):
        """ Returns True if the request should be skipped. """
        if not self.failures:
            # no recent failures -> always try
            return False
        if not self.seconds_remaining:
            # the back-off window has elapsed -> try again
            return False
        return True

    @property
    def seconds_remaining(self):
        """ Returns the seconds remaining until the next try or 0.

        For each failure we wait an additional 10s (10s, then 20s, 30s, etc),
        up to a maximum of 300s (5 minutes).

        """
        timeout = min((self.failures * 10), 300)
        elapsed = (datetime.utcnow() - self.failure_time).total_seconds()
        return int(max(timeout - elapsed, 0))

    def perform_request(self, *args, **kwargs):
        # while inside the back-off window, fail fast instead of hitting
        # the (presumably still offline) cluster again
        if self.skip_request:
            log.info(f"Elasticsearch down, retry in {self.seconds_remaining}s")
            raise SearchOfflineError()
        try:
            response = super().perform_request(*args, **kwargs)
        except (TransportError, HTTPError) as exception:
            # transport errors might be caused by bugs (for example, when we
            # refer to a non-existant index) -> we are only tolerant of
            # connection errors
            if isinstance(exception, TransportError):
                if not isinstance(exception, ConnectionError):
                    if not is_5xx_error(exception):
                        raise
            self.failures += 1
            self.failure_time = datetime.utcnow()
            log.exception("Elasticsearch cluster is offline")
            raise SearchOfflineError() from exception
        else:
            # success resets the back-off counter
            self.failures = 0
            return response
def is_5xx_error(error):
    """Return True if the transport error carries a 5xx HTTP status code.

    ``status_code`` may be unset (e.g. ``None``) for connection-level
    failures; those are not 5xx errors.  Wrapped in ``bool`` so callers
    always get True/False — the original leaked the falsy ``status_code``
    value itself (``None``/``0``) instead of False.
    """
    return bool(error.status_code) and str(error.status_code).startswith('5')
class ElasticsearchApp(morepath.App):
    """ Provides elasticsearch integration for
    :class:`onegov.core.framework.Framework` based applications.

    The application must be connected to a database.

    Usage::

        from onegov.core import Framework

        class MyApp(Framework, ESIntegration):
            pass

    """

    def configure_search(self, **cfg):
        """ Configures the elasticsearch client, leaving it as a property
        on the class::

            app.es_client

        The following configuration options are accepted:

        :enable_elasticsearch:
            If True, elasticsearch is enabled (defaults to True).

        :elasticsearch_hosts:
            A list of elasticsearch clusters, including username, password,
            protocol and port.

            For example: ``https://user:secret@localhost:443``

            By default the client connects to the localhost on port 9200
            (the default), and on port 19200 (the default of boxen).

            At least one host in the list of servers must be up at startup.

        :elasticsearch_may_queue_size:
            The maximum queue size reserved for documents to be indexed. This
            queue is filling up if the elasticsearch cluster cannot be reached.

            Once the queue is full, warnings are emitted.

            Defaults to 10'000

        :elasticsearch_verify_certs:
            If true, the elasticsearch client verifies the certificates of
            the ssl connection. Defaults to true. Do not disable, unless you
            are in testing!

        :elasticsearch_languages:
            The languages supported by onegov.search. Defaults to:

            - en
            - de
            - fr

        """
        if not cfg.get('enable_elasticsearch', True):
            self.es_client = None
            return
        self.es_hosts = cfg.get('elasticsearch_hosts', (
            'http://localhost:9200',
        ))
        self.es_verify_certs = cfg.get('elasticsearch_verify_certs', True)
        if cfg.get('elasticsearch_verify_certs', True):
            self.es_extra_params = {
                'verify_certs': True,
                'ca_certs': certifi.where()
            }
        else:
            # testing only: accept any certificate
            ssl_context = create_ssl_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            self.es_extra_params = {
                'verify_certs': False,
                'ssl_context': ssl_context
            }
        self.es_configure_client(usage='default')
        if self.has_database_connection:
            # NOTE(review): the config key is misspelled ('elasticsarch...')
            # and also does not match the 'elasticsearch_may_queue_size'
            # option documented above — confirm which spelling deployments
            # actually use before changing it
            max_queue_size = int(cfg.get(
                'elasticsarch_max_queue_size', '10000'))
            self.es_mappings = TypeMappingRegistry()
            for base in self.session_manager.bases:
                self.es_mappings.register_orm_base(base)
            self.es_orm_events = ORMEventTranslator(
                self.es_mappings,
                max_queue_size=max_queue_size
            )
            self.es_indexer = Indexer(
                self.es_mappings,
                self.es_orm_events.queue,
                es_client=self.es_client
            )
            # keep the search index in sync with ORM changes
            self.session_manager.on_insert.connect(
                self.es_orm_events.on_insert)
            self.session_manager.on_update.connect(
                self.es_orm_events.on_update)
            self.session_manager.on_delete.connect(
                self.es_orm_events.on_delete)

    def es_configure_client(self, usage='default'):
        """ (Re-)creates ``self.es_client`` with the given usage profile.

        The 'reindex' profile waits longer and retries more often, since
        bulk indexing puts more load on the cluster.
        """
        usages = {
            'default': {
                'timeout': 3,
                'max_retries': 1
            },
            'reindex': {
                'timeout': 10,
                'max_retries': 3
            }
        }
        self.es_client = Elasticsearch(
            hosts=self.es_hosts,
            transport_class=TolerantTransport,
            **usages[usage],
            **self.es_extra_params
        )

    def es_search(self, languages='*', types='*', include_private=False,
                  explain=False):
        """ Returns a search scoped to the current application, with the
        given languages, types and private documents excluded by default.

        """
        search = Search(
            session=self.session(),
            mappings=self.es_mappings,
            using=self.es_client,
            index=self.es_indices(languages, types),
            extra=dict(explain=explain)
        )
        if not include_private:
            search = search.filter("term", es_public=True)
        # by default, do not include any fields (this will still include
        # the id and the type, which is enough for the orm querying)
        search = search.source(excludes=['*'])
        return search

    def es_indices(self, languages='*', types='*'):
        # the external index names for this application's schema
        return self.es_indexer.ixmgr.get_external_index_names(
            schema=self.schema,
            languages=languages,
            types=types
        )

    def es_search_by_request(self, request, types='*', explain=False,
                             limit_to_request_language=False):
        """ Takes the current :class:`~onegov.core.request.CoreRequest` and
        returns an elastic search scoped to the current application, the
        requests language and it's access rights.

        """
        if limit_to_request_language:
            # e.g. 'de_CH' -> 'de'
            languages = [request.locale.split('_')[0]]
        else:
            languages = '*'
        return self.es_search(
            languages=languages,
            types=types,
            include_private=self.es_may_use_private_search(request),
            explain=explain
        )

    def es_suggestions(self, query, languages='*', types='*',
                       include_private=False):
        """ Returns suggestions for the given query. """
        if not query:
            return []
        if include_private:
            context = ['public', 'private']
        else:
            context = ['public']
        search = self.es_search(
            languages=languages,
            types=types,
            include_private=include_private
        )
        search = search.suggest(
            name='es_suggestion',
            text=query,
            completion={
                'field': 'es_suggestion',
                'skip_duplicates': True,
                'contexts': {
                    'es_suggestion_context': context
                }
            }
        )
        result = search.execute()
        # if there's no matching index, no suggestions are returned, which
        # happens if the Elasticsearch cluster is being rebuilt
        if not hasattr(result, 'suggest'):
            return ()
        # SortedSet deduplicates and keeps the output order deterministic
        suggestions = SortedSet()
        for suggestion in getattr(result.suggest, 'es_suggestion', []):
            for item in suggestion['options']:
                suggestions.add(item['text'].strip())
        return tuple(suggestions)

    def es_suggestions_by_request(self, request, query, types='*',
                                  limit_to_request_language=False):
        """ Returns suggestions for the given query, scoped to the language
        and the login status of the given requst.

        """
        if limit_to_request_language:
            languages = [request.locale.split('_')[0]]
        else:
            languages = '*'
        return self.es_suggestions(
            query,
            languages=languages,
            types=types,
            include_private=self.es_may_use_private_search(request)
        )

    def es_may_use_private_search(self, request):
        """ Returns True if the given request is allowed to access private
        search results. By default every logged in user has access to those.

        This method may be overwritten if this is not desired.

        """
        return request.is_logged_in

    def es_perform_reindex(self, fail=False):
        """ Reindexes all content.

        This is a heavy operation and should be run with consideration.

        By default, all exceptions during reindex are silently ignored.

        """
        self.es_configure_client(usage='reindex')
        self.es_indexer.ixmgr.created_indices = set()
        # delete all existing indices
        ixs = self.es_indexer.ixmgr.get_managed_indices_wildcard(self.schema)
        self.es_client.indices.delete(index=ixs)
        # have no queue limit for reindexing (that we're able to change
        # this here is a bit of a CPython implementation detail) - we can't
        # necessarily always rely on being able to change this property
        self.es_orm_events.queue.maxsize = 0

        # load all database objects and index them
        def reindex_model(model):
            session = self.session()
            try:
                q = session.query(model).options(undefer('*'))
                i = inspect(model)
                # for polymorphic models, index only rows of this identity
                if i.polymorphic_on is not None:
                    q = q.filter(i.polymorphic_on == i.polymorphic_identity)
                for obj in q:
                    self.es_orm_events.index(self.schema, obj)
            finally:
                session.invalidate()
                session.bind.dispose()

        # by loading models in threads we can speed up the whole process
        with ThreadPoolExecutor() as executor:
            results = executor.map(
                reindex_model, (
                    model
                    for base in self.session_manager.bases
                    for model in searchable_sqlalchemy_models(base)
                )
            )
            if fail:
                # consuming the lazy map surfaces any worker exception
                tuple(results)
        self.es_indexer.bulk_process()
@ElasticsearchApp.tween_factory(over=transaction_tween_factory)
def process_indexer_tween_factory(app, handler):
    """Build a tween that flushes the search indexer after each request."""
    def process_indexer_tween(request):
        response = handler(request)
        # only process the queue when elasticsearch is enabled
        if request.app.es_client:
            request.app.es_indexer.process()
        return response
    return process_indexer_tween
|
#!/usr/bin/env python
"""
Update the MYSQL_USER and MYSQL_PASSWORD variables below.
"""
import subprocess
import re
import sys
"""
This script runs /usr/bin/mysqladmin status and cuts up the output into Nagios format.
You may need to update the MYSQL_USER and MYSQL_PASSWORD with an account that can connect.
"""
# Credentials used by mysqladmin; fill these in for your environment.
MYSQL_USER = 'root'
MYSQL_PASSWORD = ''
command = ['/usr/bin/mysqladmin', 'status']
if MYSQL_USER:
    command.append('-u%s' % MYSQL_USER)
if MYSQL_PASSWORD:
    command.append('-p%s' % MYSQL_PASSWORD)
try:
    status = subprocess.check_output(command)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider catching subprocess.CalledProcessError and OSError instead
    print "connection failure"
    sys.exit(2)
# Build Nagios performance data: "OK | key=value;;;; key=value;;;; ..."
output = "OK | "
# mysqladmin status emits 'Key: value Key: value ...' on one line
metric_list = status.split(' ')
for metric in metric_list:
    k = metric.split(':')[0].lower().replace(' ', '_').strip()
    v = metric.split(':')[1].strip()
    output += k + '=' + v + ';;;; '
print output
# exit 0 = Nagios OK
sys.exit(0)
|
#!/usr/bin/env python
# Poll a Twitter timeline and forward 'RPi ON'/'RPi OFF' tweets to a serial
# device as 'H'/'L' commands (Python 2 / Raspberry Pi script).
import sys
from twython import Twython
import os
import time
import serial
import RPi.GPIO as GPIO
# Twitter API credentials — replace with your own application's keys.
CONSUMER_KEY = 'YOUR USER DATA'
CONSUMER_SECRET = 'YOUR USER DATA'
ACCESS_KEY = 'YOUR USER DATA'
ACCESS_SECRET = 'YOUR USER DATA'
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET)
GPIO.setmode(GPIO.BOARD)
# set up GPIO 11 as output
GPIO.setup(11,GPIO.OUT)
ser = serial.Serial('/dev/ttyUSB0', 9600)
while True:
    # fetch only the most recent tweet every 2 seconds
    user_timeline = api.get_user_timeline(screen_name="YOUR USER IDA",count=1)
    time.sleep(2)
    for tweet in user_timeline:
        tweet['text'] = Twython.html_for_tweet(tweet)
        print tweet['text']
        if (tweet['text'] == "RPi ON"):
            ser.write('H')
        if (tweet['text'] == "RPi OFF"):
            ser.write('L')
# NOTE(review): unreachable — the loop above never exits, so the GPIO pins
# are never cleaned up on shutdown
GPIO.cleanup()
|
"""added electricity use
Revision ID: f64c62f21745
Revises: bc6d06c013a1
Create Date: 2021-07-06 12:08:22.934817
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'f64c62f21745'
down_revision = 'bc6d06c013a1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('e_use',
sa.Column('date_time', sa.DateTime(), nullable=False),
sa.Column('electricity_use', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('date_time')
)
op.alter_column('role', 'name',
existing_type=mysql.VARCHAR(length=80),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('role', 'name',
existing_type=mysql.VARCHAR(length=80),
nullable=True)
op.drop_table('e_use')
# ### end Alembic commands ###
|
# Função: Calcular peso com o IMC
# Autor: Roberta de Lima Ribeiro
print("CALCULAR PESO")
peso = int(input("Digite peso"))
altura = float(input("Digite a altura"))
imc = peso/(altura**2)
print("Seu IMC: ", imc)
if (imc<18.5):
print("Abaixo do peso")
elif (imc>25):
print("Acima do peso")
else :
print("Peso normal")
print("fim do programa")
|
class Employee:
company = "google"
@staticmethod
def greet():
print("Hello Good Morning ! ")
|
# Generated by Django 2.1.7 on 2019-03-30 12:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0006_auto_20190330_1524'),
]
operations = [
migrations.CreateModel(
name='BaseProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание')),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='Email')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ManagerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание')),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='Email')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль организатора',
'verbose_name_plural': 'Профили организаторов',
},
),
migrations.CreateModel(
name='PartnerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_phys', models.BooleanField(blank=True, default=True, verbose_name='Физическое лицо?')),
('inn', models.CharField(blank=True, max_length=100, null=True, verbose_name='ИНН')),
('site', models.CharField(blank=True, max_length=100, null=True, verbose_name='Веб-сайт')),
('description', models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание')),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='Email')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль партнера',
'verbose_name_plural': 'Профили партнеров',
},
),
migrations.CreateModel(
name='StaffProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание')),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='Email')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль сотрудника',
'verbose_name_plural': 'Профили сотрудников',
},
),
migrations.CreateModel(
name='VolunteerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, max_length=100, null=True, verbose_name='Краткое описание')),
('phone', models.CharField(blank=True, max_length=100, null=True, verbose_name='Телефон')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='Email')),
('image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Картинка')),
('name', models.CharField(max_length=100, verbose_name='Название')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль волонтера',
'verbose_name_plural': 'Профили волонтеров',
},
),
migrations.RemoveField(
model_name='profilemodel',
name='user',
),
migrations.RemoveField(
model_name='taskmodel',
name='dt',
),
migrations.AddField(
model_name='taskmodel',
name='deadline',
field=models.DateTimeField(null=True, verbose_name='Deadline'),
),
migrations.AlterField(
model_name='taskmodel',
name='event',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='core.EventModel', verbose_name='Мероприятие'),
preserve_default=False,
),
migrations.AlterField(
model_name='taskmodel',
name='name',
field=models.CharField(max_length=100, verbose_name='Название'),
),
migrations.AlterField(
model_name='taskmodel',
name='partner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='core.PartnerProfile', verbose_name='Партнер'),
),
migrations.DeleteModel(
name='ProfileModel',
),
migrations.AddField(
model_name='eventmodel',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.ManagerProfile'),
preserve_default=False,
),
migrations.AddField(
model_name='taskmodel',
name='perfomer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='core.VolunteerProfile'),
preserve_default=False,
),
]
|
import sys
from PyQt5 import QtCore, QtWidgets, QtGui
from dis import Ui_MainWindow
class MY_Window(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton.clicked.connect(self.open_File)
self.ui.pushButton_3.clicked.connect(self.open_Files)
self.ui.pushButton_2.clicked.connect(self.save_file)
self.ui.pushButton_5.clicked.connect(self.open_folder)
self.ui.pushButton_4.clicked.connect(self.color)
def open_File(self):
file = QtWidgets.QFileDialog.getOpenFileName(self, "Open File", "", "Python File *.py\nВсе файлы (*)")
print("hi")
# fil = open(file[0], "r")
# with fil as f:
# print(f.readlines())
def open_Files(self):
file = QtWidgets.QFileDialog.getOpenFileNames(self, "Open Files", "", "Python File *.py\nВсе файлы (*)")
print(file)
def save_file(self):
file = QtWidgets.QFileDialog.getSaveFileName(self, "Save File", "", "Txt File *.txt\nВсе файлы (*)")
print(file)
with open(file[0], "w") as f:
f.write("STEPA LOH")
def open_folder(self):
folder = QtWidgets.QFileDialog.getExistingDirectory(self)
print(folder)
def color(self):
color = QtWidgets.QColorDialog(self).getColor()
print(color.getRgb())
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
my_app = MY_Window()
my_app.show()
sys.exit(app.exec_()) |
m = input("Introduce un numero: ")
print "Numeros primos: ",
for i in range(2, m+1):
primo = True
for x in range(2, i):
if i%x == 0:
primo = False
if primo == True:
print i,
|
from BSTIterator import BSTIterator
from BSTNode import BSTNode
# Class name: BST
# Instance Variables: root (the root of the BST)
# isize (number of elements in BST)
# iheight (height of BST)
# Description: Implements a BST
# Methods: init, insert, find, begin, first, end, traverse, traverseHelper
class BST:
# Default constructor.
# Initialize an empty BST.
def __init__(self):
# root of BST
self.root = None
# Number of data items in this BST
self.size = 0
# Height of BST
self.height = 0
# Given a reference to a Data item, insert a copy of it in this BST.
# Return true if the item was added to this BST as a result of this
# call to insert.
# Return false if an item equal to this one was already in this BST.
def insert(self, Data):
# Working or current node
working = self.root
# Parent of working node
parent = None
# How far we went down in the BST to insert
localHeight = 0
# While working node is not null, if item is less than data of
# working node, go to left, and increment local height. If data
# in working node is greater than item, go to right, and increment
# local height. Otherwise, data in working node equals item;
# return false to prevent duplicate insert.
while(working != None):
parent = working
if(Data < working.val):
working = working.left
localHeight+=1
elif(working.val < Data):
working = working.right
localHeight+=1
else:
return False
# If insertion should happen, compare local height to height of
# tree. Update height of tree if local height > height
if(localHeight > self.height):
self.height = localHeight
# Create a new BST node whose data is item and whose parent is
# the parent of the working node. Insert it into the tree. Update
# size of tree and return true
working = BSTNode(Data)
working.parent = parent
if self.root == None:
self.root = working
else:
if Data < parent.val:
parent.left = working
else:
parent.right = working
self.size+=1
return True
# Find a Data item in the BST.
# Return an iterator pointing to the item, or pointing past
# the last node in the BST if not found.
def find(self, Data):
# working or current node
working = self.root
# While working is not null, if item is less than data in working
# node, go left. If data in working node is less than item, go
# right. Otherwise, data in working node equals item, so return
# iterator pointing to item.
while(working != None):
if Data < working.val:
working = working.left
elif working.val < Data:
working = working.right
else:
return BSTIterator(working)
# If item is not found, return iterator pointing past the last
# node in the BST
return BSTIterator(None)
# Traverse the BST in order
def traverse(self):
# Call traverseHelper function taking root as a parameter
self.traverseHelper(self.root)
# Recursive inorder traversal 'helper' function
def traverseHelper(self, working):
# If current node is not null
# Recursively traverse left subtree
# Print current node data
# Recursively traverse right subtree
if working != None:
self.traverseHelper(working.left)
print(repr(working))
self.traverseHelper(working.right)
# Return an iterator pointing to the first item in the BST
# (not the root).
def begin(self):
return BSTIterator(self.first())
# Find the first element of the BST
def first(self):
# working or current node
working = self.root
# parent of working node
parent = None
# keep going left until you can't
while working != None:
parent = working
working = working.left
return parent
# Return an iterator pointing past the last item in the BST.
def end(self):
return BSTIterator(None)
|
# coding: utf8
import json
import configparser
import html.parser
import urllib, urllib.request, urllib.parse
# 读取URL获得验证码的路径HTML解析类
class LoginRandCodeParser(html.parser.HTMLParser):
def __init__(self):
self.randCodeUrl = ""
self.rand = ''
html.parser.HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == 'img' and ('id', 'img_rand_code') in attrs:
tag_attrs = dict(attrs)
if 'src' in tag_attrs and tag_attrs['src']:
# 登录验证码的相对路径
relative_path = tag_attrs['src']
# 完整路径
self.randCodeUrl = "https://kyfw.12306.cn" + relative_path
img_code_params = urllib.parse.parse_qs(relative_path)
if 'rand' in img_code_params:
# 登录验证码的验证令牌
self.rand = img_code_params['rand'][0] if img_code_params['rand'] else ''
# 解析登录后返回的HTML, 获取用户帐户信息
# 用于判断用户是否成功登录
class InfoCenterParser(html.parser.HTMLParser):
def __init__(self):
self.account_name = ""
self.user_info_link = False
self.flag = False
html.parser.HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == 'a' and ('id', 'login_user') in attrs:
self.user_info_link = True
if tag == 'span' and self.user_info_link:
self.flag = True
def handle_data(self, data):
if self.user_info_link and self.flag:
self.account_name = data
def handle_endtag(self, tag):
if tag == 'a':
self.user_info_link = False
if tag == 'span':
self.flag = False
# 获取验证码图片
def getRandImageUrlAndCodeRand(ht):
result = {'url': '', 'rand': ''}
# 得到登录页面HTML内容
loginHtml = ht.get(url="https://kyfw.12306.cn/otn/login/init")
# 解析登录页面内容,获取图片验证码的URL地址,以及验证码令牌rand
loginParer = LoginRandCodeParser()
loginParer.feed(loginHtml)
randUrl = loginParer.randCodeUrl
rand = loginParer.rand
if randUrl and rand:
result['url'] = randUrl
result['rand'] = rand
return result
else:
f = open("login.html", 'w', encoding='utf-8')
f.write(loginHtml)
f.close()
print("验证码URL获取失败, 详情查看返回的login.html页面")
return result
def login(ht, username, password, randCode, rand, check_rand_status='Y'):
# 判断用户是否进行验证码的检查操作,如果check_rand_status为N则直接跳过进行登录
if check_rand_status == 'Y':
# 判断用户输入的验证码是否正确
post_datas = {
'randCode': randCode, # 输入验证码
'rand': rand # 验证令牌
}
# 检证输入验证码的合法性
json_str = ht.post(url="https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn", params=post_datas)
json_data = json.loads(json_str)
else:
json_data = {'data': 'Y'}
if (json_data["data"] == 'Y'):
post_data = {
"loginUserDTO.user_name": username,
"userDTO.password": password,
"randCode": randCode
}
# 请求 https://kyfw.12306.cn/otn/login/loginAysnSuggest
# 用于判断当前网络环境是否可以登录,得到JSON数据:
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":"Y","messages":[],"validateMessages":{}}
json_str = ht.post(url="https://kyfw.12306.cn/otn/login/loginAysnSuggest", params=post_data)
json_data = json.loads(json_str)
# loginRand = 0
# 检查用户是否可以登录
if ("data" in json_data and json_data["data"] and "loginCheck" in json_data["data"] and json_data["data"][
"loginCheck"] == 'Y'):
# 用户登录,获取登录返回的HTML
content = ht.post(url="https://kyfw.12306.cn/otn/login/userLogin", params=post_data)
# 解析登录返回的HTML判断用户是否成功登录
infocenterParser = InfoCenterParser()
infocenterParser.feed(content)
user_info = infocenterParser.account_name
if user_info:
print('您好, %s' % user_info)
return True
else:
f = open("login_result.html", 'w', encoding='utf-8', errors='ignore')
f.write(content)
f.close()
print("登录失败, 详情查看登录返回的login_result.html页面")
else:
messages = json_data.get('messages', '') if type(json_data) == dict else json_str
if not messages: messages = '当前网络繁忙不可登录访问!'
print(messages)
else:
print(json_str)
print('输入的验证码有误.')
return False
# 读取config.ini文件获取用户设置的帐号信息
def getUserInfo():
config = configparser.ConfigParser()
config.read("config.ini")
try:
username = config.get("UserInfo", "username")
password = config.get("UserInfo", "password")
except configparser.NoSectionError:
print("请设置登录信息的config.ini文件")
input("\r\n输入任意字符结束...")
else:
if username.strip() != '' and password.strip() != '':
return (username, password)
else:
print("请设置登录的用户名与密码")
input("\r\n输入任意字符结束...")
return None
# 读取config.ini文件获取系统性配置信息
def getPerformanceInfo():
config = configparser.ConfigParser()
config.read("config.ini")
try:
performanceInfo = dict(config.items("PerformanceInfo"))
return performanceInfo
except configparser.NoSectionError:
print("系统性能配置装载失败!")
return {}
def getGoAgentHost():
config = configparser.ConfigParser()
config.read("config.ini")
try:
host = dict(config.items("GoAgentHost"))
return host
except configparser.NoSectionError:
print("未设定代理服务器!")
return {} |
import csv
from datetime import datetime
def parseCSV(lot_number,periods):
cp = []
parking_available = []
numbers_list = []
all_carpark = []
time_list=datetime.strptime('19/04/2021 10:59', '%d/%m/%Y %H:%M')
with open('carpark.csv') as csvfile:
rows = csv.reader(csvfile)
res = list(zip(*rows))
r = len(res)
for i in range((r - 2) - 1):
i = i + 2
if i % 2 == 0:
cpnum = res[i][0][14:]
lots = res[i + 1][2]
avail = (res[i][1:])
time = res[1][1:]
cp = [cpnum, lots, avail, time]
all_carpark.append(cp)
# parse data to display on chart
for carpark in all_carpark:
if carpark[0] == lot_number:
for available in carpark[2]:
if len(parking_available)<periods:
parking_available.append(int(available))
else:
break
numbers_list = list(range(0, len(parking_available)))
numbers_list.reverse()
# time conversion
time_list = datetime.strptime('19/04/2021 10:59', '%d/%m/%Y %H:%M')
print(time_list.strftime("%H%M %d/%m/%Y"))
break
print(parking_available)
return parking_available, numbers_list, time_list |
# There is a
# collection
# of
# strings(There
# can
# be
# multiple
# occurences
# of
# a
# particular
# string ).Each
# string
# 's length is no more than characters. There are also queries. For each query, you are given a string, and you need to find out how many times this string occurs in the given collection of strings.
#
# Input
# Format
#
# The
# first
# line
# contains, the
# number
# of
# strings.
# The
# next
# lines
# each
# contain
# a
# string.
# The
# nd
# line
# contains, the
# number
# of
# queries.
# The
# following
# lines
# each
# contain
# a
# query
# string.
#
# Constraints
#
# Sample
# Input
#
# 4
# aba
# baba
# aba
# xzxb
# 3
# aba
# xzxb
# ab
# Sample
# Output
#
# 2
# 1
# 0
# Explanation
#
# Here, "aba"
# occurs
# twice, in the
# first and third
# string.The
# string
# "xzxb"
# occurs
# once in the
# fourth
# string, and "ab"
# does
# not occur
# at
# all.
|
import pya
# create a unique representation of the application (klayout program)
app = pya.Application.instance()
# create the main window of the program
# (that include the menus, the tool panels, the layout views...)
mw = app.main_window()
# create a layout view, which is a representation of a layout tab
# can be multiple layout view. Here we select the current tab.
lv = mw.current_view()
# If there is no tab, a message is displayed and the program stop
if lv == None:
pya.MessageBox.info("Shape Statistics",
"No view selected.", pya.MessageBox.Ok)
exit()
# Remark: the preparation step can be simplified to:
# lv = pya.LayoutView.current()
# creation of app and mw was done for demonstration purpose
# set numbers of paths, polygons, boxes and texts to 0
paths = 0
polygons = 0
boxes = 0
texts = 0
# for each selected object, check the shape and add
# one to the number of corresponding shape
for sel in lv.each_object_selected():
shape = sel.shape
if shape.is_path():
paths += 1
elif shape.is_box():
boxes += 1
elif shape.is_polygon():
polygons += 1
elif shape.is_text():
texts += 1
# Prepare the message reporting the number of shapes
s = f"Paths: {paths}\n"
s += f"Polygons: {polygons}\n"
s += f"Boxes: {boxes}\n"
s += f"Texts: {texts}\n"
# Report the number of shapes in a message box
pya.MessageBox.info("Shape Statistics", s, pya.MessageBox.Ok) |
from .base import *
class User(Base):
__tablename__ = 'user'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(250), nullable=False)
nama = db.Column(db.String(250), nullable=False)
genre = db.Column(db.String(250))
password = db.Column(db.String(250), nullable=False)
def __init__(self):
db.create_all()
|
#input
# 8
# 62 53
# 96 7
# 104 97
# 90 7
# 109 7
# 103 7
# 75 8
# 113 7
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def c(n, k):
result = factorial(n) / (factorial(k) * factorial(n - k))
return int(result)
n = int(input())
for i in range(0, n):
(n, k) = (int(x) for x in input().split())
comb = c(n, k)
print(comb, "", end="") |
from .database import Database
class EmailResultHelper(Database):
def __init__(self, *args):
super(EmailResultHelper, self).__init__(*args)
def create_email_result(self, list_id, list_segment_id, templates_id, result,
result_description, campaign_id=None,
ab_campaign_id=None):
data = {"campaign_id": campaign_id, "ab_campaign_id": ab_campaign_id, "list_id": list_id,
"list_segment_id": list_segment_id, "templates_id": templates_id,
"result": result, "result_description": result_description}
sql_cursor = self.insert("email_results", data)
return sql_cursor
def create_ab_email_result(self, ab_campaign_id, list_id, list_segment_id, templates_id, result, result_description):
data = {"ab_campaign_id": ab_campaign_id, "list_id": list_id,
"list_segment_id": list_segment_id, "templates_id": templates_id,
"result": result, "result_description": result_description}
sql_cursor = self.insert("email_results", data)
return sql_cursor
# def get_email_result_by_campaign_segment_id(self, segment_id, campaign_id):
# query = "Select id, list_id from email_results where list_segment_id=%s" \
# " and campaign_id=%s" % (segment_id, campaign_id)
#
# return self.fetch_all(query)
def check_if_all_emails_processed_for_campaign(self, campaign_id):
fields = ('id', 'list_id')
where = ("(result='SENT' or result='ERROR') and campaign_id=%s", [campaign_id])
return self.getAll('email_results', fields=fields, where=where)
def check_if_all_emails_processed_for_ab_campaign(self, campaign_id):
fields = ('id', 'list_id')
where = ("(result='SENT' or result='ERROR') and ab_campaign_id=%s", [campaign_id])
return self.getAll('email_results', fields=fields, where=where)
# def get_email_results_by_campaign_id(self, campaign_id):
# query = "Select em.id, em.list_id, em.list_segment_id, em.templates_id, em.result, em.result_description, " \
# "ls.list_name, lsg.email from email_results em" \
# " inner join list ls on ls.id=em.list_id" \
# " inner join list_segments lsg on lsg.id=em.list_segment_id" \
# " where campaign_id=%s" % (campaign_id)
#
# return self.fetch_all(query)
# def check_is_email_already_sent(self, list_segment_id, campaign_id):
# query = "Select * from email_results where list_segment_id=%s and campaign_id=%s" \
# % (list_segment_id, campaign_id)
#
# emails_sent = self.fetch_all(query)
# if emails_sent and len(emails_sent) > 0:
# return True
# return False
|
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute
from random_bin import random_bin
class channel:
def __init__(self, n):
self.channel = QuantumCircuit(n, name="Channel")
self.backend = Aer.get_backend("qasm_simulator")
def get_channel(self):
return self.channel
def send(self, key):
for i in range(key):
if (key[i]):
self.channel.x(i)
def recive(self):
c = ClassicalRegister(n)
self.channel.
|
"""Advent of Code Day 18 - Like a GIF For Your Yard"""
def light_show(part_two=False):
lights = {}
for num, line in enumerate(light_lines):
for pos, light in enumerate(line):
lights['{},{}'.format(pos, num)] = light
steps = 0
while steps < 100:
new_lights = {}
for light, state in lights.items():
coords = light.split(',')
x = int(coords[0])
y = int(coords[1])
adjacent = [str(x - 1) + ',' + str(y), str(x + 1) + ',' + str(y),
str(x) + ',' + str(y - 1), str(x) + ',' + str(y + 1),
str(x - 1) + ',' + str(y - 1), str(x - 1) + ',' + str(y + 1),
str(x + 1) + ',' + str(y - 1), str(x + 1) + ',' + str(y + 1),]
adjacent_on = 0
for to_check in adjacent:
if lights.get(to_check, '.') == '#':
adjacent_on += 1
if state == '#':
if adjacent_on in (2, 3):
new_lights['{},{}'.format(x, y)] = '#'
else:
new_lights['{},{}'.format(x, y)] = '.'
elif state == '.':
if adjacent_on == 3:
new_lights['{},{}'.format(x, y)] = '#'
else:
new_lights['{},{}'.format(x, y)] = '.'
if part_two:
for coords in ('0,0', '0,99', '99,0', '99,99',):
new_lights[coords] = '#'
lights = new_lights
steps += 1
on = 0
for state in lights.values():
if state == '#':
on += 1
return on
# Read the puzzle's starting grid, one row of '#'/'.' per line.
with open('input.txt') as f:
    light_lines = [line.strip() for line in f]

# Answer One
print(light_show())
# Answer Two
print(light_show(part_two=True))
|
#import sys
#input = sys.stdin.readline
from math import gcd
def main():
a, b, c = map( int, input().split())
if a == b == c:
if a%2 == 1:
print("0")
else:
print("-1")
return
ans = 0
bi = 2
while a%bi == 0 and b%bi == 0 and c%bi == 0:
a, b, c = a//2+b//2, b//2+c//2, c//2+a//2
ans += 1
print(ans)
if __name__ == '__main__':
main()
|
# Generated by Django 2.2.1 on 2019-07-27 07:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('faculty', '0018_auto_20190727_1212'),
]
operations = [
migrations.AddField(
model_name='leave',
name='leave_type',
field=models.TextField(null=True),
),
]
|
def main():
openfile = open("Presidents.txt", "w")
openfile.write("Bill Clinton\n")
openfile.write("George Bush\n")
openfile.write("Barak Obama\n")
openfile.close()
openfile = open("Presidents.txt", "a")
openfile.write("\nPython is interpreted\n")
openfile.close()
openfile = open("Presidents.txt", "r")
print("Using read() method: ")
print(openfile.read())
print(openfile.tell())
openfile.seek(0, 0)
print(openfile.read())
openfile.close()
main()
|
def solve(marble_set, balance_scale):
return [1,2] |
#code
t = int(input())
for _ in range(t):
n,m = map(int,input().split())
if n==1:
n=2
print(*[p for p in range(n,m+1) if 0 not in [p%d for d in range(2,p)]]) |
class TeamMember:
def __init__(self, name, rating, bkiller):
self.name = name
self.rating = rating
self.bkiller = bkiller
def to_string(self):
return "%s,%s,%s" % (self.name, self.rating, self.bkiller)
|
# -*- coding: utf-8 -*-
import itertools
import time
from collections import defaultdict
from functools import partial
import numpy as np
from joblib import Parallel, delayed, logger as joblib_logger
from scipy.stats import rankdata
from sklearn.base import BaseEstimator, is_classifier, clone
from sklearn.model_selection import check_cv, GroupKFold, LeavePGroupsOut
from sklearn.model_selection._search import _check_param_grid, ParameterGrid
from sklearn.utils import indexable
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.validation import check_is_fitted
from cell_counting.utils import open_scoremap, merge_dicts
from sldc import StandardOutputLogger, Logger
__author__ = "Ulysse Rubens <urubens@uliege.be>"
__version__ = "0.1"
class GridSearchCV(BaseEstimator):
def __init__(self, default_estimator, param_grid, cv, me, untrainable_param_grid=None,
scoring_rank='f1_score', refit=False, iid=True, n_jobs=1, pre_dispatch='2*n_jobs',
logger=StandardOutputLogger(Logger.INFO)):
self.default_estimator = default_estimator
self.param_grid = param_grid
self.untrainable_param_grid = untrainable_param_grid
self.cv = cv
self.me = me
self.scoring_rank = scoring_rank
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.logger = logger
self.refit = refit
self.iid = iid
_check_param_grid(param_grid)
_check_param_grid(untrainable_param_grid)
def fit(self, X, y=None, groups=None):
estimator = self.default_estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
# Regenerate parameter iterable for each fit
candidate_params = ParameterGrid(self.param_grid)
n_candidates = len(candidate_params)
candidate_untrainable_params = ParameterGrid(self.untrainable_param_grid)
untrainable_candidates = len(candidate_untrainable_params)
self.logger.i("[CV] Fitting {} folds for each of {} candidates, totalling"
" {} fits".format(n_splits, n_candidates, n_candidates * n_splits))
base_estimator = clone(self.default_estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.logger.level * 20,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.me,
train, test, self.logger, parameters,
candidate_untrainable_params,
return_n_test_samples=True,
return_times=True)
for train, test in cv.split(X, y, groups)
for parameters in candidate_params)
out = np.vstack([o for o in out])
test_accuracy = out[:, 0]
test_precision = out[:, 1]
test_recall = out[:, 2]
test_f1 = out[:, 3]
test_distance = out[:, 4]
test_count = out[:, 5]
test_count_pct = out[:, 6]
test_raw_count = out[:, 7]
test_raw_count_pct = out[:, 8]
test_density = out[:, 9]
test_raw_density = out[:, 10]
test_sample_counts = out[:, 11]
fit_time = out[:, 12]
score_time = out[:, 13]
results = dict()
n_tot_candidates = n_candidates * untrainable_candidates
tot_candidate_params = list(itertools.product(list(candidate_params), list(candidate_untrainable_params)))
def _store(key_name, array, weights=None, splits=False, rank=False, error=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
array = np.array(array, dtype=np.float64).reshape(n_splits, n_tot_candidates).T
if splits:
for split_i in range(n_splits):
results["split%d_%s" % (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
arr = array_means if error else -array_means
results["rank_%s" % key_name] = np.asarray(rankdata(arr, method='min'), dtype=np.int32)
# Computed the (weighted) mean and std for test scores alone
# NOTE test_sample counts (weights) remain the same for all candidates
if self.iid:
test_sample_counts = np.array(test_sample_counts[::n_tot_candidates], dtype=np.int)
else:
test_sample_counts = None
_store('accuracy_score', test_accuracy, splits=True, rank=True, weights=test_sample_counts)
_store('precision_score', test_precision, splits=True, rank=True, weights=test_sample_counts)
_store('recall_score', test_recall, splits=True, rank=True, weights=test_sample_counts)
_store('f1_score', test_f1, splits=True, rank=True, weights=test_sample_counts)
_store('distance_mae', test_distance, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('count_mae', test_count, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('count_pct_mae', test_count_pct, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('raw_count_mae', test_raw_count, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('raw_count_pct_mae', test_raw_count_pct, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('density_mae', test_density, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('raw_density_mae', test_raw_density, splits=True, rank=True, weights=test_sample_counts, error=True)
_store('fit_time', fit_time)
_store('score_time', score_time)
results['rank_custom'] = np.asarray(rankdata((results['rank_f1_score'] + results['rank_count_pct_mae']) / 2,
method='min'), dtype=np.int32)
best_index = np.flatnonzero(results['rank_custom'])[0]
best_parameters = tot_candidate_params[best_index]
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray, np.empty(n_tot_candidates, ), mask=True, dtype=object))
for cand_i, params in enumerate(tot_candidate_params):
params = merge_dicts(*params)
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = tot_candidate_params
self.cv_results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
bp = best_parameters[0]
bp.update(best_parameters[1])
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(**bp)
best_estimator.fit(X, y)
self.best_estimator_ = best_estimator
return self
@property
def best_params_(self):
    """Parameter dict of the best-ranked candidate; requires a completed fit."""
    check_is_fitted(self, 'cv_results_')
    return self.cv_results_['params'][self.best_index_]
@property
def best_score_(self):
    """Mean test score of the best-ranked candidate; requires a completed fit."""
    check_is_fitted(self, 'cv_results_')
    return self.cv_results_['mean_test_score'][self.best_index_]
def _fit_and_score(estimator, X, y, me, train, test, logger,
                   parameters, candidate_untrainable_params,
                   return_n_test_samples=False,
                   return_times=False):
    """Fit estimator on the train split, then score every candidate set of
    untrainable (post-processing) parameters on the test split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    me : metrics accumulator
        Provides reset() / compute() / all_metrics().
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    logger : Logger
        Destination for "[CV]" progress messages.
    parameters : dict or None
        Trainable parameters to be set on the estimator before fitting.
    candidate_untrainable_params : iterable of dict
        Post-processing parameter sets, each scored without re-fitting.

    Returns
    -------
    all_ret : list with one entry per untrainable candidate:
        [accuracy, precision, recall, f1, distance, count, count_pct,
         raw_count, raw_count_pct, density, raw_density]
        (+ n_test_samples if `return_n_test_samples`,
         + fit_time and score_time if `return_times`).
    """
    if parameters is None:
        msg = ''
    else:
        msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items()))
    logger.info("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    # Fit once; every untrainable-parameter candidate reuses this fit.
    estimator.fit(X_train, y_train)
    fit_time = time.time() - start_time
    all_ret = []
    for untrainable_parameters in candidate_untrainable_params:
        me.reset()
        # NOTE(review): itertools.izip is Python-2 only (use zip on py3).
        # This loop also rebinds `y` (the full target array) to a single
        # test sample — harmless here only because `y` is not used later.
        for x, y in itertools.izip(X_test, y_test):
            p = estimator.predict(np.array([x]))
            pp = estimator.postprocessing([p], **untrainable_parameters)
            me.compute([open_scoremap(y)], [pp], [p])
        metrics = me.all_metrics()
        # score_time is cumulative across candidates, relative to the single fit.
        score_time = time.time() - start_time - fit_time
        total_time = score_time + fit_time
        # NOTE(review): the guard tests `parameters` but the message formats
        # `untrainable_parameters` — confirm this mismatch is intentional.
        if parameters is None:
            msg1 = ''
        else:
            msg1 = '%s' % (', '.join('%s=%s' % (k, v) for k, v in untrainable_parameters.items()))
        end_msg = "%s %s, total=%s" % (msg1, msg, joblib_logger.short_format_time(total_time))
        logger.info("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
        ret = [metrics['accuracy'],
               metrics['precision'],
               metrics['recall'],
               metrics['f1'],
               metrics['distance'],
               metrics['count'],
               metrics['count_pct'],
               metrics['raw_count'],
               metrics['raw_count_pct'],
               metrics['density'],
               metrics['raw_density']]
        if return_n_test_samples:
            ret.append(X_test.shape[0])
        if return_times:
            ret.extend([fit_time, score_time])
        all_ret.append(ret)
    return all_ret
def mk_tt_split(X, y, labels, test_labels):
    """
    Perform a train/test split based on labels.

    Parameters
    ----------
    X : array_like
        Input samples
    y : array_like
        Output samples
    labels : array_like
        Set of labels
    test_labels : array_like
        Set of test labels, that is, a subset of `labels`.

    Returns
    -------
    (X_LS, y_LS, labels_LS, X_TS, y_TS, labels_TS) : 6-tuple of ndarrays
        Learning-set arrays followed by test-set arrays.
    """
    held_out = np.unique(test_labels)
    is_test = np.in1d(labels, held_out)
    is_train = ~is_test
    learning_set = (np.asarray(X[is_train]),
                    np.asarray(y[is_train]),
                    np.asarray(labels[is_train]))
    test_set = (np.asarray(X[is_test]),
                np.asarray(y[is_test]),
                np.asarray(labels[is_test]))
    return learning_set + test_set
def cv_strategy(parameters):
    """Build the cross-validation splitter selected by ``parameters.cv_mode``.

    'GKF'  -> GroupKFold(n_splits=parameters.cv_param)
    'LPGO' -> LeavePGroupsOut(n_groups=parameters.cv_param)
    Anything else raises ValueError.
    """
    mode = parameters.cv_mode
    if mode == 'GKF':
        return GroupKFold(n_splits=parameters.cv_param)
    if mode == 'LPGO':
        return LeavePGroupsOut(n_groups=parameters.cv_param)
    raise ValueError("Unknown CV mode")
def mk_param_grid(param_dict, param_keys):
    """Return a shallow copy of `param_dict` restricted to keys in `param_keys`.

    The input dict is left untouched.
    """
    return {key: value for key, value in param_dict.items() if key in param_keys}
# def clone(estimator, safe=True):
# """Constructs a new estimator with the same parameters.
#
# Clone does a deep copy of the model in an estimator
# without actually copying attached data. It yields a new estimator
# with the same parameters that has not been fit on any data.
#
# Parameters
# ----------
# estimator: estimator object, or list, tuple or set of objects
# The estimator or group of estimators to be cloned
#
# safe: boolean, optional
# If safe is false, clone will fall back to a deepcopy on objects
# that are not estimators.
#
# """
# estimator_type = type(estimator)
# # XXX: not handling dictionaries
# if estimator_type in (list, tuple, set, frozenset):
# return estimator_type([clone(e, safe=safe) for e in estimator])
# elif not hasattr(estimator, 'get_params'):
# if not safe:
# return copy.deepcopy(estimator)
# else:
# raise TypeError("Cannot clone object '%s' (type %s): "
# "it does not seem to be a scikit-learn estimator "
# "as it does not implement a 'get_params' cell_counting."
# % (repr(estimator), type(estimator)))
# klass = estimator.__class__
# new_object_params = estimator.get_params(deep=False)
# for name, param in six.iteritems(new_object_params):
# new_object_params[name] = clone(param, safe=False)
# new_object = klass(**new_object_params)
# params_set = new_object.get_params(deep=False)
#
# # quick sanity check of the parameters of the clone
# for name in new_object_params:
# param1 = new_object_params[name]
# param2 = params_set[name]
# if param1 is param2:
# # this should always happen
# continue
# if isinstance(param1, np.ndarray):
# # For most ndarrays, we do not test for complete equality
# if not isinstance(param2, type(param1)):
# equality_test = False
# elif (param1.ndim > 0
# and param1.shape[0] > 0
# and isinstance(param2, np.ndarray)
# and param2.ndim > 0
# and param2.shape[0] > 0):
# equality_test = (
# param1.shape == param2.shape
# and param1.dtype == param2.dtype
# and (_first_and_last_element(param1) ==
# _first_and_last_element(param2))
# )
# else:
# equality_test = np.all(param1 == param2)
# elif sparse.issparse(param1):
# # For sparse matrices equality doesn't work
# if not sparse.issparse(param2):
# equality_test = False
# elif param1.size == 0 or param2.size == 0:
# equality_test = (
# param1.__class__ == param2.__class__
# and param1.size == 0
# and param2.size == 0
# )
# else:
# equality_test = (
# param1.__class__ == param2.__class__
# and (_first_and_last_element(param1) ==
# _first_and_last_element(param2))
# and param1.nnz == param2.nnz
# and param1.shape == param2.shape
# )
# else:
# # fall back on standard equality
# equality_test = param1 == param2
# if equality_test:
# pass
# else:
# raise RuntimeError('Cannot clone object %s, as the constructor '
# 'does not seem to set parameter %s' %
# (estimator, name))
#
# return new_object
|
from src.vision.camera_parameters import CameraParameters
from src.vision.transform import Transform
class TableCameraConfiguration:
    """Bundles a table camera's id with its intrinsic parameters and its
    world-to-camera pose."""

    def __init__(self, id: int, cam_param: CameraParameters, world_to_camera: Transform):
        """Store the camera id, intrinsics and extrinsic transform as-is."""
        self.world_to_camera = world_to_camera
        self.camera_parameters = cam_param
        self.id = id
|
#!/usr/bin/python -u
#
# CS3700, Spring 2015
# Project 2 Starter Code
#
import sys
import socket
import time
import datetime
import select
import json
import random
def log(string):
    """Write a timestamped line to stderr (stdout carries the received payload)."""
    timestamp = datetime.datetime.now().strftime("%H:%M:%S.%f")
    sys.stderr.write("{} {}\n".format(timestamp, string))
# MSG_SIZE = 1500
MSG_SIZE = 20000            # max UDP datagram size accepted per recvfrom
TIMEOUT = 30                # seconds to block in recvfrom before giving up
MAX_SEQUENCE = 4294967296   # 2**32 — sequence numbers are 32-bit
SEQUENCE = 0                # receiver-side sequence number, set at handshake
# Bind to localhost and an ephemeral port
UDP_IP = "127.0.0.1"
UDP_PORT = 0
# Set up the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT))
sock.settimeout(TIMEOUT)
handshake_completed = 0 # 0 = handshake not started
                        # 1 = received initial syn
                        # 2 = received final ack
acked_bytes = 0         # count of payload bytes accepted (and acked) so far
# Get port we bound to
UDP_PORT = sock.getsockname()[1]
log("[bound] " + str(UDP_PORT))
# NOTE(review): indentation below was reconstructed during review — verify
# the nesting (especially where the data-phase ack is sent) against the
# original layout before relying on edge-case behavior.
# Now listen for packets
while True:
    log("Acked_bytes: {}".format(acked_bytes))
    if handshake_completed == 2:
        # --- Data phase: handshake done, accept in-order data packets ---
        log("Listening for data packets ")
        result = sock.recvfrom(MSG_SIZE)
        # If nothing is ready, we hit the timeout
        if result:
            (data, addr) = result
            try:
                decoded = json.loads(data)
                log(str(decoded))
                # If the EOF flag is set, exit
                if (decoded.get('eof')):
                    log("[completed]")
                    sys.exit(0)
                # If there is data, we accept it and print it out
                if (decoded['data']):
                    log("receiver sequence: {}".format(SEQUENCE))
                    log("acked_btytes: {}".format(acked_bytes))
                    log("data length: {}".format(len(data)))
                    # Accept only the exactly-expected in-order packet
                    if decoded.get('sequence') == acked_bytes and decoded.get('ack') == SEQUENCE + 1:
                        sys.stdout.write(decoded['data'])
                        log("[recv data] " + str(decoded['sequence']) + " (" + str(len(decoded['data'])) + ") ACCEPTED")
                        acked_bytes += len(decoded.get('data'))
                    # Send back an ack to the sender (re-acks duplicates with
                    # the current cumulative byte count)
                    msg = json.dumps({"ack": acked_bytes, "sequence": SEQUENCE})
                    log("ABOUT TO SEND " + msg)
                    if sock.sendto(msg, addr) < len(msg):
                        log("[error] unable to fully send packet")
            except (ValueError, KeyError, TypeError) as e:
                log("[recv corrupt packet]")
                raise e
        else:
            log("[error] timeout")
            sys.exit(-1)
    else:
        if handshake_completed == 0:
            # --- Handshake phase 1: wait for the sender's SYN ---
            SEQUENCE = random.randint(0, MAX_SEQUENCE)
            result = sock.recvfrom(MSG_SIZE)
            # If nothing is ready, we hit the timeout
            if result:
                (data, addr) = result
                try:
                    decoded = json.loads(data)
                    if decoded.get('syn') and decoded.get('sequence_c'):
                        log("[handshake] Handshake started")
                        packet = {'syn': True, 'ack': True, 'data': '', 'sequence_c': decoded['sequence_c'] + 1, 'sequence_s': SEQUENCE}
                        acked_bytes = decoded.get('sequence_c') + 1
                        # NOTE(review): len(packet) here is the dict's key
                        # count, not the encoded message length, so this
                        # "partial send" check is effectively always false.
                        if sock.sendto(json.dumps(packet), addr) < len(packet):
                            log("[error] unable to fully send packet")
                        else:
                            log("[handshake] " + str(SEQUENCE) + " response sent")
                            handshake_completed = 1
                except (ValueError, KeyError, TypeError) as e:
                    log("[recv corrupt packet]")
                    raise e
            else:
                log("[error] timeout")
                sys.exit(-1)
        elif handshake_completed == 1:
            # --- Handshake phase 2: wait for the final ACK (may carry data) ---
            result = sock.recvfrom(MSG_SIZE)
            # If nothing is ready, we hit the timeout
            if result:
                (data, addr) = result
                try:
                    decoded = json.loads(data)
                    if decoded.get('reset'):
                        # Sender asked to restart the handshake
                        handshake_completed = 0
                    else:
                        if decoded.get('ack') and decoded.get('data'):
                            log("[handshake] Handshake completed")
                            # acked_bytes += len(decoded.get('data'))
                            # log("acked_bytes += {}".format(len(decoded.get('data'))))
                            SEQUENCE += 1
                            handshake_completed = 2
                            if decoded.get('ack') == SEQUENCE + 2:
                                sys.stdout.write(decoded['data'])
                                log("[recv data] " + str(decoded['sequence']) + " (" + str(len(decoded['data'])) + ") ACCEPTED")
                                acked_bytes += len(decoded.get('data'))
                            # Send back an ack to the sender
                            msg = json.dumps({"ack": decoded['sequence'] + len(decoded['data']), "sequence": SEQUENCE})
                            log("ABOUT TO SEND " + msg)
                            if sock.sendto(msg, addr) < len(msg):
                                log("[error] unable to fully send packet")
                            # else:
                            #     acked_bytes += 1
                except (ValueError, KeyError, TypeError) as e:
                    log("[recv corrupt packet]")
                    raise e
            else:
                log("[error] timeout")
                sys.exit(-1)
|
from config.config import PATH_TO_QUERIES, PATH_TO_DOMAINS
from app.typing import T_QUERIES, T_DOMAINS, T_DOMAIN, T_BASE_URL, T_QUERY, T_URL
def get_queries() -> T_QUERIES:
    """Read the queries file and return its unique lines, sorted."""
    # sorted() already returns a list, so the original's extra list() wrapper
    # around it was a redundant copy; set() handles deduplication.
    return sorted(set(PATH_TO_QUERIES.read_text().splitlines()))
def get_domains() -> T_DOMAINS:
    """
    Get unique domains, sorted.
    """
    # sorted() already returns a list — the original's list() wrapper was a
    # redundant copy; set() handles deduplication.
    return sorted(set(PATH_TO_DOMAINS.read_text().splitlines()))
def make_base_url(domain: T_DOMAIN, scheme: str = 'http') -> T_BASE_URL:
    """Build '<scheme>://<domain>' (default scheme: http)."""
    return '{}://{}'.format(scheme, domain)
def make_url_with_query(base_url: T_BASE_URL, query: T_QUERY) -> T_URL:
    """Append the (pre-formatted) query/path part directly to the base URL."""
    return base_url + query
|
import os
def createfile():
    """Prompt for a filename and line contents, then write the lines to it.

    Input ends when the user types the sentinel 'ENDLINE' (not written).
    """
    filename = input('Please input the filename you want to create: ')
    i = 1
    lines = []
    while True:
        line = input('Please input the %d line: ' % i)
        if line == 'ENDLINE':
            break
        lines.append(line)
        i += 1
    # with-statement guarantees the file is closed even if a write fails
    # (the original leaked the handle on exception).
    with open(filename, 'w') as f:
        for j in lines:
            f.write(j + '\n')
def viewfile():
    """Prompt for a filename and print its contents; exit on a bad name."""
    filename = input('Which file you want to see: ')
    if os.path.isfile(filename):
        # with-statement closes the file (the original never closed it)
        with open(filename, 'r') as f:
            for eachLine in f:
                print(eachLine, end='')
    else:
        print('Wrong filename')
        exit(0)
def editfile():
    """Prompt for a filename and a 1-based line number, replace that line in place."""
    filename = input('Which file you want to edit: ')
    linenum = int(input('Which line you want to edit: '))
    # with-statements close both handles even on error (original relied on
    # explicit close, leaking on exception).
    with open(filename, 'r') as f:
        lines = f.readlines()
    # NOTE(review): no bounds check — a linenum outside 1..len(lines)
    # raises IndexError, same as the original.
    lines[linenum - 1] = input('Please input the content: ') + '\n'
    with open(filename, 'w') as f:
        f.writelines(lines)
def showmenu():
    """Interactive menu loop: dispatch to the file helpers until the user quits."""
    prompt = """
(N)ew file
(V)iew file content
(E)dit file
(Q)uit
Enter choice: """
    actions = {'n': createfile, 'v': viewfile, 'e': editfile}
    done = False
    while not done:
        # Re-prompt until a recognised single-letter option is entered.
        while True:
            try:
                choice = input(prompt).strip()[0].lower()
            except (EOFError, KeyboardInterrupt):
                choice = 'q'
            print('\nYou picked: [%s]' % choice)
            if choice in 'nveq':
                break
            print('Invalid option, try again')
        if choice == 'q':
            done = True
        else:
            actions[choice]()
# Launch the interactive menu only when run as a script.
if __name__ == '__main__':
    showmenu()
"""
musicinformationretrieval.com/realtime_spectrogram.py
PyAudio example: display a live log-spectrogram in the terminal.
For more examples using PyAudio:
https://github.com/mwickert/scikit-dsp-comm/blob/master/sk_dsp_comm/pyaudio_helper.py
"""
import librosa
import numpy
import pyaudio
import time
import socket
import struct
import time
DATA_SEND_RATE = 60 #hz — throttle for pushing spectra over the socket
#IP_ADDRESS = '192.168.1.76'
IP_ADDRESS = 'localhost'
SCREEN_WIDTH = 300          # number of mel bands sent per frame
# sound global variables
CHANNELS = 1
RATE = 44100                # sample rate in Hz
FRAMES_PER_BUFFER = 1000
N_FFT = 4096                # FFT length for each analysis frame
ENERGY_THRESHOLD = 0.2
# Choose the frequency range
F_LO = librosa.note_to_hz('C2')
F_HI = librosa.note_to_hz('C9')
# Mel filterbank mapping the rFFT bins onto SCREEN_WIDTH bands
M = librosa.filters.mel(RATE, N_FFT, SCREEN_WIDTH, fmin=F_LO, fmax=F_HI)
#init
p = pyaudio.PyAudio()
lastTime = time.time()      # timestamp of the last socket send
connected = False           # set True once the TCP connection is up
# PyAudio stream callback, invoked for every captured audio buffer.
def callback(in_data, frame_count, time_info, status):
    """Compute the mel spectrum of the buffer and stream it over the socket.

    Always returns (in_data, paContinue) so the stream keeps running; any
    send failure clears the global `connected` flag so the main loop can
    reconnect.
    """
    global connected
    if not connected:
        return (in_data, pyaudio.paContinue)
    audio_data = numpy.frombuffer(in_data, dtype=numpy.float32)
    x_fft = numpy.fft.rfft(audio_data, n=N_FFT)
    melspectrum = M.dot(abs(x_fft))
    #compress melspectrum into 2 byte floats (struct format 'e' = half float)
    bbuf = []
    # leading +inf — presumably a frame delimiter for the receiver; confirm
    bbuf.extend(float_to_bytes(float('inf')))
    for v in melspectrum:
        bbuf.extend(float_to_bytes(v))
    global lastTime
    #only send data at the DATA_SEND_RATE
    if time.time()-lastTime > 1/DATA_SEND_RATE:
        lastTime = time.time()
        try:
            s.sendall(bytes(bbuf))
        except:
            # NOTE(review): bare except — any error marks us disconnected
            connected = False
    return (in_data, pyaudio.paContinue)
#open socket with the stream; reconnect forever until Ctrl-C
# NOTE(review): indentation reconstructed during review — verify nesting.
try:
    float_to_bytes = struct.Struct('e').pack   # half-precision float packer
    stream = p.open(format=pyaudio.paFloat32,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True, # Do record input.
                    output=False, # Do not play back output.
                    frames_per_buffer=FRAMES_PER_BUFFER,
                    stream_callback=callback)
    while True:
        # New socket per connection attempt
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while not connected:
            try:
                s.connect((IP_ADDRESS, 12345))
                connected = True
                print("Connected")
            except:
                # Retry the connection every 4 seconds
                print("Connect failed, trying again...")
                time.sleep(4)
        stream.start_stream()
        # Idle here while the callback streams data; wake 10x/second
        while stream.is_active() and connected:
            time.sleep(0.100)
        if not stream.is_active():
            print("Sound stream is down")
            break
        stream.stop_stream()
        print("Lost Connection, attempting reconnect...")
except KeyboardInterrupt:
    print("Closing")
finally:
    # Best-effort cleanup of the audio stream and socket
    stream.stop_stream()
    stream.close()
    s.close()
    p.terminate()
|
#coding:utf-8
# https://docs.djangoproject.com/en/1.10/ref/contrib/admin/admindocs/
# Requires the docutils module; download from: http://docutils.sf.net/
# Add to INSTALLED_APPS:
#     django.contrib.admindocs
# URL pattern: (r'^admin/doc/', include('django.contrib.admindocs.urls'))
|
#!/usr/bin/env python
# Print N random integers in [0, 1000000], one per line.
# N is taken from argv[1] when exactly one argument is given, else 10.
import random
import sys

if len(sys.argv) == 2:
    N = int(sys.argv[1])
else:
    N = 10
# range/print(...) behave identically to the original xrange/print statement
# on Python 2, and additionally work on Python 3.
for _ in range(N):
    print(random.randint(0, 1000000))
|
# Multi-color, multi-template matching example
#
# This example demonstrates multi-color tracking with the OpenMV camera.
import sensor, image, time
from image import SEARCH_EX, SEARCH_DS
from pyb import UART
from error_color import color_track
# Color tracking thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The thresholds below track generic red/green objects — tune as needed.
#blue=[(34, 40, 10, 18, -60, -40)]
#green=[(36, 58, -39, -24, -3, 19)]#green
#red=[(41, 54, 67, 80, 30, 63)] # generic_blue_thresholds,green(36, 58, -39, -24, -3, 19),
#blue (41, 54, 57, 80, 16, 63)
red = [(41, 54, 67, 80, 30, 63),(35,55,38,65,10,45),(40,53,62,72,36,47),(38,45,60,70,35,44)]
blue=[(30,40,-10,20,-45,-30),(35,40,13,20,-60,-43),(40,47,-3,6,-36,-26),(30 ,34,12,21,-58,49),
(38,58,-6,23,-63,-27)]
green = [(30,40,-30,-20,5,15),(25,33,-27,-23,2,13),(20,26,-25,-20,3,12),(20,39,-30,-20,0,17),
(23,33,-27,-24,5,15),(31,38,-35,-28,8,17),(17,26,-30,-20,7,17),(22,29,-28,-25,6,12),
(65,70,-24,-16,11,20),(47,50,-50,-45,35,40)]
yellow = [(63,70,-3,5,59,68),(71,77,-6,9,35,56)]
black = [(1,8,-10,10,-10,10),(3,8,-4,6,8,12)]
basketball = [(14,26,6,24,3,20)]
volleyball = [(66,70,-5,3,56,70),(30,36,0,9,28,45),(75,87,0,17,30,50)]
soccerball = [(37,45,-23,-1,-33,-28),(27,33,-9,1,-28,20),(42,48,-11,-8,-26,-25)]
# NOTE(review): unlike the other thresholds this is a flat list, not [(...)]
white = [72,79,-1,3,-8,1]
# Do not exceed 16 color thresholds.
uart = UART(3,19200)
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
sensor.set_vflip(False)
sensor.set_hmirror(False)
clock = time.clock()
# Only blobs with more pixels than "pixel_threshold" and area larger than
# "area_threshold" are returned by "find_blobs" below. If you change the
# camera resolution, adjust "pixels_threshold" and "area_threshold".
# "merge=True" merges all overlapping blobs in the image.
change=[0,0,0,0]
def find_max(blobs):
    """Return the blob with the largest bounding-box area (w*h).

    The first blob wins ties; like the original, an empty `blobs` raises
    UnboundLocalError because nothing is ever selected.
    """
    biggest_area = 0
    for blob in blobs:
        area = blob[2] * blob[3]
        if area > biggest_area:
            biggest = blob
            biggest_area = area
    return biggest
#balls = ["basketball3.pgm","football1.pgm","volleyball2.pgm"]
# Template image files per shape: zfx = square, yx = circle, sjx = triangle.
# FIX: the last square template was listed as "zfx9.com" — clearly a typo
# for "zfx9.pgm", since every other template is a .pgm image.
zfx_tempaltes = ["zfx1.pgm","zfx2.pgm","zfx3.pgm","zfx4.pgm","zfx5.pgm",
                 "zfx6.pgm","zfx7.pgm","zfx8.pgm","zfx9.pgm"]
yx_tempaltes = ["yx1.pgm","yx2.pgm","yx3.pgm","yx4.pgm","yx5.pgm"]
sjx_tempaltes = ["sjx1.pgm","sjx2.pgm","sjx3.pgm"]
templates = [zfx_tempaltes,yx_tempaltes,sjx_tempaltes]  # all template groups
def cal():
    """Detect a shape (square/circle/triangle) via template/circle matching,
    classify its dominant color inside the detected bounding box, and report
    the result over UART.

    NOTE(review): indentation was reconstructed during review — verify the
    nesting against the original source before relying on edge cases.
    """
    flag=0
    zfx=0
    yx=0
    sjx=0
    r=[0,0,0,0]
    key = 0
    G=0
    while(True):
        # A UART byte of 1 aborts the whole detection loop
        key=uart.readchar()
        if key==1:
            break
        sum_zfx=0
        sum_yx=0
        sum_sjx=0
        dis=0
        clock.tick()
        img = sensor.snapshot(1.8)
        #img1 = img.binary(blue)
        # --- Shape detection: try every template group on fresh grayscale frames ---
        for x in templates :
            img = sensor.snapshot(1.8)
            img = img.to_grayscale()
            flag = 0
            for t in x:
                clock.tick()
                img = sensor.snapshot(1.8)
                img = img.to_grayscale()
                template = image.Image(t)
                #ball = image.Image(t)
                if x == zfx_tempaltes:
                    # Square templates: exhaustive template search
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        zfx = r
                        sum_zfx=sum_zfx+1
                elif x == yx_tempaltes:
                    # Circle group: Hough circle transform instead of templates
                    for c in img.find_circles(threshold = 3500, x_margin = 10, y_margin = 10, r_margin = 10,r_min = 2, r_max = 100, r_step = 2):
                        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
                        if c.r()>1:
                            # NOTE(review): rebinding x here clobbers the outer
                            # group variable, so the remaining templates of this
                            # group no longer match any elif branch — confirm.
                            x=c.x()-c.r()
                            y=c.y()-c.r()
                            w=c.r()*2
                            h=c.r()*2
                            r=[x,y,w,h]
                            yx = r
                            sum_yx=20
                elif x == sjx_tempaltes:
                    r = img.find_template(template, 0.80, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
                    if r:
                        print(t)
                        sjx = r
                        sum_sjx=sum_sjx+1
        # --- Pick the shape with the most matches; t encodes the shape id ---
        if (sum_zfx>sum_yx and sum_zfx>sum_sjx) :
            r=zfx
            t=8#"zfx"
        elif (sum_yx>sum_zfx and sum_yx>sum_sjx) :
            r=yx
            t=9#"yx"
        else:
            r=sjx
            t=10#"sjx"
        if (sum_zfx!=0 or sum_yx!=0 or sum_sjx!=0):
            #change[0]=r[0]+0
            #change[1]=r[1]+0
            #change[2]=r[2]-0
            #change[3]=r[3]-0
            # --- Color classification inside the detected bounding box ---
            sum_red=0
            sum_green=0
            sum_blue=0
            x=r[0]
            y=r[1]
            w=r[2]
            h=r[3]
            center_x=r[0]+int(r[2]/2)
            center_y=r[1]+int(r[3]/2)
            # Re-initialise the sensor in RGB565 (shape matching used grayscale)
            sensor.reset()
            sensor.set_pixformat(sensor.RGB565)
            sensor.set_framesize(sensor.QQVGA)
            sensor.skip_frames(time = 300)
            sensor.set_auto_gain(False) # must be turned off for color tracking
            sensor.set_auto_whitebal(False) # must be turned off for color tracking
            sensor.set_vflip(False)
            sensor.set_hmirror(False)
            img = sensor.snapshot(1.8)
            #r=list(r)
            i=3
            # Vote over 3 passes of blob detection for each color
            while(i>0):
                blobs = img.find_blobs(blue,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:
                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    #img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_cross(max_blob.cx(), max_blob.cy())
                    #img.draw_line(x+int(w/2),y,x,y+h)
                    #img.draw_line(x,y+h,x+w,y+h)
                    #img.draw_line(x+w,y+h,x+int(w/2),y)  # triangle outline
                    img.draw_circle(x+int(w/2),y+int(h/2),int(w/2))
                    sum_blue=sum_blue+1
                blobs = img.find_blobs(red,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:
                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    img.draw_circle(x+int(w/2),y+int(h/2),int(h/2))
                    sum_red=sum_red+1
                blobs = img.find_blobs(green,roi=r,pixel_threshold=200,area_threshold=200)
                if blobs:
                    max_blob = find_max(blobs)
                    img.draw_rectangle(r) # rect
                    img.draw_cross(center_x, center_y) # cx, cy
                    sum_green=sum_green+1
                i=i-1
            # flag encodes the winning color
            if (sum_red>sum_green and sum_red>sum_blue) :
                flag=5#"red"
            elif (sum_green>sum_red and sum_green>sum_blue) :
                flag=6#"green"
            elif (sum_blue>sum_red and sum_blue>sum_green):
                flag=7#"blue"
            else :
                flag = 0
        if(r==0 or flag == 0):
            print("没找到")
        else:
            # Report classification and a size estimate over UART and stop
            Lm = int(r[2]/2)
            K = 25
            G=1
            length = K/Lm
            #edge =
            print("length:",length)
            print("color:",flag,"object:",t,"range:",r,"red:",sum_red,
                  "green:",sum_green,"blue:",sum_blue,"zfx_model:",sum_zfx,"yx_model:",
                  sum_yx,"sjx_model:",sum_sjx)
            # Frame: 0x55 0x53 header, then color, shape, half-width, K, G, 1
            uart.writechar(0x55)
            uart.writechar(0x53)
            uart.writechar(flag)
            uart.writechar(t)
            uart.writechar(Lm)
            uart.writechar(K)
            uart.writechar(G)
            uart.writechar(1)
            G=0
            break
    # If red, blob.code()==1; if green, blob.code==2.
    # If digit 0, t=="0.pgm"; if digit 1, t=="1.pgm".
    #print(clock.fps())
|
# Checksummed Ethereum addresses of the most recently deployed contracts.
# NOTE(review): names suggest Safe master copy / default callback handler /
# MultiSend deployments — confirm against the deployment records before use.
LAST_SAFE_CONTRACT = '0x34CfAC646f301356fAa8B21e94227e3583Fe3F5F'
LAST_DEFAULT_CALLBACK_HANDLER = '0xd5D82B6aDDc9027B22dCA772Aa68D5d74cdBdF44'
LAST_MULTISEND_CONTRACT = '0x8D29bE29923b68abfDD21e541b9374737B49cdAD'
|
#palindrome
# Read an integer and report whether its decimal digits are palindromic.
n = int(input("Enter number:"))
digits = str(n)
# An integer is a palindrome iff its decimal form reads the same reversed
# (negative numbers never qualify — the '-' only appears on one side).
if digits == digits[::-1]:
    print("Palindrome")
else:
    print("Not")
|
from time import sleep, strftime, time
from datetime import datetime
import src.database_tools
from src.entities.Temperature import Temperature
from src.TemperatureSlave import TemperatureSlave
from src.database_tools.TemperatureDataService import TemperatureDataService
from src.database_tools.GlobalSettingsAdapter import GlobalSettingsAdapter
if __name__ == '__main__':
    # Sample the temperature probe every 10 s for one hour (360 readings)
    # and persist each reading to the database.
    slave = TemperatureSlave()
    dataService = TemperatureDataService()
    for elapsed_seconds in range(0, 3600, 10):
        reading = slave.ReadTemperature()
        dataService.Save(reading)
        print("Saved " + str(reading) + "at " + str(elapsed_seconds) + " seconds")
        sleep(10)
|
#
# core
#
import pygame
from pygame.locals import *
class Zect:
    """A rectangular scene-graph node: position, size, label text, RGBA color,
    tag, and parent/children tree links."""

    def __init__(self,
                 id = '',
                 pos=(0, 0),
                 dims=(32, 32),
                 text='',
                 color=(255,255,255, 255*0.2),
                 tag='',
                 children=None,
                 parent = None):
        self.id = id
        self.pos=pos
        self.dims=dims
        self.text = text
        self.color=color
        self.tag = tag
        # FIX: the original default `children=[]` was a mutable default
        # argument shared by every instance created without an explicit
        # list; give each node its own fresh list instead.
        self.children = [] if children is None else children
        self.parent = parent
class SceneGraph:
    """Flat list of view objects with id-based lookup."""

    def __init__(self):
        self.graph = []

    def getSceneGraph(self):
        """Return the underlying list of views."""
        return self.graph

    def getView(self, id):
        """Return the first view whose .id equals `id` (IndexError if absent).

        FIX: the original indexed the result of filter(), which only works
        on Python 2 (py3 filter() is lazy); a list comprehension behaves
        identically on both.
        """
        return [v for v in self.graph if v.id == id][0]

    def getViews(self, ids):
        """Return the list of views whose .id is contained in `ids`."""
        return [v for v in self.graph if v.id in ids]

    def printViewIds(self):
        """Print each view's id, one per line.

        FIX: `print v.id` was a Python-2 print statement; print(v.id)
        produces the same output on both Python 2 and 3.
        """
        for v in self.graph:
            print(v.id)
class PyGame:
    """Thin wrapper around pygame: window setup, event filtering and
    rounded-rectangle "view" drawing.

    NOTE(review): indentation reconstructed during review — verify nesting.
    """
    def __init__(self):
        # Initialise pygame + fonts and open an 800x500 window.
        pygame.init()
        pygame.font.init()
        self.FONT = pygame.font.Font('fonts/bitstream.ttf', 12)
        self.clock = pygame.time.Clock()
        self.SCREEN_SIZE = (800, 500)
        self.screen = pygame.display.set_mode(self.SCREEN_SIZE)
        pygame.display.set_caption('DisplayNode')
        # Block all events, then re-allow only the types we handle.
        pygame.event.set_allowed(None)
        pygame.event.set_allowed(
            [QUIT, KEYUP, KEYDOWN, \
            VIDEORESIZE, VIDEOEXPOSE, MOUSEMOTION, \
            MOUSEBUTTONUP, MOUSEBUTTONDOWN])
    def clearScreen(self):
        # Fill the whole window with black.
        self.screen.fill((0, 0, 0))
    def clockTick(self):
        # Milliseconds elapsed since the previous tick.
        return self.clock.tick()
    def updateScreen(self):
        # Flip the pending drawing operations onto the display.
        pygame.display.update()
    def getEvents(self):
        """Return pending events, collapsing all mouse-motion events into the
        most recent one; returns [pygame.QUIT] as soon as a quit event is seen."""
        events = pygame.event.get()
        filteredEvents = []
        if events != []:
            i = 0
            mouse_motion_events = []
            for event in events:
                if (event.type == pygame.MOUSEMOTION):
                    mouse_motion_events.append(event)
                elif ( event.type == pygame.QUIT):
                    return [pygame.QUIT]
                else:
                    filteredEvents.append(event)
                i += 1
            # Keep only the latest mouse-motion event of this batch
            if (mouse_motion_events != []):
                filteredEvents.append(mouse_motion_events[len(mouse_motion_events)-1])
        return filteredEvents
    def draw_rounded_rect(self,
                          rect,
                          color = (0, 0, 0, 255 * 0.8),
                          boarder = 15,
                          boarder_color = (0,0,0,255),
                          corner = 5):
        """Render a rounded rectangle onto a new SRCALPHA surface and return it.

        "boarder" [sic] is the border thickness, "corner" the corner radius;
        the outer rounded rect is drawn in boarder_color and a smaller inner
        one in `color`, leaving a border ring.
        """
        rect.topleft = (0, 0)
        #print 'draw_rounded_rect.rect.size', rect.size
        #print 'draw_rounded_rect.color', color
        surf = pygame.Surface(rect.size, SRCALPHA)
        # draw circles in corners
        pygame.draw.circle(surf, boarder_color, (corner, corner), corner)
        pygame.draw.circle(surf, boarder_color, (corner, rect.height - corner), corner)
        pygame.draw.circle(surf, boarder_color, (rect.width - corner, corner), corner)
        pygame.draw.circle(surf,
                           boarder_color,
                           (rect.width - corner, rect.height - corner),
                           corner)
        # draw two rect that combine to create big rect with corners cut out
        surf.fill(boarder_color, pygame.Rect(corner, 0, rect.width - corner * 2, rect.height))
        surf.fill(boarder_color, pygame.Rect(0, corner, rect.width, rect.height - corner * 2))
        # Repeat the construction inset by `boarder` for the fill color
        pygame.draw.circle(surf, color, (corner+boarder, corner+boarder), corner)
        pygame.draw.circle(surf, color, (corner+boarder, rect.height - corner - boarder), corner)
        pygame.draw.circle(surf, color, (rect.width - corner - boarder, corner + boarder), corner)
        pygame.draw.circle(surf, color,
                           (rect.width - corner - boarder, rect.height - corner - boarder),
                           corner)
        surf.fill(color,
                  pygame.Rect(
                      corner+boarder,
                      boarder,
                      rect.width - (corner * 2) - (boarder * 2),
                      rect.height - (boarder * 2)))
        surf.fill(color,
                  pygame.Rect(
                      boarder, corner+boarder,
                      rect.width-(boarder*2),
                      rect.height - (corner * 2) - (boarder*2)))
        return surf
    def drawView(self, text, x, y, w=32, h=32, color = (0, 0, 0, 255 * 0.8)):
        """Blit a rounded rect at (x, y); when `text` is non-empty the rect
        grows to fit the rendered label with symmetric padding."""
        if (text != ''):
            surface = self.FONT.render(text, True, (255, 255, 255))
            surface_rect = surface.get_rect()
            h_padding = (h/2) - round(surface_rect.height/2.0)
            w_padding = h_padding
            new_width = surface_rect.width+(h_padding*2)
            if ( w > new_width):
                new_width = w
                w_padding = (w/2) - round(surface_rect.width/2.0)
            rect = pygame.Rect(0, 0, new_width, h)
            rounded_surface = self.draw_rounded_rect(rect, color)
            rounded_surface.blit(surface, (w_padding, h_padding))
        else:
            rect = pygame.Rect(0, 0, w, h)
            rounded_surface = self.draw_rounded_rect(rect, color)
        self.screen.blit(rounded_surface, (x, y))
|
from math import hypot
# from math import hypot, pow, sqrt
# Read the two legs (catetos) of a right triangle and print the hypotenuse.
cateto_adjacente = float(input('Qual o valor do cateto adjacente? '))
cateto_oposto = float(input('Qual o valor do cateto oposto? '))
# hipotenusa = (cateto_oposto ** 2 + cateto_adjacente ** 2) ** (1 / 2)
# hipotenusa = hypot(sqrt(pow(cateto_adjacente, 2)), sqrt(pow(cateto_oposto, 2)))
# math.hypot computes sqrt(a*a + b*b) directly.
hipotenusa = hypot(cateto_adjacente, cateto_oposto)
print('A hipotenusa desse triângulo é {0:.2f}'.format(hipotenusa))
|
from django.conf.urls import include
from django.contrib.auth import views as auth_views
from . import views
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
# Account app routes: login page at the root, logout via Django's built-in view.
urlpatterns = [
    path('', views.user_account.as_view(), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
]
# Serve user-uploaded media files (development-style static serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from tkinter import *
import pandas as pd
import numpy as np
from pandas import DataFrame as df
from tkinter import filedialog
from tkinter import messagebox, ttk
import tkinter as tk
from datetime import datetime
#-----------------------------------------------CLASSES
class ToolTip(object):
    """Hover tooltip for a Tk widget: showtip() opens a borderless Toplevel
    next to the widget, hidetip() destroys it."""
    def __init__(self, widget):
        self.widget = widget
        self.tipwindow = None   # the Toplevel while visible, else None
        self.id = None
        self.x = self.y = 0
    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        # Skip if a tip is already showing or there is nothing to show
        if self.tipwindow or not self.text:
            return
        # Position just right of / below the widget's insert cursor
        x, y, cx, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 40
        y = y + cy + self.widget.winfo_rooty() +10
        self.tipwindow = tw = Toplevel(self.widget)
        tw.wm_overrideredirect(1)   # no title bar / window decorations
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS: style the window as a non-activating help balloon
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
        except TclError:
            pass
        label = Label(tw, text=self.text, justify=LEFT,
                      background="#ffffe0", relief=SOLID, borderwidth=1,
                      font=("tahoma", "8", "normal"))
        label.pack(ipadx=1)
    def hidetip(self):
        # Destroy the tip window (if any) and forget it
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
def createToolTip(widget, text):
    """Attach a hover tooltip showing `text` to `widget`."""
    tip = ToolTip(widget)
    # Show on pointer enter, hide on pointer leave.
    widget.bind('<Enter>', lambda event: tip.showtip(text))
    widget.bind('<Leave>', lambda event: tip.hidetip())
#-----------------------------------------------FUNCTIONS
def current_t():
    """Return the current wall-clock time formatted as 'HH:MM:SS'."""
    return datetime.now().strftime("%H:%M:%S")
def openFile ():
    """Ask the user for a CSV report, load it with pandas into the global
    `csvfile`, and populate the CSV frame with the filter checkboxes."""
    frame_csv(1)
    global reset
    global pathname
    global csvfile
    global a
    global b
    global c
    global d
    global e
    # Let the user pick the report file; header=2 skips the report preamble.
    pathname = filedialog.askopenfilename(initialdir="/user/desktop", title="Select file ...", filetypes= ( ("CSV file", ".csv"),("All files", "*.*") ) )
    file_label=Label(framecsv,text=pathname, height=1, width=50 )
    file_label.pack(side=BOTTOM)
    csvfile=pd.read_csv(pathname,header=2)
    # One IntVar per filter option, each surfaced as a checkbutton.
    a= IntVar()
    checknoblk = tk.Checkbutton(framecsv, text="No Blank", variable=a)
    checknoblk.pack(side=LEFT)
    b= IntVar()
    checknostd = tk.Checkbutton(framecsv, text="No Standards", variable=b)
    checknostd.pack(side=LEFT)
    c= IntVar()
    checknohno3 = tk.Checkbutton(framecsv, text="No HNO3", variable=c)
    checknohno3.pack(side=LEFT)
    d= IntVar()
    allsamples = tk.Checkbutton(framecsv, text="Selec. All Samples", variable=d)
    allsamples.pack(side=LEFT)
    e= IntVar()
    allelements = tk.Checkbutton(framecsv, text="Selec. All Elements", variable=e)
    allelements.pack(side=LEFT)
    # Defaults: all filters on, all samples selected ("all elements" off).
    checknoblk.select()
    checknostd.select()
    checknohno3.select()
    allsamples.select()
    reset=1
def frame_csv(reset):
    """(Re)create the global 'CSV Report Load Setup' frame.

    When reset==1 the previous frame is destroyed first (used when a new
    file is opened over an existing one).
    """
    global framecsv
    if (reset==1):
        framecsv.destroy()
    framecsv=LabelFrame(frame1,text="CSV Report Load Setup",height=50, width=500)
    framecsv.grid(row=0, column=1)
def l_samples():
    """Filter the loaded CSV into the global `list_samples` according to the
    noblank/nostd/noHNO3 flags, then log progress to the text widget."""
    global list_samples
    list_samples= csvfile
    if (noblank==1):
        list_samples= list_samples[list_samples['Type']!='BLK']
    if (nostd==1):
        list_samples= list_samples[list_samples['Type']!='STD']
    if (noHNO3==1):
        # Drop rows whose Label mentions HNO (acid blank runs)
        list_samples=list_samples[~list_samples['Label'].str.contains('HNO', regex=False)]
    list_samples=list_samples.reset_index(drop=True)
    print(list_samples)
    T.insert(END,current_t() +" - Samples have been read successfully!\n")
def l_elements():
    """Collect the unique element names (column index 3 of `list_samples`)
    into the global one-column DataFrame `list_elements`."""
    global list_elements
    list_elements=[]
    for n in range(0,len(list_samples.index),1):
        if (list_samples.iloc[n,3] not in list_elements):
            list_elements.append(list_samples.iloc[n,3])
    list_elements=pd.DataFrame(list_elements, columns=["Elements"])
    T.insert(END,current_t() +" - Elements have been read successfully!\n")
def l_names ():
    """Collect unique acquisition timestamps (column 2) and their first-seen
    sample labels (column 0) into the global `list_date` / `list_names`.

    NOTE(review): indentation reconstructed — the label append is assumed to
    sit inside the uniqueness check (keeping both lists the same length);
    confirm against the original layout.
    """
    global list_names
    global list_date
    list_date=[]
    list_names=[]
    for n in range(0,len(list_samples),1):
        if (list_samples.iloc[n,2] not in list_date):
            list_date.append(list_samples.iloc[n,2])
            list_names.append(list_samples.iloc[n,0])
    list_names=pd.DataFrame(list_names, columns=['Label']).reset_index(drop=True)
    list_date=pd.DataFrame(list_date, columns=['Date Time']).reset_index(drop=True)
    print(list_names)
def sample_name_filter():
    """Strip the user-entered substrings (one per line of the text box, in
    the global `text`) out of the selected sample labels and return the
    deduplicated base names.

    NOTE(review): indentation reconstructed — strip/append are assumed to
    run once per label, after all replacements; confirm.
    """
    select_dup1=[]
    erase=text.split('\n')
    print('====================================================================')
    print(erase)
    # Deduplicate labels while preserving order
    select_samples1=list(dict.fromkeys(select_samples['Label']))
    print('-----------------------------------------------------------------------------')
    print(select_samples)
    for i in select_samples1:
        s01=i
        for x in erase:
            s01= s01.replace(x,'')
        s01= s01.strip()
        print(s01)
        select_dup1.append(s01)
    select_dup=list(dict.fromkeys(select_dup1))
    print(select_dup)
    return select_dup
def loadcsv():
    """Read the checkbox states into the global filter flags, then build the
    sample/element lists and open the selection window (setupw)."""
    global noblank
    global nostd
    global noHNO3
    global all_s
    global all_e
    noblank=a.get()
    nostd=b.get()
    noHNO3=c.get()
    all_s=d.get()
    all_e=e.get()
    l_samples()
    l_elements()
    setupw()
def calculate ():
    """Snapshot the sample/element checkbox states (globals `vars`/`vare`,
    lists of IntVar) into the global DataFrames v_s and v_e.

    NOTE(review): the global name `vars` shadows the built-in vars().
    """
    global v_s
    global v_e
    i=0
    vve=[]
    vvs=[]
    for x in range(len(vars)):
        i=vars[x].get()
        vvs.append(i)
    for x in range(len(vare)):
        i=vare[x].get()
        vve.append(i)
    v_s = pd.DataFrame(vvs)
    v_e = pd.DataFrame(vve, columns=list_elements.columns)
def filter():
    """Build the selected report and write it to CSV next to the program.

    Uses the user's sample/date/element selections and the report-type radio
    (``op_v``): 1 = compact report, 2 = full report, 3 = calculated report
    (per-replicate mean and standard deviation of 'Concentration').
    NOTE: this module-level name shadows the builtin ``filter``; kept for
    backward compatibility with the button callback.
    """
    global select_date, select_elements, select_samples, text
    calculate()
    select_samples = list_names.loc[v_s[0], :]
    select_date = list_date.loc[v_s[0], :]
    select_elements = list_elements[v_e].dropna()
    text = t.get('1.0', END)
    csv_final = pd.DataFrame([], columns=list_samples.columns)
    for stamp in select_date['Date Time']:
        by_date = list_samples[list_samples['Date Time'] == stamp]
        for element in select_elements["Elements"]:
            # DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat is the supported equivalent.
            csv_final = pd.concat([csv_final, by_date[by_date["Element"] == element]])
        # blank (all-NaN) separator row after each date group
        blank = pd.DataFrame([[float('nan')] * len(csv_final.columns)],
                             columns=csv_final.columns)
        csv_final = pd.concat([csv_final, blank], ignore_index=True)
    print(csv_final)
    if op_v == 2:
        print(csv_final)
        csv_final.to_csv('./ICP_Full_Report.csv', index=False)
        T.insert(END, current_t() + " - Full Report has been created !\n")
    elif op_v == 3:
        csv_final = csv_final.dropna(how='all')
        select_dup = sample_name_filter()
        list_average = []
        list_standard = []
        l01 = []
        l02 = []
        for label in select_dup:
            for element in select_elements['Elements']:
                slc1 = csv_final[csv_final['Label'].str.contains(label, regex=False)]
                slc2 = slc1[slc1['Element'] == element]
                # non-numeric concentrations (e.g. '<LOD') become NaN and are
                # ignored by mean()/std()
                conc = pd.to_numeric(slc2['Concentration'], errors='coerce')
                list_average.append(conc.mean())
                list_standard.append(conc.std())
                l01.append(label)
                l02.append(element)
            # blank separator between replicate groups
            l01.append('')
            l02.append('')
            list_average.append('')
            list_standard.append('')
        data = {'Label': l01, 'Element': l02, 'Average': list_average, 'STD': list_standard}
        f_rep = pd.DataFrame.from_dict(data)
        print(f_rep)
        f_rep.to_csv('./ICP_Calculated_Report.csv', index=False)
        T.insert(END, current_t() + " - Full Calculated Report has been created !\n")
    elif op_v == 1:
        csv_final = csv_final[["Label", "Element", "Concentration"]]
        print(csv_final)
        csv_final.to_csv('./ICP_Report.csv', index=False)
        T.insert(END, current_t() + " - Report has been created!\n")
#-----------------------------------------------Root window
# Main application window: frame1/frame2 hold the buttons, frame3 the log.
root= Tk()
root.title(" ICP Data Extractor by Mr.Gee (v. 1.5)")
frame1=LabelFrame(root,padx=10, pady=10, bd=0)
frame1.grid(row=0, column=0)
frame2=LabelFrame(frame1,padx=10, pady=10, bd=0)
frame2.grid(row=0, column=0)
frame3=LabelFrame(root,text=" Log",padx=10, pady=10, bd=0)
frame3.grid(row=1, column=0)
frame_csv(0)
#Event log
# Read-only-ish text widget used throughout as the status/event log.
T =Text(frame3, height=10, width=100)
T.grid(row=0)
T.insert(END, current_t() + " - Click 'Help' button to read the instructions \n")
#-----------------------------------------------Top windows
def help_w():
    """Open the Help window: support/contact info plus usage instructions."""
    help = Toplevel()  # NOTE: shadows the builtin help() inside this function
    help.title(" Help")
    help.geometry("500x500")
    frame_about=LabelFrame(help,text="Support/Bugs")
    frame_about.pack(side=TOP)
    frame_h=LabelFrame(help,text="Instructions:", bd=0)
    frame_h.pack(side=TOP)
    about = Label(
        frame_about, text=' Developed by: Guilherme Carvalho \n email: guicampos96@gmail.com \n', font=('helvetica', 10))
    about.pack()
    help_t = Text(frame_h, width=60, height=25, font=('helvetica', 9), highlightbackground='gray', spacing2=2)
    help_t.pack()
    # user-facing instructions text (kept verbatim)
    help_t.insert(END, """This program extracts and process data from the samples using the ICP exported file
1. Open the CSV file
2. Check the Setup options you want
(noblk= remove the blank samples, nostd= remove the standard samples, nohno3= remove samples that have HNO in their label name, Selec. All Samples = Start the loading screen with all samples selected, Selec. All Samples = Start the loading screen with all elements selected)
3. Load the data
4. Select the samples and the elements that you want in the final report
5. Select the type of report and create the report ("Report" contains concentration column for each sample/element,"Full Report" include all columns, "Calc. Report" calculate the average and standard deviation of the samples by NIR)
The final document will be created in the same folder as the running program
""")
    help_t.configure(state=DISABLED)
def setupw():
    """Open the setup window: sample/element checklists plus report options.

    Populates the global checkbox-variable lists ``vars`` (samples) and
    ``vare`` (elements), and the report-type variable ``op_v``.
    """
    global setup
    global vars
    global vare
    global f
    global op_v
    global t
    l_names()
    setup = Toplevel()
    setup.title(" ICP Data setup")
    setup.geometry("500x450")
    # --- scrollable checklist of samples ---
    frame_s=LabelFrame(setup,text="Samples", cursor="arrow")
    frame_s.place( width=150, x=25)
    scrollbary = tk.Scrollbar(frame_s)
    scrollbary.pack(side=tk.RIGHT, fill=tk.Y)
    scrollbarx = tk.Scrollbar(frame_s, orient=HORIZONTAL)
    scrollbarx.pack(side=tk.BOTTOM, fill=tk.X)
    checklist = tk.Text(frame_s, width=15)
    checklist.pack(side=tk.BOTTOM, expand=True)
    vars = []
    for i in list_names["Label"]:
        # one Checkbutton per sample, embedded in the Text widget
        var = tk.BooleanVar()
        checkbutton = tk.Checkbutton(checklist, text=i, variable=var, bg="white")
        vars.append(var)
        checklist.window_create("end", window=checkbutton)
        checklist.insert("end", "\n")
        checklist.config(yscrollcommand=scrollbary.set, cursor="arrow")
        checklist.config(xscrollcommand=scrollbarx.set)
        if (all_s==1):
            # "select all samples" option was ticked on load
            checkbutton.select()
    scrollbary.config(command=checklist.yview)
    scrollbarx.config(command=checklist.xview)
    checklist.configure(state="disabled")# disable the widget so users can't insert text into it
    # --- scrollable checklist of elements ---
    frame_e=LabelFrame(setup,text="Elements", cursor="arrow")
    frame_e.place( width=150, x=200)
    scrollbary = tk.Scrollbar(frame_e)
    scrollbary.pack(side=tk.RIGHT, fill=tk.Y)
    scrollbarx = tk.Scrollbar(frame_e, orient=HORIZONTAL)
    scrollbarx.pack(side=tk.BOTTOM, fill=tk.X)
    checklist = tk.Text(frame_e, width=15)
    checklist.pack(side=tk.BOTTOM, expand=True)
    vare = []
    for i in list_elements["Elements"]:
        var = tk.BooleanVar()
        vare.append(var)
        checkbutton = tk.Checkbutton(checklist, text=i, variable=var, bg="white")
        checklist.window_create("end", window=checkbutton)
        checklist.insert("end", "\n")
        checklist.config(yscrollcommand=scrollbary.set, cursor="arrow")
        checklist.config(xscrollcommand=scrollbarx.set)
        if (all_e==1):
            checkbutton.select()
    scrollbary.config(command=checklist.yview)
    scrollbarx.config(command=checklist.xview)
    checklist.configure(state="disabled")#
    # --- report-type radio buttons and replicate-suffix box ---
    frame_calc=LabelFrame(setup,text="Final Report", cursor="arrow",padx=10, pady=10)
    frame_calc.place( width=120, x=370)
    v = tk.IntVar()
    v.set(1) # initializing the choice, i.e. "Report"
    options = [("Report", 1),
               ("Full Report", 2),
               ("Calc. Report", 3)]
    def ShowChoice():
        # copy the radio selection into the module-level op_v used by filter()
        global op_v
        op_v=v.get()
    tk.Label(frame_calc,
             padx = 10).pack()
    for op, val in options:
        tk.Radiobutton(frame_calc,
                       text=op,
                       padx = 10,
                       variable=v,
                       command=ShowChoice,
                       value=val).pack()
    ShowChoice()
    t_text=Label(frame_calc,text='Replicates id:\n(per line)')
    t_text.pack()
    t_h=Label(frame_calc,text='?',fg='blue')
    t_h.pack()
    # suffixes to strip from sample labels (one per line), used by
    # sample_name_filter() when building the calculated report
    t =Text(frame_calc, height=10, width=10)
    t.pack()
    t.insert(END, "A\nB\nC\na\nb\nc\nR\nr")
    button_c=Button(frame_calc,text="Create Report", command=filter, height=1, width=225)
    button_c.pack()
    createToolTip(t_h,"*Only for Calc.Report. It removes the suffixes in the list to calculate the report properly \n(Ex: 1234 A -> 1234 or 98765 aR -> 98765)")
#-----------------------------------------------Root window buttons
# Main-window buttons: open the CSV, load/parse it, help, and exit.
b_open= Button(frame2, text="Open CSV file",command= openFile, height=1, width=15)
b_open.grid(row=0,column=0)
b_load= Button(frame2, text="Load data",command= loadcsv, height=1, width=15)
b_load.grid(row=1,column=0)
button_quit=Button(frame2,text="Exit", command=root.quit, height=1, width=15)
button_quit.grid(row=3, column=0)
button_help=Button(frame2,text="Help", command=help_w, height=1, width=15)
button_help.grid(row=2, column=0)
# blocks until the window is closed
root.mainloop()
|
from django.db import models
# Create your models here.
# class Program(models.Model):
# nama_program = models.CharField(max_length = 255)
# images = models.CharField(max_length = 255)
# deskripsi = models.CharField(max_length = 1000)
# def __str__(self):
# return self.nama_program |
"""
This is the test suite for cspsolver.py.
"""
import os, sys
import collections
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from unittest import TestCase, main, skip
from teachercourse_csp import pref_handler, assign_days_for_course, maps_day_to_class, hours_for_prof, profs_for_courses, add_nodes, assigner, compute_course_start_end, add_unary_constraint, add_binary_constraint
from cspsolver import CSP
def create_csp():
    """Return a minimal CSP with one (course, professor) node.

    The node's single domain value is a (room, (hour, minute), course) tuple.
    """
    csp = CSP()
    csp.add_node(("physics", "John Smith"), [("648", (5, 60), "physics")])
    return csp
def create_user_data1():
    """Return a larger, realistic scheduling fixture.

    Same 8-tuple layout as create_user_data: (professors, prof_info, rooms,
    room_capacities, courses, course_no_students, course_mins,
    course_days_weekly).
    """
    rooms = ['655', '666', '745a', '745b', '433', '201', '115a', '115b']
    room_capacities = {
        '655': 30,
        '666': 30,
        '745a': 22,
        '745b': 40,
        '433': 31,
        '201': 28,
        '115a': 35,
        '115b': 40
    }
    # Course details
    courses = ['physics', 'chemistry', 'biochemistry', 'biology 1', 'biology 2',
               'molecular biology', 'calculus 1', 'calculus 4', 'astrophysics']
    course_no_students = {
        'physics': 35,
        'chemistry': 26,
        'biochemistry': 22,
        'molecular biology': 20,
        'biology 1': 38,
        'biology 2': 25,
        'calculus 1': 34,
        'calculus 4': 21,
        'astrophysics': 15,
    }
    course_mins = {
        'physics': 60,
        'chemistry': 90,
        'biochemistry': 90,
        'biology 1': 90,
        'biology 2': 60,
        'molecular biology': 60,
        'calculus 1': 60,
        'calculus 4': 60,
        'astrophysics': 60
    }
    # NOTE(review): course_no_sections is built but never returned — confirm
    # whether sections were meant to be part of the fixture.
    course_no_sections = {
        'physics': 2,
        'chemistry': 2,
        'biochemistry': 1,
        'biology 1': 2,
        'biology 2': 1,
        'molecular biology': 1,
        'calculus 1': 2,
        'calculus 4': 1,
        'astrophysics': 1
    }
    course_days_weekly = {
        'physics': 3,
        'chemistry': 2,
        'biochemistry': 2,
        'biology 1': 2,
        'biology 2': 3,
        'molecular biology': 1,
        'calculus 1': 3,
        'calculus 4': 2,
        'astrophysics': 1
    }
    # Info about professors
    professors = ['John Smith', 'Lisa Jones', 'Mike Williams',
                  'Tim Simpson', 'Rachel Smith', 'Gregg Woods',
                  'Simon Valinski', 'Chu Yen', 'Peter Parker',
                  'Lisa Mullen', 'Elizabeth Walker', 'Brian K. Dickson',
                  'Jamir Abdullah']
    # per-professor teachable courses and availability window (whole hours)
    prof_info = {
        'John Smith': {
            'courses': ['physics', 'chemistry'],
            'start_time': 8,
            'end_time': 17
        },
        'Lisa Jones': {
            'courses': ['physics'],
            'start_time': 9,
            'end_time': 18
        },
        'Mike Williams': {
            'courses': ['biology 1'],
            'start_time': 9,
            'end_time': 15
        },
        'Tim Simpson': {
            'courses': ['calculus 1', 'calculus 4'],
            'start_time': 9,
            'end_time': 18
        },
        'Rachel Smith': {
            'courses': ['calculus 4', 'biology 2'],
            'start_time': 9,
            'end_time': 18
        },
        'Gregg Woods': {
            'courses': ['chemistry', 'biochemistry'],
            'start_time': 8,
            'end_time': 17
        },
        'Simon Valinski': {
            'courses': ['calculus 1', 'physics', 'astrophysics'],
            'start_time': 8,
            'end_time': 17
        },
        'Chu Yen': {
            'courses': ['calculus 1', 'calculus 4',
                        'physics', 'astrophysics'],
            'start_time': 10,
            'end_time': 18
        },
        'Peter Parker': {
            'courses': ['biology 1', 'biology 2', 'biochemistry',
                        'chemistry', 'molecular biology'],
            'start_time': 8,
            'end_time': 14
        },
        'Lisa Mullen': {
            'courses': ['calculus 1', 'calculus 4'],
            'start_time': 9,
            'end_time': 13
        },
        'Elizabeth Walker': {
            'courses': ['calculus 1', 'calculus 4'],
            'start_time': 9,
            'end_time': 18
        },
        'Brian K. Dickson': {
            'courses': ['calculus 4', 'physics'],
            'start_time': 9,
            'end_time': 18
        },
        'Jamir Abdullah': {
            'courses': ['chemistry', 'calculus 4'],
            'start_time': 10,
            'end_time': 18
        }
    }
    user_data = professors, prof_info, rooms, room_capacities, courses, \
        course_no_students, course_mins, course_days_weekly
    return user_data
def create_user_data():
    """Return a small fixed scheduling fixture for the tests.

    Returns an 8-tuple: (professors, prof_info, rooms, room_capacities,
    courses, course_no_students, course_mins, course_days_weekly).
    """
    courses = ["physics", "chemistry"]
    professors = ['John Smith', 'Lisa Jones', 'Mike Williams']
    rooms = ["648", "649"]
    room_capacities = {'648': 30, '649': 40}
    course_no_students = {'physics': 35, 'chemistry': 26}
    course_mins = {'physics': 60, 'chemistry': 90}
    course_days_weekly = {'physics': 3, 'chemistry': 2}
    prof_info = {
        'John Smith': {'courses': ['physics', 'chemistry'], 'start_time': 8, 'end_time': 17},
        'Lisa Jones': {'courses': ['physics'], 'start_time': 9, 'end_time': 18},
        # 'biology 1' is deliberately absent from *courses*: exercises profs
        # with no matching course
        'Mike Williams': {'courses': ['biology 1'], 'start_time': 9, 'end_time': 15},
    }
    # NOTE: the original also built course_no_sections but never returned it;
    # removed as dead code (the returned tuple is unchanged).
    return (professors, prof_info, rooms, room_capacities, courses,
            course_no_students, course_mins, course_days_weekly)
class Teachercourse_Csp_TestCase(TestCase):
    """Tests for the teachercourse_csp scheduling helpers and the CSP solver."""

    def setUp(self):
        # fresh one-node CSP and small fixture before every test
        self.csp = create_csp()
        self.data = create_user_data()

    def tearDown(self):
        self.csp = None
        self.data = None

    def room_has_capacity(self, val, course):
        # Unary constraint: the assigned room must seat all students.
        # val is (room, (hour, minute), ...); data[5] = students per course,
        # data[3] = room capacities.
        room = val[0]
        hour_and_min = val[1]
        no_students = self.data[5][course]
        return self.data[3][room] >= no_students

    def no_class_overlap(self, val1, val2, course1, course2):
        """
        Class constraint function for binary
        """
        # NOTE(review): data[5] is course_no_students, while course durations
        # live in data[6] (course_mins) — confirm the index is intended here.
        course_min = self.data[5]
        hours1, mins1 = val1[1]
        hours2, mins2 = val2[1]
        # times are converted to 10-minute slots
        course_start1 = hours1 * 6 + mins1 // 10
        course_end1 = course_start1 + \
            course_min[course1] // 10
        course_start2 = hours2 * 6 + mins2 // 10
        course_end2 = course_start2 + \
            course_min[course2] // 10
        # conditions to check if one class starts during other
        if course_start1 <= course_start2 < course_end1:
            return bool(False)
        if course_start2 <= course_start1 < course_end2:
            return bool(False)
        # soft constraint: non-sequential classes
        # get higher weight
        if course_start1 == course_end2 or course_start2 == course_end1:
            return 2
        return bool(True)

    def no_time_clash(self, val1, val2, course, dummy):
        """
        Class constraint function for binary
        """
        # NOTE(review): same data[5]-vs-data[6] question as no_class_overlap.
        course_min = self.data[5]
        room1, time1 = val1[0], val1[1]
        room2, time2 = val2[0], val2[1]
        if room1 != room2:
            # different rooms can never clash
            return bool(True)
        hours1, mins1 = time1
        hours2, mins2 = time2
        start_time1 = hours1 * 6 + mins1 // 10
        end_time1 = start_time1 + course_min[course] // 10
        start_time2 = hours2 * 6 + mins2 // 10
        if start_time1 <= start_time2 < end_time1:
            return bool(False)
        return bool(True)

    def test_pref_handler(self):
        # invalid day aborts; valid days return the remaining teaching days
        self.assertRaises(ValueError, lambda: pref_handler("sun"))
        result = pref_handler("tues")
        self.assertEqual(result, ["mon", "wed", "thur", "fri"])
        result = pref_handler("mon")
        self.assertFalse("mon" in result)
        self.assertEqual(result, ["wed"] + ["tues", "wed", "thur", "fri"])
        result = pref_handler("fri")
        self.assertFalse("fri" in result)
        self.assertEqual(result, ["thur"] + ["mon", "tues", "wed", "thur"])

    def test_assign_days_for_course(self):
        # n weekly meetings -> n distinct weekdays; 5 means every weekday
        result1 = assign_days_for_course(1)
        self.assertEqual(len(result1), 1)
        self.assertTrue(result1[0] in ["mon", "tues", "wed", "thur", "fri"])
        result2 = assign_days_for_course(2)
        self.assertEqual(len(result2), 2)
        result3 = assign_days_for_course(3)
        self.assertEqual(len(result3), 3)
        result4 = assign_days_for_course(4)
        self.assertEqual(len(result4), 4)
        result5 = assign_days_for_course(5)
        self.assertEqual(len(result5), 5)
        self.assertEqual(result5, ["mon", "tues", "wed", "thur", "fri"])

    def test_maps_day_to_class(self):
        # each course must appear on exactly course_days[course] weekdays
        course = ["physics", "chemistry", "japanese"]
        course_days = {"physics": 3, "chemistry": 1, "japanese": 5}
        result = maps_day_to_class(course_days, course)
        count_physics = 0
        count_chemistry = 0
        count_japanese = 0
        for key in result.keys():
            if "physics" in result[key]:
                count_physics += 1
            if "chemistry" in result[key]:
                count_chemistry += 1
            if "japanese" in result[key]:
                count_japanese += 1
        self.assertEqual(count_japanese, 5)
        self.assertEqual(count_chemistry, 1)
        self.assertEqual(count_physics, 3)

    def test_hours_for_prof(self):
        # the availability window yields 2 half-hour slots per hour
        prof_info = {'John Smith': {'courses': ['physics', 'chemistry'], 'start_time': 17, 'end_time': 17}}
        prof = "John Smith"
        self.assertEqual(hours_for_prof(prof_info, prof), set())
        prof_info[prof]["start_time"] = 16
        self.assertFalse(len(hours_for_prof(prof_info, prof)) > 2)
        self.assertEqual(len(hours_for_prof(prof_info, prof)), 2)
        prof_info[prof]["start_time"] = 15
        self.assertFalse(len(hours_for_prof(prof_info, prof)) > 4)
        self.assertEqual(len(hours_for_prof(prof_info, prof)), 4)
        prof_info[prof]["start_time"] = 8
        self.assertFalse(len(hours_for_prof(prof_info, prof)) > 18)
        self.assertEqual(len(hours_for_prof(prof_info, prof)), 18)

    def test_profs_for_course(self):
        # courses only appear when some professor can teach them
        prof_info = {'John Smith': {'courses': [], 'start_time': 15, 'end_time': 17}}
        courses = ["physics", "chemistry"]
        profs = ['John Smith']
        self.assertEqual(profs_for_courses(courses, profs, prof_info), {})
        prof_info["John Smith"]["courses"].append("physics")
        self.assertFalse("chemistry" in profs_for_courses(courses, profs, prof_info).keys())
        self.assertTrue("physics" in profs_for_courses(courses, profs, prof_info).keys())
        self.assertEqual(profs_for_courses(courses, profs, prof_info)["physics"], "John Smith")
        prof_info["John Smith"]["courses"].append("chemistry")
        self.assertFalse("biology" in profs_for_courses(courses, profs, prof_info).keys())
        self.assertTrue("physics" in profs_for_courses(courses, profs, prof_info).keys())
        self.assertTrue("chemistry" in profs_for_courses(courses, profs, prof_info).keys())
        self.assertEqual(profs_for_courses(courses, profs, prof_info)["physics"], "John Smith")
        self.assertEqual(profs_for_courses(courses, profs, prof_info)["chemistry"], "John Smith")
        prof_info = {'John Smith': {'courses': ["chemistry"], 'start_time': 15, 'end_time': 17},
                     'Lisa Jones': {'courses': ['physics'], 'start_time': 9, 'end_time': 18}}
        profs.append("Lisa Jones")
        self.assertFalse(profs_for_courses(courses, profs, prof_info)["physics"] == "John Smith")
        self.assertTrue(profs_for_courses(courses, profs, prof_info)["chemistry"] == "John Smith")
        self.assertFalse(profs_for_courses(courses, profs, prof_info)["chemistry"] == "Lisa Jones")
        self.assertTrue(profs_for_courses(courses, profs, prof_info)["physics"] == "Lisa Jones")

    def test_add_node(self):
        # add_nodes must create one (course, prof) node per course, with the
        # room embedded as the first element of each domain value
        courses = ["physics", "chemistry"]
        profs = ['John Smith', "Lisa Jones"]
        prof_info = {'John Smith': {'courses': ["chemistry"], 'start_time': 16, 'end_time': 17},
                     'Lisa Jones': {'courses': ['physics'], 'start_time': 9, 'end_time': 10}}
        prof_assign = profs_for_courses(courses, profs, prof_info)
        room_chosen = {}
        rooms = ["648"]
        add_nodes(courses, rooms, room_chosen, prof_assign, prof_info, self.csp)
        nodes = []
        for i in range(len(courses)):
            nodes.append((courses[i], prof_assign[courses[i]]))
        self.assertFalse(("Bob") in self.csp.node_domains.keys())
        self.assertTrue(nodes[0] in self.csp.node_domains.keys())
        self.assertTrue(nodes[1] in self.csp.node_domains.keys())
        self.assertEqual(rooms[0], self.csp.node_domains[nodes[0]][0][0])
        self.assertEqual(rooms[0], self.csp.node_domains[nodes[1]][0][0])

    def test_compute_course_start_end(self):
        # start/end are expressed in 10-minute slots from midnight
        hour = 5
        min = 0
        duration = {"physics": 30}
        course = "physics"
        result = compute_course_start_end(hour, min, duration, course)
        self.assertTrue(len(result) == 2)
        self.assertEqual(result, (5 * 6, 5 * 6 + 30))
        min = 50
        result = compute_course_start_end(hour, min, duration, course)
        self.assertEqual(result, (5 * 6 + 5, 5 * 6 + 5 + 30))

    def test_add_unary(self):
        self.assertTrue(self.csp.unary_constraints == {})
        add_unary_constraint(self.csp, self.room_has_capacity)
        self.assertFalse(self.csp.unary_constraints == {})
        self.assertTrue(('physics', 'John Smith') in self.csp.unary_constraints)

    def test_binary(self):
        # two nodes are needed before a binary constraint can be registered
        self.csp.add_node(("chemistry", "John Smith"), [("649", (5, 60), "chemistry")])
        self.assertTrue(self.csp.binary_constraints == {})
        course_map = {}
        add_binary_constraint(self.csp, course_map, self.no_class_overlap, self.no_time_clash)
        self.assertFalse(self.csp.binary_constraints == {})
        self.assertTrue(('physics', 'John Smith') in self.csp.binary_constraints)

    def test_assigner(self):
        # end-to-end: both fixtures must yield a non-empty defaultdict solution
        user_data = create_user_data()
        solution = assigner(user_data)
        self.assertFalse(len(solution) == 0)
        self.assertEqual(type(solution), type(collections.defaultdict(lambda: None)))
        user_data = create_user_data1()
        solution = assigner(user_data)
        self.assertFalse(len(solution) == 0)
        self.assertEqual(type(solution), type(collections.defaultdict(lambda: None)))
if __name__ == '__main__':
    # run the whole suite via unittest's main() when executed directly
    main()
import requests
import googlemaps
import json
import re
import getKey
def bytesIO_to_obj(bytesIO):
    """Read a bytes stream of UTF-8 JSON and return the parsed object."""
    raw = bytesIO.read()
    return json.loads(raw.decode('UTF-8'))
def get_api_result(start, destination, mode=None):
    """Query the Google Directions API for all route alternatives.

    start/destination are free-text places; mode is a Google Maps travel
    mode (e.g. 'walking', 'driving'). Returns the list of routes.
    """
    gmaps = googlemaps.Client(key=getKey.googleKey())
    result = gmaps.directions(start, destination, mode=mode, alternatives=True)
    return result
def get_route_points(result):
    """Flatten a route's first leg into an ordered list of points.

    Each point is the step's end_location dict, augmented with a 'dist' key
    (meters covered by that step; 0 for the leg's start point).
    """
    leg = result['legs'][0]
    start = leg['start_location']
    start['dist'] = 0
    points = [start]
    for step in leg['steps']:
        point = step['end_location']
        point['dist'] = step['distance']['value']
        points.append(point)
    return points
def get_accidents(points_list):
    """Fetch NYC collision records near each route point.

    Queries the open-data collisions API within a circle of radius dist/2
    around every point; returns one list of accident dicts per point.
    """
    per_point = []
    for point in points_list:
        radius = point['dist'] / 2
        url = 'https://data.cityofnewyork.us/resource/qiz3-axqb.json?$where=within_circle(location, {}, {}, {})'.format(
            point['lat'], point['lng'], radius)
        per_point.append(json.loads(requests.get(url).text))
    return per_point
def score_walking(accidents_per_points):
    """Score pedestrian danger: +0.2 per accident harming a pedestrian,
    +0.5 per pedestrian injury, +1 per pedestrian death."""
    total = 0
    for cluster in accidents_per_points:
        for acc in cluster:
            injured = int(acc['number_of_pedestrians_injured'])
            killed = int(acc['number_of_pedestrians_killed'])
            if injured > 0 or killed > 0:
                total += 0.2
            total += injured * 0.5
            total += killed
    return total
def score_cycling(accidents_per_points):
    """Score cyclist danger: +0.2 per accident harming a cyclist,
    +0.5 per cyclist injury, +1 per cyclist death."""
    total = 0
    for cluster in accidents_per_points:
        for acc in cluster:
            injured = int(acc['number_of_cyclist_injured'])
            killed = int(acc['number_of_cyclist_killed'])
            if injured > 0 or killed > 0:
                total += 0.2
            total += injured * 0.5
            total += killed
    return total
def score_driving(accidents_per_points):
    """Score driving danger: every accident counts +0.2 (no harm filter),
    plus +0.5 per person injured and +1 per person killed."""
    total = 0
    for cluster in accidents_per_points:
        for acc in cluster:
            total += 0.2
            total += int(acc['number_of_persons_injured']) * 0.5
            total += int(acc['number_of_persons_killed'])
    return total
def calc_score(mode, accidents_per_points):
    """Return the danger score for a route, weighted by travel mode.

    BUG FIX: main() requests mode 'bicycling' (the Google Maps mode name),
    but only 'cycling' was matched here, so bike routes always scored 0.
    Both spellings are now accepted; existing 'cycling' callers still work.
    Unknown modes return 0, as before.
    """
    score = 0
    if mode == 'driving' or mode == 'transit':
        score = score_driving(accidents_per_points)
    elif mode == 'cycling' or mode == 'bicycling':
        score = score_cycling(accidents_per_points)
    elif mode == 'walking':
        score = score_walking(accidents_per_points)
    return score
def rank_routes(mode_dict):
    """Annotate every route with 'percent': how much worse (%) its score is
    than the best (lowest) score for its mode, clamped at 0. Mutates and
    returns mode_dict."""
    for routes in mode_dict.values():
        best = min(route['score'] for route in routes)
        if best == 0:
            # avoid division by zero when the safest route has no accidents
            best = 1
        for route in routes:
            pct = round(((route['score'] / best) - 1) * 100, 2)
            route['percent'] = pct if pct >= 0 else 0
    return mode_dict
def main(start, destination):
    """Fetch, score and rank every route alternative for each travel mode.

    Returns a dict mode -> list of route summaries (points, score, distance,
    time, plain-text instructions, and a relative 'percent' ranking).
    """
    tag_pattern = re.compile('<.*?>')  # strips the HTML in step instructions
    mode_dict = {}
    for mode in ['walking', 'driving', 'transit', 'bicycling']:
        mode_dict[mode] = []
        routes = get_api_result(start, destination, mode=mode)
        for number, route in enumerate(routes, 1):
            leg = route['legs'][0]
            directions = [
                (re.sub(tag_pattern, '', step['html_instructions']), step['distance']['text'])
                for step in leg['steps']
            ]
            points_list = get_route_points(route)
            score = calc_score(mode, get_accidents(points_list))
            mode_dict[mode].append({'route': number,
                                    'points': points_list,
                                    'score': score,
                                    'distance': leg['distance']['text'],
                                    'time': leg['duration']['text'],
                                    'instructions': directions})
    return rank_routes(mode_dict)
if __name__ == '__main__':
    # NOTE(review): the first start/destination pair is dead code — it is
    # immediately overwritten by the second pair below.
    start = 'College of staten island'
    destination = 'wagner college, staten island'
    start = 'college of staten island'
    destination = '17 amsterdam place, staten island, new york'
    print(main(start, destination))
|
import numpy as np
from sklearn.datasets import load_breast_cancer

# 1. Data
datasets = load_breast_cancer()
print(datasets.DESCR)
print(datasets.feature_names)
x = datasets.data
y = datasets.target
print(x.shape) # (569, 30)
print(y.shape) # (569,)
# print(x[:5])
# print(np.max(x), np.min(x))
# print(y)

# Preprocessing (do it yourself): train_test_split, minmax
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=45)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# fit on the training split only, then apply to both (avoids test leakage)
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

# 2. Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(256, activation='relu', input_shape=(30,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# The final layer's activation is sigmoid (binary output)

# 3. Compile, train
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# For binary classification, use binary_crossentropy as the loss
# Prefer accuracy as the metric
model.fit(x_train, y_train, epochs=200, batch_size=1, validation_split=0.2, verbose=2)
loss, acc = model.evaluate(x_test, y_test)
print('loss :', loss)
print('acc :', acc)
print('========================')
# Exercise 1: raise acc above 0.985
# loss : 0.6525935530662537
# acc : 0.9912280440330505
# Exercise 2: print out some predictions
y_pred = np.round(model.predict(x_test))
for i in range(10,20):
    true = y_test[i]
    pred = y_pred[i]
    # labels below mean "actual" / "predicted" (runtime strings kept verbatim)
    print('실제 :', true, '| 예측 :', pred)
print('========================')
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from warnings import warn
import csv
import sys
import codecs
import re
from collections import defaultdict
from collections import Counter
from itertools import zip_longest
from operator import itemgetter
from array import array
class defaultlist(list):
    """A list that grows automatically on out-of-range assignment.

    Assigning past the current end zero-fills the gap, then stores the value.
    Negative and in-range indices behave exactly like a plain list.
    """
    def __setitem__(self, index, value):
        missing = index + 1 - len(self)
        if missing > 0:
            self.extend([0] * missing)
        list.__setitem__(self, index, value)
# Aggregate per-workstation PLM usage counts from a ';'-separated export,
# then print a ranking and a histogram of usage buckets.
csv_file = sys.argv.pop()
is_number = re.compile(r'\d+')
# captures the channel token following "plm" in the Channel column
s_plm = re.compile(r'^.*plm([a-zA-Z0-9]+).*')
is_filter = (
    re.compile(r'OPER', re.I),
    #re.compile('3DEXP', re.I)
)
scan = defaultdict(dict)
count = Counter()
full_count = dict()
channels = Counter()
header = tuple()
max = 0  # NOTE(review): shadows the builtin max()
with codecs.open(csv_file, 'r') as fh: # , 'cp1252' (if the file is pure Windows)
    for row in fh:
        if is_number.search(row):
            #~ next_loop = True
            #~ for filter in is_filter:
            #~     if not filter.search(row):
            #~         next_loop = True
            #~         break
            #~     else:
            #~         next_loop = False
            #~ if next_loop:
            #~     continue
            # data rows before the header line are skipped
            if not header:
                continue
            #print(row)
            data = dict(zip(header, row.strip().split(';')))
            #print(data)
            try:
                channel = s_plm.sub(r'\1', data['Channel'].lower().split()[1])
                poste = data['Poste'].lower()
                domain = data['Domaine'].lower()
                avion = data['Avion'].lower()
                annee = int(data['Année'])
                tsusage = int(data['TSUsage Count'])
                usage = int(data['Usage Count']) + tsusage
            except:
                # Unused workstations: keep them so they are not excluded
                # from the stats (counted with usage 0).
                # NOTE(review): this branch does not set `poste`/`channel`;
                # if the very first data row lands here, the code below
                # raises NameError — confirm intended.
                avion = 'EMPTY'
                domain = 'oper'
                annee = 2019
                tsusage = 0
                usage = 0
            # Exclusion
            if not domain == 'oper':
                continue
            if avion == 'batch':
                continue
            if poste in full_count:
                pass
            else:
                full_count[poste] = {
                    'years': Counter(),
                    'total': 0,
                    'channel': Counter(),
                }
            full_count[poste]['years'][annee] += usage
            full_count[poste]['channel'][channel] += usage
            full_count[poste]['total'] += usage
            channels[channel] += usage
            # track the highest per-workstation total seen
            if full_count[poste]['total'] > max:
                max = full_count[poste]['total']
            #print(full_count)
            #print(channel, poste, domain, avion, annee, tsusage, usage)
            #
        else:
            # non-numeric row: this is the (single) header line
            if row.startswith(';'):
                continue
            header = tuple(row.strip().split(';'))
            #print(header)
i=0
channels_list = channels.keys()
print("#poste_range\ttotal\t", "\t".join(channels_list), "#poste\n\"Lancement par poste plm\"")
step = 10
stop = int(max/step)+1
d = [0 for i in range(0, stop)] #array('i')
# rank workstations by ascending total usage; stop past a total of 50
for poste, usage in sorted(full_count.items(), key=lambda x: x[1]['total'] , reverse=False):
    fmt = "{}\t{}\t"+"{}\t" * len(channels_list)+"#{}"
    plm = [usage['channel'][channel] for channel in channels_list]
    print(fmt.format(i, usage['total'], *plm, poste))
    total = usage['total']
    if total > 50:
        break
    if total == 0:
        div_slot = 0
        #s= input("Pause [Enter] -->")
        #print(div_slot, poste, usage)
    else:
        div_slot = int(total / step)
        if div_slot == 0:
            div_slot = 1
    d[div_slot] += 1
    i += 1
max = 10
print("\n\n#bloc_usage<\tnb_poste\n\"Usage split by step "+str(step)+"\"")
for i, val in enumerate(d):
    #if val == 0:
    #    continue
    print("{}\t{}".format(i*step, val))
    if i > max:
        break
|
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
import select2rocks
from testproj.testapp.models import Beach, SelectedBeach
class SelectedBeachForm(forms.ModelForm):
    """Demo form exercising every select2rocks field/backend combination."""

    class Meta:
        exclude = []
        model = SelectedBeach
        fields = [
            'json_beach',
            'tastypie_beach_contains',
            'tastypie_beach_starts',
            'rest_framework_beach',
            'rest_framework_beach_multi',
            'required_boolean',
        ]

    # Default JS backend
    json_beach = select2rocks.Select2ModelChoiceField(
        queryset=Beach.objects.all(),
        widget=select2rocks.AjaxSelect2Widget(
            url_name='json_beaches',
            select2_options={
                'placeholder': _("Select a beach"),
                'ajax': {
                    'quietMillis': 50
                }
            }))

    # Tastypie JS backend
    tastypie_beach_contains = select2rocks.Select2ModelChoiceField(
        queryset=Beach.objects.all(),
        required=False,
        widget=select2rocks.AjaxSelect2Widget(
            url_name='api_dispatch_list',
            url_kwargs={'resource_name': 'beach', 'api_name': 'v1'},
            select2_options={'backend': 'tastypie'}))

    # Tastypie JS backend but overrides queryKey
    tastypie_beach_starts = select2rocks.Select2ModelChoiceField(
        queryset=Beach.objects.all(),
        required=False,
        widget=select2rocks.AjaxSelect2Widget(
            url_name='api_dispatch_list',
            url_kwargs={'resource_name': 'beach', 'api_name': 'v1'},
            select2_options={'backend': 'tastypie', 'queryKey': 'name__istartswith'}))

    # REST Framework backend
    rest_framework_beach = select2rocks.Select2ModelChoiceField(
        queryset=Beach.objects.all(),
        required=False,
        widget=select2rocks.AjaxSelect2Widget(
            url_name='rest_beach_list',
            select2_options={'placeholder': _("Select a beach"),
                             'backend': 'restframework'}))

    # Multi select REST framework
    rest_framework_beach_multi = select2rocks.Select2ModelMultipleChoiceField(
        queryset=Beach.objects.all(),
        required=False,
        widget=select2rocks.AjaxSelect2Widget(
            url_name='rest_beach_list',
            select2_options={'placeholder': _("Select beaches"),
                             'backend': 'restframework'}))

    # always-required checkbox used to trigger validation errors in the demo
    required_boolean = forms.BooleanField(
        required=True,
        help_text="Leave blank to raise a form error and test form restoration")
|
# Passphrase vs fixed-length ASCII password generator with entropy estimates.
from secrets import randbelow
from math import factorial
from math import log2 as ln  # NOTE(review): alias says "ln" but this is log base 2
"""EUA passwords must...
- start with a letter
- have at least one number
- have at least one lowercase and one uppercase
- be EXACTLY 8 characters long (WHYYY)
"""
PP_LENGTH = 3          # words per passphrase
ASCII_PWD_LENGTH = 8
TRIALS = 10            # sample passwords printed below
EUA = True             # also generate an 8-char ASCII password per trial
separators = True      # put a random symbol between words
titlecase = True       # Title-Case each word
numbers = 2 # 0-9 digits appended to the passphrase
# NOTE(review): breakpoints is built but never consulted — the strength
# labels below use hard-coded thresholds instead.
breakpoints = {}
breakpoints[20] = "very weak" # 0-20
breakpoints[40] = "weak" # 20-40
breakpoints[60] = "good enough" # 40-60
breakpoints[80] = "strong" # 60-80
breakpoints[99999999] = "very strong" # 80+
wordlist = []
with open('google-10000-english-usa-no-swears-medium.txt') as f:
    for word in f:
        wordlist.append(word.split()[0])
length = len(wordlist)
sep_num = (PP_LENGTH-1)
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" #!@#$%^&*
# NOTE(review): word, separator and digit choices are independent, so the
# total permutation count should presumably be a *product*, not a sum —
# confirm before trusting the entropy figures below.
perms = length ** PP_LENGTH + (8 ** sep_num) + (9 ** sep_num)
ascii_perms = len(charset) ** ASCII_PWD_LENGTH # 26 upper + 26 lower + 10 numbers
# print(length)
# print("number of words: {}".format(length))
for j in range(TRIALS):
    pp = ""
    words = []
    seps = []  # NOTE(review): unused
    for i in range(PP_LENGTH):
        if titlecase:
            words.append(wordlist[randbelow(length)].title())
        else:
            words.append(wordlist[randbelow(length)])
    pp = words[0]
    for word in words[1:]:
        # random symbol separator between consecutive words
        sep = "!@#$%^&*"[randbelow(8)] if separators else ""
        pp += sep + word
    for i in range(numbers):
        pp += str(randbelow(10))
    print(pp)
    ####### Ascii #######
    if EUA:
        pw = ""
        for i in range(ASCII_PWD_LENGTH):
            pw += charset[randbelow(len(charset))]
        print(pw)
# perms2 = 26 ** 3 + 9 ** 5
passphrase = {"name": "PASSPHRASE", "perms": perms, "length": PP_LENGTH}
ascii_pwd = {"name": "ASCII PASSWORD", "perms": ascii_perms, "length": ASCII_PWD_LENGTH}
# methods = [passphrase, ascii_pwd]
methods = [passphrase]
MY_LAPTOP_GUESS_SPEED = 2652509
for method in methods:
    tperms = method['perms']
    # print("*"*8,'calulations for method {}'.format(method['name']),"*"*20)
    # print(" passphrase possible permutations are {:,}".format(tperms))
    entropy = ln(tperms)  # bits of entropy (log2 of the permutation count)
    # print(" entropy of {} is {:.6}".format(method['name'].lower(), entropy))
    if (entropy < 20):
        print("very weak")
    elif (20 <= entropy < 35):
        print("weak")
    elif (35 <= entropy < 55):
        print("good enough")
    elif (55 <= entropy < 80):
        print("strong")
    else:
        print("very strong")
    print("{:.4} bits of entropy".format(entropy))
    guesses = 10000000 # guesses per second
    for i in range(1):
        # s = perms // guesses
        # brute-force time, broken down into years/days/hours/minutes/seconds
        s = tperms // guesses
        m, s = divmod(s, 60)
        hr, m = divmod(m, 60)
        d, hr = divmod(hr, 24)
        yr, d = divmod(d, 365)
        cracked = "{:,d} years, {:d} days,\n {:d} hours, {:d} minutes,\n and {:d} seconds".format(yr, d, hr, m, s)
        print(" At {:,} guesses per second,\n it would take {} to crack".format(guesses, cracked))
        guesses *= 10
|
from django import template
register = template.Library()
@register.filter(name = "check")
def check(value, arg):
    """Template filter: return True when *value* is contained in arg.all().

    *arg* is expected to expose an ``all()`` method (e.g. a Django related
    manager / queryset) — presumably a manager; confirm against templates.
    """
    # The membership test is already a boolean — no if/else needed.
    return value in arg.all()
|
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import matplotlib as mpl
import numpy as np
import scipy.ndimage as ndimage
from mpl_toolkits.mplot3d import art3d
# Build a 3D figure: a flat-shaded surface for the "positive" z column and a
# colour-mapped wireframe (re-plotted as a Line3DCollection so it can carry
# a colorbar) for the "negative" z column.
fig = plt.figure()
ax = fig.gca(projection='3d')  # NOTE(review): gca(projection=...) removed in mpl>=3.6
#amp_phase = np.genfromtxt('Data_1.0Hz.txt',skip_header=1)
data = np.genfromtxt('data.txt',skip_header=1)
# Assumed column layout: 0=z+, 1=z-, 2=x, 3=y — TODO confirm against data.txt.
z_positive = data[:,0]
z_negative = data[:,1]
y = data[:,3]
x = data[:,2]
# Regular grid spanning the scattered sample positions (default 50 points).
xi = np.linspace(min(x), max(x))
yi = np.linspace(min(y), max(y))
X, Y = np.meshgrid(xi, yi)
# NOTE(review): matplotlib.mlab.griddata was removed in matplotlib 3.1;
# scipy.interpolate.griddata is the modern replacement.
Z_pos = griddata(x, y, z_positive, xi, yi,interp='linear')
Z_neg = griddata(x, y, z_negative, xi, yi,interp='linear')
# Smooth both interpolated fields before plotting.
Z_neg = ndimage.gaussian_filter(Z_neg, sigma=4.0, order=0)
Z_pos = ndimage.gaussian_filter(Z_pos, sigma=4.0, order=0)
#surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.jet,
#                       linewidth=1, antialiased=True)
surf = ax.plot_surface(X, Y, Z_pos, rstride=1, cstride=1, shade=False, linewidth=1, antialiased=True)
wire = ax.plot_wireframe(X, Y, Z_neg, rstride=3, cstride=3, cmap=cm.jet, linewidth=0.1, antialiased=True)
# Retrieve data from internal storage of plot_wireframe, then delete it.
# (A wireframe cannot be colour-mapped directly, so its segments are
# re-plotted below as a Line3DCollection; relies on private _segments3d.)
nx, ny, _ = np.shape(wire._segments3d)
wire_x = np.array(wire._segments3d)[:, :, 0].ravel()
wire_y = np.array(wire._segments3d)[:, :, 1].ravel()
wire_z = np.array(wire._segments3d)[:, :, 2].ravel()
wire.remove()
# Create data for a LineCollection: pair each vertex with its predecessor.
wire_x1 = np.vstack([wire_x, np.roll(wire_x, 1)])
wire_y1 = np.vstack([wire_y, np.roll(wire_y, 1)])
wire_z1 = np.vstack([wire_z, np.roll(wire_z, 1)])
# Drop the wrap-around segments that np.roll produces at each row start.
to_delete = np.arange(0, nx*ny, ny)
wire_x1 = np.delete(wire_x1, to_delete, axis=1)
wire_y1 = np.delete(wire_y1, to_delete, axis=1)
wire_z1 = np.delete(wire_z1, to_delete, axis=1)
scalars = np.delete(wire_z, to_delete)  # colour each segment by its z value
segs = [list(zip(xl, yl, zl)) for xl, yl, zl in \
        zip(wire_x1.T, wire_y1.T, wire_z1.T)]
# Plot the wireframe as a Line3DCollection so set_array/colorbar work.
my_wire = art3d.Line3DCollection(segs, cmap=cm.jet)
my_wire.set_array(scalars)
ax.add_collection(my_wire)
plt.colorbar(my_wire)
#surf = ax.plot_surface(X, Y, Z_pos, rstride=1, cstride=1, cmap=cm.jet, alpha=1, linewidth=1, antialiased=True)
ax.set_zlim3d(np.min(Z_pos), np.max(Z_neg))
#fig.colorbar(surf)
plt.show()
from enum import Enum
class GameControls(Enum):
    """Emoji-labelled game controls.

    Each member's value is an ``(emoji, code)`` tuple: the emoji shown on
    the reaction button and the numeric code used internally.
    """
    UP = "⬆", 0
    DOWN = "⬇", 1
    LEFT = "⬅", 2
    RIGHT = "➡", 3
    SWORDS = "⚔", 4
    SHIELD = "🛡", 5
    FLAG = "🏳", 6
    HEARTH = "💗", 7
    WORLD = "🗺", 8

    @classmethod
    def all_emojis(cls):
        """Return every control's emoji, in declaration order."""
        # Comprehension replaces the manual accumulate-and-append loop.
        return [control.value[0] for control in cls]

    @classmethod
    def get_emojis(cls, searched):
        """Return the numeric code for *searched* emoji, or None if unknown."""
        for control in cls:
            if control.value[0] == searched:
                return control.value[1]
        return None
|
__author__ = 'timothyahong'
import re
def extract_cap_values(data_parameters, data_file):
    """Return the leading capacitive-sensor values from *data_file*.

    BUG FIX: the original sliced to ``_num_cap_values(...) - 1``, silently
    dropping the last cap value — which extract_other_sensors() (starting
    at the full count) never picked up either, so one value was lost.
    """
    return data_file[:_num_cap_values(data_parameters)]
def extract_other_sensors(data_parameters, data_file):
    # Everything after the capacitive values belongs to the other sensors.
    return data_file[_num_cap_values(data_parameters):]
def _num_cap_values(data_parameters):
count = 0
for sensor_name in data_parameters['Format']:
if re.match('\cap', sensor_name) is not None:
count += 1
return count |
from pyspark import SparkContext, HiveContext
# Smoke test: create a Hive table, append a constant row several times,
# then read the table back and show it.
sc = SparkContext(appName = "test")
sqlc = HiveContext(sc)
sqlc.sql("create table if not exists asdf1(id string, name string)")
#sqlc.sql("insert into asdf select * from (select stack(3, 1.1, 'A', 1.2, 'b', 1.3, 'C')) t")
#sqlc.sql("insert into asdf select * from (select 2, 3.14) t")
data = sqlc.sql("select 1, 'AA'")
for i in range(5):
    data.write.mode("append").saveAsTable("asdf1")
# BUG FIX: the table created and written above is 'asdf1'; the original
# selected from 'asdf', which this script never creates.
res = sqlc.sql("select * from asdf1")
res.show()
|
from flask import Flask, render_template, redirect, request, url_for, session, flash, send_from_directory
from flask_pymongo import PyMongo
from pymongo import MongoClient
from werkzeug.utils import secure_filename
import os
from os.path import join, dirname, realpath
# Upload folder configuration.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])  # accepted image types
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'static/uploads')  # under static/ so uploads are web-servable
app = Flask(__name__)
# SECURITY(review): the secret key and the MongoDB credentials below are
# hard-coded in source — move both into environment variables/config.
app.secret_key = 'yoursecretkey'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#app.config['MONGO_DBNAME'] = 'isgproj_db'
#app.config['MONGO_URI'] = 'mongodb://achrefothmeni:barcelona10@ds149138.mlab.com:49138/isgproj_db'
# Connect to MongoDB (mLab-hosted instance).
mongo = MongoClient('mongodb://achrefothmeni:barcelona10@ds149138.mlab.com:49138/isgproj_db')
db = mongo.isgproj_db
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('home.html')
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
@app.route('/publish', methods=['GET','POST'])
def publish_events():
    """Create a new event (login required); GET renders the publish form."""
    if 'logged_in' not in session:
        return render_template('signup.html', err_log=True)
    else:
        if request.method == 'POST':
            # BUG FIX: the original read request.files['file'] *before*
            # checking that 'file' was present, so a request without a file
            # part raised KeyError instead of flashing the error message.
            if 'file' not in request.files:
                flash('No file part')
                return redirect(request.url)
            file = request.files['file']
            # If the user does not select a file, browsers submit an empty
            # part with no filename.
            if file.filename == '':
                flash('No selected file')
                return redirect(request.url)
            title = request.form['title']
            description = request.form['description']
            club = request.form['club']
            place = request.form['place']
            price = request.form['price']
            date = request.form['date']
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                # NOTE(review): the file is saved under the *sanitized* name
                # but the raw file.filename is stored in the DB — confirm
                # these always match for the filenames you accept.
                event = db.events
                event.insert({'title':title, 'description':description, 'club':club, 'place':place, 'price':price,'date':date ,'image':file.filename})
                return redirect (url_for('events'))
        else:
            return render_template('publish-events.html')
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded image from the uploads folder."""
    return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
@app.route('/events')
def events():
    """List all published events with their total count.

    FIX: Cursor.count() was removed in PyMongo 4.x; materialize the cursor
    once and use len() for the count instead.
    """
    event_list = list(db.events.find({}))
    return render_template('events.html', events=event_list, nb=len(event_list))
@app.route('/signin', methods=['GET','POST'] )
def signin():
    """Log a user in by username/password; GET renders the login form."""
    if request.method != 'POST':
        return render_template('login.html', err=False)
    users = db.users
    account = users.find_one({'username': request.form['username']})
    # SECURITY(review): passwords are stored and compared in plain text;
    # they should be hashed (e.g. werkzeug.security) — behavior left as-is.
    if account and account['password'] == request.form['password']:
        session['username'] = request.form['username']
        session['logged_in'] = True
        return redirect(url_for('publish_events'))
    # Unknown user and wrong password render the same error state.
    return render_template('login.html', err=True)
@app.route('/signup', methods=['GET','POST'] )
def signup():
    """Register a new user; GET renders the signup form."""
    if request.method != 'POST':
        return render_template('signup.html', err=False)
    users = db.users
    name = request.form['username']
    if users.find_one({'username': name}) is not None:
        # Username already taken.
        return render_template('signup.html', err=True)
    # SECURITY(review): password stored in plain text — behavior preserved.
    users.insert({'username': name,
                  'first_name': request.form['first_name'],
                  'last_name': request.form['last_name'],
                  'password': request.form['password']})
    return redirect(url_for('signin'))
@app.route('/signout')
def signout():
    """Clear the login flag and return to the sign-in page."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('signin'))
if __name__ == '__main__':
    # debug=True enables the reloader/debugger — development only.
    app.run(debug=True)
|
import uuid
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from users.models import UserProfile
class Source(models.Model):
    """Geo-tagged source record submitted by a user profile.

    lng/lat are stored both as plain floats and as a PostGIS point
    (``location``), kept in sync by save().
    """
    userprofile = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    source_uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    photo_url = models.URLField()
    photo_base64 = models.TextField(default='')
    source_type = models.TextField()
    lng = models.FloatField()
    lat = models.FloatField()
    address = models.TextField(default='')
    modified_address = models.TextField(default='')
    village_name = models.TextField(default='')
    description = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    qualified_status = models.TextField(default='待審核')  # default means "pending review"
    location = models.PointField(geography=True, srid=4326, default='POINT(0.0 0.0)')

    def __str__(self):
        return "%s %s" % (self.userprofile.phone, str(self.source_uuid))

    def save(self, *args, **kwargs):
        # Recompute the PostGIS point from lng/lat on every save.
        # BUG FIX: accept and forward *args — the original (**kwargs only)
        # signature rejected positional save() arguments.
        self.location = Point(float(self.lng), float(self.lat))
        super(Source, self).save(*args, **kwargs)
|
#=========================================================================
# pisa_sra_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Single directed test: sra of 0x00008000 by 3 must give 0x00001000.

    The nops fully separate the mfc0/sra/mtc0 so no bypass paths are
    exercised.
    """
    return """
    mfc0 r1, mngr2proc < 0x00008000
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    sra r3, r1, 0x03
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 0x00001000
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
    """Destination-bypass tests: vary the nop distance (5 down to 0) between
    the sra and the instruction consuming its result."""
    cases = [
        (5, 0x08000000),
        (4, 0x40000000),
        (3, 0x20000000),
        (2, 0x10000000),
        (1, 0x08000000),
        (0, 0x04000000),
    ]
    # All sources are positive, so a logical >> 1 equals the arithmetic sra.
    return [gen_rimm_dest_byp_test(delay, "sra", src, 1, src >> 1)
            for delay, src in cases]
#-------------------------------------------------------------------------
# gen_src_byp_test
#-------------------------------------------------------------------------
def gen_src_byp_test():
    """Source-bypass tests: vary the nop distance (5 down to 0) between the
    instruction producing the source and the sra consuming it."""
    cases = [
        (5, 0x02000000),
        (4, 0x01000000),
        (3, 0x00800000),
        (2, 0x00400000),
        (1, 0x00200000),
        (0, 0x00100000),
    ]
    # All sources are positive, so a logical >> 1 equals the arithmetic sra.
    return [gen_rimm_src_byp_test(delay, "sra", src, 1, src >> 1)
            for delay, src in cases]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Test where the destination register equals the source register."""
    return [
        gen_rimm_src_eq_dest_test( "sra", 0x00800000, 1, 0x00400000 ),
    ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Directed value tests covering sign extension: negative (0x8...),
    positive (0x7...) and alternating-bit (0x81818181) sources at shift
    amounts 0, 1, 7, 14 and 31."""
    return [
        gen_rimm_value_test( "sra", 0x80000000, 0, 0x80000000 ),
        gen_rimm_value_test( "sra", 0x80000000, 1, 0xc0000000 ),
        gen_rimm_value_test( "sra", 0x80000000, 7, 0xff000000 ),
        gen_rimm_value_test( "sra", 0x80000000, 14, 0xfffe0000 ),
        gen_rimm_value_test( "sra", 0x80000001, 31, 0xffffffff ),
        gen_rimm_value_test( "sra", 0x7fffffff, 0, 0x7fffffff ),
        gen_rimm_value_test( "sra", 0x7fffffff, 1, 0x3fffffff ),
        gen_rimm_value_test( "sra", 0x7fffffff, 7, 0x00ffffff ),
        gen_rimm_value_test( "sra", 0x7fffffff, 14, 0x0001ffff ),
        gen_rimm_value_test( "sra", 0x7fffffff, 31, 0x00000000 ),
        gen_rimm_value_test( "sra", 0x81818181, 0, 0x81818181 ),
        gen_rimm_value_test( "sra", 0x81818181, 1, 0xc0c0c0c0 ),
        gen_rimm_value_test( "sra", 0x81818181, 7, 0xff030303 ),
        gen_rimm_value_test( "sra", 0x81818181, 14, 0xfffe0606 ),
        gen_rimm_value_test( "sra", 0x81818181, 31, 0xffffffff ),
    ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """100 random sra tests; expected values computed with Python's >>.

    NOTE(review): 'xrange' implies this file targets Python 2 (consistent
    with the original pymtl tooling) — confirm before porting to Python 3.
    """
    asm_code = []
    for i in xrange(100):
        src = Bits( 32, random.randint(0,0xffffffff) )
        imm = Bits( 5, random.randint(0,31) )
        # src.int() is signed, so Python's >> performs the arithmetic shift.
        dest = Bits( 32, src.int() >> imm.uint() )
        asm_code.append( gen_rimm_value_test( "sra", src.uint(), imm.uint(), dest.uint() ) )
    return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
    asm_test( gen_basic_test ),
    asm_test( gen_dest_byp_test ),
    asm_test( gen_src_byp_test ),
    asm_test( gen_srcs_dest_test ),
    asm_test( gen_value_test ),
    asm_test( gen_random_test ),
])
def test( name, test ):
    """Assemble each generated program and run it on the ISA simulator."""
    sim = PisaSim( trace_en=True )
    sim.load( pisa_encoding.assemble( test() ) )
    sim.run()
|
# -*- coding: utf8 -*-
import os
import time
import requests
import datetime
import json
import selenium
from e_postman import send_mail
from selenium import webdriver
from selenium.webdriver.common import desired_capabilities
# Timestamp (UTC+8) used to name this run's log file.
log_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=+8)).strftime("%Y-%m-%d_%H-%M-%S")
USE_REMOTE_WEBDRIVER = True  # True: Selenium Grid at localhost:4444; False: local Chrome
# Mail settings come from the environment (set by the CI workflow).
send_from = os.environ.get("SEND_FROM")
send_to_list = os.environ.get("SEND_TO").split(",")  # NOTE(review): AttributeError if SEND_TO unset
smtp_password = os.environ.get("SMTP_PASSWORD")
# Echo the GitHub Actions context for debugging CI runs.
print(os.environ.get("GITHUB_WORKFLOW"))
print(os.environ.get("GITHUB_RUN_ID"))
print(os.environ.get("GITHUB_RUN_NUMBER"))
print(os.environ.get("GITHUB_ACTION"))
print(os.environ.get("GITHUB_WORKSPACE"))
def log(a_str, slient=False):
    """Append *a_str* to this run's log file; also echo unless slient.

    ('slient' [sic] is kept — callers pass it by keyword.)
    """
    if not slient:
        print(a_str)
    global log_time
    # Robustness FIX: the original raised FileNotFoundError when ./logs did
    # not exist yet (e.g. on a fresh checkout).
    os.makedirs('logs', exist_ok=True)
    with open(f'logs/log_{log_time}.txt', 'a') as f:
        print(a_str, file=f)
def need_browser(func):
    """Decorator: create a webdriver, pass it as first argument, clean up.

    Uses the remote Selenium Grid when USE_REMOTE_WEBDRIVER is set,
    otherwise a local Chrome.
    """
    def webdriver_setup(*args, **kwargs):
        if USE_REMOTE_WEBDRIVER:
            caps = desired_capabilities.DesiredCapabilities.CHROME
            driver = webdriver.Remote(command_executor="http://127.0.0.1:4444/wd/hub",
                                      desired_capabilities=caps)
        else:
            driver = webdriver.Chrome()
        try:
            return func(driver, *args, **kwargs)
        finally:
            # BUG FIX: the original only closed the driver on success,
            # leaking browser sessions whenever func raised.
            driver.close()
    return webdriver_setup
@need_browser
def get_all_brands_from_sitemap(driver):
    """Scrape the sitemap and return {brand name: brand homepage URL}.

    Only links under /contracts/ are kept; the section index itself and
    the disclaimer page are excluded.
    """
    brands_homepage_map = {}
    log("<-------------------- get_all_brands_from_sitemap -------------------->")
    driver.get("https://www.davincilifestyle.com/sitemap/")
    raw_elements = driver.find_elements_by_css_selector("li .menu-item.menu-item-type-custom.menu-item-object-custom>a")
    for raw_element in raw_elements:
        link = raw_element.get_attribute("href")
        if link and link.startswith("https://www.davincilifestyle.com/contracts/") \
                and link != "https://www.davincilifestyle.com/contracts/" \
                and link != "https://www.davincilifestyle.com/contracts/disclaimer/":
            log(f"{raw_element.text} -> {link}", slient=True)
            brands_homepage_map[raw_element.text] = link
    log("'get_all_brands_from_sitemap' got {} brands".format(len(brands_homepage_map)))
    return brands_homepage_map
@need_browser
def get_all_brands_from_contracts(driver):
    """Scrape the contracts index and return {brand name: homepage URL}.

    Cells missing either a logo link or a caption span are skipped; brand
    names are normalized to Title Case.
    """
    brands_homepage_map = {}
    log("<-------------------- get_all_brands_from_contracts -------------------->")
    driver.get("https://www.davincilifestyle.com/contracts/contracts-brands-name/")
    raw_elements = driver.find_elements_by_css_selector(".wpb_column.vc_column_container.vc_col-sm-2")
    for raw_element in raw_elements:
        try:
            brand_logo = raw_element.find_element_by_css_selector(".vc_single_image-wrapper.vc_box_outline.vc_box_border_white")
            brand_name = raw_element.find_element_by_css_selector("div.wpb_wrapper>p>span")
        except selenium.common.exceptions.NoSuchElementException:
            continue
        link = brand_logo.get_attribute("href")
        brand_name = brand_name.text
        if isinstance(brand_name, str):
            brand_name = brand_name.title()
        else:
            continue
        if link and link.startswith("https://www.davincilifestyle.com/contracts/") and link != "https://www.davincilifestyle.com/contracts/":
            log(f"{brand_name} -> {link}", slient=True)
            brands_homepage_map[brand_name] = link
    log("'get_all_brands_from_contracts' got {} brands".format(len(brands_homepage_map)))
    return brands_homepage_map
@need_browser
def get_catalogues(driver, brand, homepage):
    """Return {book name: book URL} for one brand, or False on failure.

    Side effects: creates files/<brand>/ and saves the brand logo there.
    Returns False when the homepage redirects (brand gone) or the logo
    element is missing (brand presumably has no catalogue).
    """
    log(f"<---------- {brand} ---------->")
    book_map = {}
    # Visit brand homepage; a missing brand redirects to the site root.
    driver.get(homepage)
    time.sleep(3)
    if driver.current_url == "https://www.davincilifestyle.com/":
        log("Brand: {} -> {} redirected to homepage.".format(brand, homepage))
        return False
    # Try to fetch the brand logo; if absent, treat the brand as having no
    # catalogue.
    try:
        logo_element = driver.find_element_by_css_selector(".vc_single_image-img.lazyloaded")
        logo_link = logo_element.get_attribute("src")
        os.makedirs("files/{}".format(brand), exist_ok=True)
        r = requests.get(logo_link, stream=True, allow_redirects=False)
        if r.status_code == 200:
            open('files/{}/{}_logo.jpg'.format(brand, brand), 'wb').write(r.content)
            log("========== LOGO SUCCESS {} -> {} ==========".format(brand, homepage))
            del r
    except selenium.common.exceptions.NoSuchElementException:
        log("!!!!!!!!!! LOGO FAILED {} -> {} !!!!!!!!!!".format(brand, homepage))
        return False
    # Click the 'CATALOGUES' tab on the brand homepage.
    titles = driver.find_elements_by_css_selector("li.vc_tta-tab")
    for title in titles:
        log(title.find_element_by_css_selector("a>span").text)
        if title.find_element_by_css_selector("a>span").text == "CATALOGUES":
            title.click()
            time.sleep(4)  # wait for the tab's lazy content to render
            break
    # Each catalogue book is a grid cell with a caption span and image link.
    books_element = driver.find_elements_by_css_selector("div.wpb_column.vc_column_container.vc_col-sm-3")
    book_sum = 0
    for book_element in books_element:
        try:
            book_name = book_element.find_element_by_css_selector('span[style]').text.title()
            # Strip the '#p<page>' fragment so the link points at the book root.
            book_link = book_element.find_element_by_css_selector("a.vc_single_image-wrapper").get_attribute("href").split("#p")[0]
            log(f"{book_name} -> {book_link}")
            book_map[book_name] = book_link
            book_sum += 1
        except selenium.common.exceptions.NoSuchElementException:
            continue
    log(f"<---------- {brand} SUM: {book_sum} ---------->")
    return book_map
def check_new_brands(existing_brands, current_brands):
    """Return catalogue info for brands in *current_brands* that are absent
    from *existing_brands* (compared by slash-normalized homepage URL).

    Result: {brand: {"brand", "link", "catalogues"}}; brands whose
    get_catalogues() call returns False are skipped.
    """
    log("<-------------------- check_new_brands -------------------->")
    all_new_brands = {}
    new_brand_books = {}
    for brand_name, brand_link in current_brands.items():
        # Normalize with a trailing slash so URL comparison is consistent.
        if not brand_link.endswith("/"):
            brand_link = brand_link + "/"
        if brand_link not in existing_brands.values():
            all_new_brands.update({brand_name: brand_link})
            log("We do not have brand: {} -> {}".format(brand_name, brand_link), slient=True)
    for new_brand_name, new_brand_link in all_new_brands.items():
        res = get_catalogues(brand=new_brand_name, homepage=new_brand_link)
        if res:
            new_brand_books[new_brand_name] = {
                "brand": new_brand_name,
                "link": new_brand_link,
                "catalogues": res
            }
    log("New brand catalogues need to download:\n{}".format(json.dumps(new_brand_books, indent=2)))
    return new_brand_books
def download_img(page_num, book_link, brand, book_name):
    """Fetch one catalogue page image to files/<brand>/<book>/<page>.jpg.

    Returns True on success, False on HTTP 301 (first page past the end of
    the book).  Other outcomes are retried (up to 11 attempts with 5 s
    pauses); then ValueError is raised.
    """
    img_url = f"{book_link}{page_num}.jpg"
    log("☐ " + img_url, slient=False)
    retries = 0
    while retries < 11:
        try:
            r = requests.get(img_url, stream=True, allow_redirects=False, timeout=20)
            if r.status_code == 200:
                open(f'files/{brand}/{book_name}/{page_num}.jpg', 'wb').write(r.content)
                del r
                # Guard against empty files: re-download until non-empty.
                # NOTE(review): loops indefinitely if the server keeps
                # returning an empty body — confirm that is acceptable.
                while os.path.getsize(f'files/{brand}/{book_name}/{page_num}.jpg') <= 0:
                    rn = requests.get(img_url, stream=True, allow_redirects=False, timeout=20)
                    open(f'files/{brand}/{book_name}/{page_num}.jpg', 'wb').write(rn.content)
                    del rn
                log(f'☑ files/{brand}/{book_name}/{page_num}.jpg', slient=True)
                return True
            elif r.status_code == 301:
                # The site 301-redirects requests for pages past the last one.
                log(f"{brand}->{book_name} Max page: {page_num - 1}")
                del r
                return False
            else:
                log(r.status_code)
                raise ValueError(f"{img_url} got {r.status_code}!")
        except Exception:
            retries += 1
            log("!!!RETRYING!!!")
            time.sleep(5)
    raise ValueError("Max retries reached")
def download_catalogue(brand, brand_map):
    """Download every page of every catalogue book for one brand."""
    for title, link in brand_map.items():
        log(f"<----- {brand}->{title} -----")
        os.makedirs(f"files/{brand}/{title}", exist_ok=True)
        # Rewrite the book URL so it points at the mobile page images.
        parts = link.split("/")
        parts[-1] = 'files/mobile/'
        page_base = "/".join(parts)
        # Fetch pages 1, 2, ... until download_img signals the end (or the
        # 9998-page safety cap is reached).
        page = 1
        while page < 9999 and download_img(page, page_base, brand, title):
            page += 1
        log(f"----- {brand}->{title} ----->")
if __name__ == "__main__":
    # NEW BRAND pipeline: discover brands, diff against the stored list,
    # then download catalogues for anything new.
    # Get brand list from sitemap.
    brands_homepage_map = get_all_brands_from_sitemap()
    # Merge in brands found on the contracts index page.
    for key, value in get_all_brands_from_contracts().items():
        if value not in brands_homepage_map.values():
            brands_homepage_map.update({key: value})
    log("Got {} brands in total.".format(len(brands_homepage_map)))
    # Load the list of brands we already have.
    with open("lists/brands_list.json", "r", encoding='utf-8') as brands_list_file:
        existing_brands = json.load(brands_list_file)
    # Find valid brands we do not have yet and get their catalogue links.
    new_brand_books = check_new_brands(existing_brands, brands_homepage_map)
    # Download the new brands' catalogues.
    for brand_dict in new_brand_books.values():
        log("^^^^^^^^^^ {} ^^^^^^^^^^".format(brand_dict["brand"]))
        download_catalogue(brand_dict["brand"], brand_dict["catalogues"])
        # Mail notification currently disabled.
        # send_mail(
        #     send_from,
        #     send_to_list,
        #     "[DaVinci]有新的品牌{}".format(brand_dict["brand"]),
        #     "请将压缩包中的文件直接解压到Da Vinci Lifestyle/根目录下",
        #     smtp_password,
        #     files=["files/{}".format(brand_dict["brand"])]
        # )
        log("vvvvvvvvvv {} vvvvvvvvvv".format(brand_dict["brand"]))
    # NOTE(review): brands_list.json is never rewritten (the dump below is
    # commented out), so "new" brands will be re-downloaded on every run.
    # with open("lists/brands_list.yaml", "w+", encoding='utf-8') as brands_list_file:
    #     yaml.dump(brands_homepage_map, brands_list_file, Dumper=yaml.RoundTripDumper, explicit_start=True, encoding='utf-8')
|
"""empty message
Revision ID: 25ef2c40583c
Revises: bc92eafed48b
Create Date: 2019-03-06 20:19:18.238849
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '25ef2c40583c'
down_revision = 'bc92eafed48b'
branch_labels = None
depends_on = None
def upgrade():
    # Auto-generated Alembic migration; schema changes belong in a NEW
    # revision, not in edits to this one.
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('ZYN',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('university_id', sa.Integer(), nullable=True),
    sa.Column('discipline_name', sa.String(length=128), nullable=True),
    sa.Column('know', sa.String(length=255), nullable=True),
    sa.Column('can', sa.String(length=255), nullable=True),
    sa.Column('own', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('education_program',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('university_id', sa.Integer(), nullable=True),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('annotation', sa.String(length=255), nullable=True),
    sa.Column('know', sa.String(length=255), nullable=True),
    sa.Column('can', sa.String(length=255), nullable=True),
    sa.Column('own', sa.String(length=255), nullable=True),
    sa.Column('themes', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('parts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('university_id', sa.Integer(), nullable=True),
    sa.Column('discipline_name', sa.String(length=128), nullable=True),
    sa.Column('parts_id', sa.Integer(), nullable=True),
    sa.Column('parts_name', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('parts_themes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('university_id', sa.Integer(), nullable=True),
    sa.Column('discipline_name', sa.String(length=128), nullable=True),
    sa.Column('parts_id', sa.Integer(), nullable=True),
    sa.Column('themes', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('university',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('program', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('profstandard_education',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('profstandard_id', sa.Integer(), nullable=True),
    sa.Column('qualification_level', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['profstandard_id'], ['profstandard.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_profstandard_education_name'), 'profstandard_education', ['name'], unique=True)
    op.create_index(op.f('ix_profstandard_education_profstandard_id'), 'profstandard_education', ['profstandard_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    # Exact inverse of upgrade(): drop indexes first, then tables in
    # reverse creation order.
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_profstandard_education_profstandard_id'), table_name='profstandard_education')
    op.drop_index(op.f('ix_profstandard_education_name'), table_name='profstandard_education')
    op.drop_table('profstandard_education')
    op.drop_table('university')
    op.drop_table('parts_themes')
    op.drop_table('parts')
    op.drop_table('education_program')
    op.drop_table('ZYN')
    # ### end Alembic commands ###
|
from subsf2net import settings
# Default vBulletin integration settings, sourced from project settings.
VBULLETIN_CONFIG = {
    'tableprefix': settings.cfgTablePrefix,
    'superuser_groupids': settings.cfgSuGids,
    'staff_groupids': settings.cfgStaffGids,
    'standard_groupids': settings.cfgStandardGids,
    'paid_groupid': settings.cfgPaidGid,
    'not_paid_groupid': settings.cfgNotPaidGid,
    'subscription_0': settings.cfgSubsZeroGid,
    'banned_groupid': settings.cfgBannedGid
}
# A project-level VBULLETIN_CONFIG dict, if present, overrides the defaults.
if hasattr(settings, 'VBULLETIN_CONFIG'):
    VBULLETIN_CONFIG.update(settings.VBULLETIN_CONFIG)
|
import pygame
from pygame.sprite import Group
import game_functions as gf
from settings import Settings
from ship import Ship
from game_stats import Game_stats
from button import Button
from scoreboard import Scoreboard
# def check_events():
# #check for keypress and mouse events
def run_game():
    """Initialize pygame and run the Alien Invasion main loop."""
    # Initialize the game and create the screen object.
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption('Alien invasion')
    # Make a ship, a group to store all bullets, and a group for all aliens.
    ship = Ship(ai_settings, screen)
    bullets = Group()
    aliens = Group()
    stats = Game_stats(ai_settings)
    play_button = Button(ai_settings, screen, 'Play')
    sb = Scoreboard(ai_settings, screen, stats)
    gf.create_fleet(ai_settings, screen, ship, aliens, sb)
    # Main loop: events are always processed; gameplay updates only while
    # the game is active; the screen is redrawn every frame.
    while True:
        gf.check_events(ai_settings, screen, stats, sb,
                        play_button, ship, aliens, bullets)
        if stats.game_active:
            ship.update()
            bullets.update()
            gf.update_bullets(ai_settings, screen, stats,
                              sb, ship, aliens, bullets)
            gf.check_bullet_alien_collision(
                ai_settings, screen, stats, sb, ship, aliens, bullets)
            gf.update_aliens(ai_settings, stats, sb,
                             screen, ship, aliens, bullets)
        # NOTE(review): 'update_sreen' [sic] — presumably game_functions
        # exports exactly this spelling; confirm before renaming.
        gf.update_sreen(ai_settings, screen, stats, sb, ship,
                        aliens, bullets, play_button)
run_game()
|
# q5
# list1=['one','two','three','four','five']
# list2=[1,2,3,4,5]
# # k=[]
# # i=0
# # while i<len(list1):
# # k.append([list1[i],list2[i]])
# # i+=1
# # l={}
# # l.update(k)
# # print(l)
# # second method
# k={}
# for i in range(len(list1)):
# k.update({list1[i]:list2[i]})
# print(k)
# Build a record dict by pairing field names with their values.
d = ["keemaya", "17", "pune", "maharastra"]
s = ["name", "age", "live", "k"]
# zip pairs each key with its value and dict() consumes the pairs directly,
# replacing the manual index loop with k.update({...}) per element.
k = dict(zip(s, d))
print(k)
|
class Node(object):
    """A routing node with an id, coordinates and a demand.

    Nodes are identified solely by ``id`` for equality and hashing.
    """

    def __init__(self, my_id, my_node_coordinates, my_demand):
        self.id = my_id
        self.coordinates = my_node_coordinates
        self.demand = my_demand
        self.visited = False  # set True once a vehicle serves this node

    def __eq__(self, other):
        # Comparing against a non-Node is treated as a programming error.
        if not isinstance(other, Node):
            print("you tried to compare different type of object (correct: Node)")
            raise TypeError
        return self.id == other.id

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made Node unhashable in
        # Python 3 (so it could not be used in sets/dict keys); hash by id
        # to stay consistent with __eq__.
        return hash(self.id)

    def get_id(self):
        return self.id

    def get_demand(self):
        return self.demand

    def get_coordinates(self):
        return self.coordinates

    def set_visited(self, state):
        self.visited = state
class Vehicle(object):
    """A capacitated vehicle owning a Route and a current load."""

    __id = 0  # class-wide counter handing out unique vehicle ids

    def __init__(self, my_capacity):
        self.id = Vehicle.__id
        Vehicle.__id += 1
        self.capacity = my_capacity
        self.route = Route()
        self.load = 0

    def set_route(self, my_route):
        self.route = my_route

    def set_route_add_node(self, my_node):
        self.route.append_node(my_node)

    def get_capacity(self):
        return self.capacity

    def set_load(self, load):
        self.load = load

    def add_load(self, cargo):
        """Add cargo, refusing to exceed capacity (ValueError)."""
        new_load = self.load + cargo
        if new_load > self.capacity:
            raise ValueError
        self.load = new_load

    def subtract_load(self, cargo):
        """Remove cargo, refusing to go below zero (ValueError)."""
        new_load = self.load - cargo
        if new_load < 0:
            raise ValueError
        self.load = new_load

    def add_node(self, node):
        """Load the node's demand, append it to the route, mark it visited.

        Propagates ValueError from add_load/append_node unchanged.
        """
        self.add_load(node.demand)
        self.set_route_add_node(node)
        node.visited = True

    def get_load(self):
        return self.load

    def get_route(self):
        return self.route

    def update_load(self):
        """Recompute load from the route; ValueError if capacity exceeded."""
        total = 0
        for stop in self.route:
            total += stop.demand
            if total > self.capacity:
                raise ValueError
        self.load = total
class Network(object):
    """Ordered collection of Nodes."""

    def __init__(self, my_network=None):
        if isinstance(my_network, Network):
            # BUG FIX: copy the underlying node list; the original stored
            # the Network object itself, so later list operations
            # (append, sort) on self.network would fail.
            self.network = list(my_network.network)
        else:
            self.network = []

    def __iter__(self):
        for node in self.network:
            yield node

    def __getitem__(self, key):
        return self.network[key]

    def set_network(self, my_network):
        self.network = my_network

    def append_node(self, node):
        self.network.append(node)

    def get_node(self, node_id):
        """Return the node with the given id; ValueError if absent."""
        for node in self.network:
            if node.id == node_id:
                return node
        raise ValueError

    def sort_network_by_demand(self, increasing=True):
        """Sort nodes in place by demand, ascending when increasing=True.

        BUG FIX: the original passed reverse=increasing, producing a
        *decreasing* sort when increasing=True was requested.
        """
        self.network.sort(key=lambda node: node.get_demand(), reverse=not increasing)
class Fleet(object):
    """Collection of Vehicles."""

    def __init__(self, my_fleet=None):
        if isinstance(my_fleet, Fleet):
            # BUG FIX: copy the underlying vehicle list instead of storing
            # the Fleet object itself.
            self.fleet = list(my_fleet.fleet)
        else:
            self.fleet = []

    def __iter__(self):
        for vehicle in self.fleet:
            yield vehicle

    def __getitem__(self, key):
        return self.fleet[key]

    def __len__(self):
        return len(self.fleet)

    def set_fleet(self, my_fleet):
        self.fleet = my_fleet

    def append_vehicle(self, vehicle):
        self.fleet.append(vehicle)

    def get_vehicle(self, id_):
        """Return the vehicle with the given id; ValueError if absent."""
        for vehicle in self.fleet:
            if vehicle.id == id_:
                return vehicle
        print("no match found for given id!")
        raise ValueError

    def search_id_for_node(self, node_id):
        """Return the id of the vehicle whose route contains node_id, else None.

        BUG FIX: the original's for/else returned None as soon as one
        vehicle's route had been scanned, so later vehicles were never
        searched; the not-found return now comes after the outer loop.
        """
        for vehicle in self.fleet:
            for node in vehicle.route:
                if node.id == node_id:
                    return vehicle.id
        return None

    def search_for_node(self, node_id):
        """Return the vehicle whose route contains node_id, else None
        (same fix as search_id_for_node)."""
        for vehicle in self.fleet:
            for node in vehicle.route:
                if node.id == node_id:
                    return vehicle
        return None
class Route(object):
    """Ordered sequence of Nodes served by one vehicle."""

    def __init__(self):
        self.route = []

    def __iter__(self):
        for node in self.route:
            yield node

    def __getitem__(self, key):
        return self.route[key]

    def __len__(self):
        return len(self.route)

    def __bool__(self):
        return bool(self.route)

    def set_route(self, route):
        self.route = route

    def append_node(self, node):
        """Append a node; only the depot (id 1) may appear more than once.

        BUG FIX: 'node.id is 1' compared by *identity* — a CPython
        small-int caching accident and a SyntaxWarning on 3.8+; use ==.
        """
        if node not in self.route or node.id == 1:
            self.route.append(node)
        else:
            raise ValueError

    def pop_node_id(self, id_=None):
        """Pop the last node, or the node with the given id (ValueError if absent)."""
        if id_ is None:
            return self.route.pop()
        for i, node in enumerate(self.route):
            if node.id == id_:
                return self.route.pop(i)
        raise ValueError  # node not found

    def get_node_position(self, id_):
        """Return the index of the node with the given id, or None."""
        for i, node in enumerate(self.route):
            if node.id == id_:
                return i
        return None

    def set_node(self, index, node):
        """Replace the node at *index*; duplicates are rejected (ValueError)."""
        if not isinstance(node, Node):
            print("given argument is not a Node!")
            raise TypeError
        if node in self.route:
            raise ValueError
        self.route[index] = node

    def insert_node(self, index, node):
        """Insert a node at *index*; duplicates are rejected (ValueError)."""
        if not isinstance(node, Node):
            print("given argument is not a Node!")
            raise TypeError
        if node in self.route:
            print("node.id: " + str(node.id) + " already in the route!")
            raise ValueError
        self.route.insert(index, node)

    def switch_nodes_internaly(self, index1, index2):
        # Tuple swap; the (misspelled) public name is kept for compatibility.
        self.route[index1], self.route[index2] = self.route[index2], self.route[index1]

    def get_route(self):
        return self.route
|
'''
Copyright (C) 2017-2020 Bryant Moscon - bmoscon@gmail.com
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import sys
from setuptools import setup
from setuptools import find_packages
from setuptools.command.test import test as TestCommand
ld = None
try:
    # Best-effort: convert the README to reST for PyPI; fall back to no
    # long description when pypandoc/pandoc is unavailable.
    import pypandoc
    ld = pypandoc.convert_file('README.md', 'rst', format='markdown_github')
except BaseException:
    # Deliberately broad: packaging must never fail because of pandoc.
    pass
class Test(TestCommand):
    """`python setup.py test` entry point: delegates to pytest on tests/."""
    def run_tests(self):
        # Imported lazily so pytest is only required when running tests.
        import pytest
        errno = pytest.main(['tests/'])
        sys.exit(errno)
setup(
    # --- package identity ---
    name="cryptofeed",
    version="1.4.0",
    author="Bryant Moscon",
    author_email="bmoscon@gmail.com",
    description=("Cryptocurrency feed handler and synthetic NBBO feed"),
    # `ld` is the README converted to reST above; may be None if pypandoc
    # was unavailable at build time.
    long_description=ld,
    long_description_content_type='text/x-rst',
    license="XFree86",
    keywords=["cryptocurrency", "bitcoin", "btc", "feed handler", "market feed", "market data"],
    url="https://github.com/bmoscon/cryptofeed",
    # --- packaging ---
    packages=find_packages(exclude=['tests']),
    package_data={'': ['rest/config.yaml']},
    # `python setup.py test` dispatches to the pytest runner defined above.
    cmdclass={'test': Test},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8"
    ],
    tests_require=["pytest"],
    # --- hard runtime dependencies ---
    install_requires=[
        "requests>=2.18.4",
        "websockets>=7.0",
        "sortedcontainers>=1.5.9",
        "pandas",
        "pyyaml",
        "aiohttp",
        "aiodns",
        "cchardet",
        "aiofile",
        'yapic.json>=1.4.3'
    ],
    # --- optional backends, installed via e.g. `pip install cryptofeed[redis]` ---
    extras_require={
        'redis': ['aioredis'],
        'arctic': ['arctic'],
        'zmq': ['pyzmq'],
        'mongo': ['motor'],
        'kafka': ['aiokafka'],
        'rabbit': ['aio_pika', 'pika'],
        'postgres': ['asyncpg']
    },
)
|
# -*- coding: utf-8 -*-
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from tools.translate import _
class account_asset_asset_depreciar(osv.osv):
    """Wizard: recompute depreciation boards for assets filtered by state
    and accounting-plan type (IFRS vs. tributario).
    """
    _name = 'account.asset.asset.depreciar'
    _columns = {
        # transient wizard filters (store=False: never persisted)
        'state_d' : fields.selection([('draft', 'Borrador'), ('open', 'Abierto'), ('close', 'Cerrado')], 'Filtro por estado de los Activos',required = True,store=False),
        'tipo_plan' : fields.selection([('ifrs', 'IFRS'),('tributario', 'TRIBUTARIO')],'Plan Contable', size=50, required=True,store=False),
    }
    def compute_depreciation_board(self, cr, uid, ids, context=None):
        """Recompute the depreciation board of every asset matching the
        wizard's state/plan filters.

        Raises osv.except_osv when no asset matches.
        """
        if context is None:
            context = {}
        obj = self.pool.get('account.asset.asset')
        obj_state = self.browse(cr, uid, ids, context=context)
        tipo_plan = obj_state[0]['tipo_plan']
        state_d = obj_state[0]['state_d']
        # Parameterized query: the original interpolated the wizard values
        # into the SQL string with `%` formatting (SQL-injection prone).
        cr.execute("""SELECT asset_id FROM account_asset_config_plan
                   WHERE tipo_plan = %s
                   AND asset_id in (select id from account_asset_asset WHERE state = %s)""",
                   (tipo_plan, state_d))
        asset_ids = [row[0] for row in cr.fetchall()]
        if asset_ids:
            # propagate the selected plan to downstream computations
            context['tipo_cont'] = tipo_plan
            for asset_id in asset_ids:
                obj.compute_depreciation_board_father(cr, uid, [asset_id], context)
        else:
            raise osv.except_osv(('Warning'), ("No se encontraron activos con filtros seleccionados"))
account_asset_asset_depreciar()
|
#!/usr/bin/env python
# Demo: Python strings are immutable and names are passed by object
# reference, so rebinding the parameter inside a function has no effect
# on the caller's variable.
def try_to_change(n):
    # Rebinds only the local name `n`; the caller's binding is untouched.
    n = 'Green George'
name = 'Emma Friord'
try_to_change(name)
print name  # still 'Emma Friord' (Python 2 print statement)
|
# Greedy purchase: N offers of (price, stock); buy M drinks as cheaply as
# possible by exhausting the cheapest offers first, then print the cost.
n, m = map(int, input().split())
offers = [list(map(int, input().split())) for _ in range(n)]
offers.sort()  # ascending by price (row-wise sort: price is column 0)
total_cost = 0
bought = 0
for offer in offers:
    price, stock = offer[0], offer[1]
    if bought + stock <= m:
        # take the whole offer
        total_cost += price * stock
        bought += stock
    else:
        # take only what is still needed, then stop
        total_cost += price * (m - bought)
        break
print(total_cost)
|
from lib.randomizer import get_random_first_name, get_random_last_name
# Default payload for the registration form used by the UI tests.
# First/last names are randomised per run; the rest are fixed fixtures.
REGISTRATION_DATA = {
    'first_name': get_random_first_name(),
    'last_name': get_random_last_name(),
    'company_name': 'Test Company',
    'country': 'Україна',
    'city': 'Test city',
    'phone': '0000000000',
    # NOTE(review): presumably must match an existing template option in
    # the registration form's dropdown — verify against the app fixture.
    'template': 'Ремонт мобильных / смартфонов',
}
|
#!/usr/bin/env python
# coding: utf-8
# # What is this doing...?
#
# - Trying out TensorFlow inside a Jupyter notebook (exported to a script)
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
# TF1-style graph construction: these are lazy ops, nothing runs yet.
a = tf.constant(100)
b = tf.constant(50)
add_op = a + b
v = tf.Variable(0)
# NOTE(review): tf.assign / tf.Session / global_variables_initializer are
# TF1-only APIs (removed in TF2) — this assumes tensorflow < 2.0.
let_op = tf.assign(v, add_op)
# In[2]:
sess = tf.Session()
# In[3]:
sess.run(tf.global_variables_initializer())
# In[4]:
# Executes the assignment: v <- a + b.
sess.run(let_op)
# In[5]:
print(sess.run(v))
# In[ ]:
|
import hashlib
from onegov.activity.models import Activity, Attendee, Booking, Occasion
from onegov.user import User
from sqlalchemy import func
class Scoring:
    """ Provides scoring based on a number of criteria.

    A criteria is a callable which takes a booking and returns a score.
    The final score is the sum of all criteria scores.
    """

    #: settings-key -> criterion class, for from_settings/settings symmetry
    _OPTIONAL = (
        ('prefer_in_age_bracket', 'PreferInAgeBracket'),
        ('prefer_organiser', 'PreferOrganiserChildren'),
        ('prefer_admins', 'PreferAdminChildren'),
    )

    def __init__(self, criteria=None):
        # Fall back to motivation-only scoring when no criteria are given.
        self.criteria = criteria or [PreferMotivated()]

    def __call__(self, booking):
        total = 0
        for criterium in self.criteria:
            total += criterium(booking)
        return total

    @classmethod
    def from_settings(cls, settings, session):
        """Build a Scoring from a period's settings dict and a session."""
        scoring = cls()

        # always prefer groups
        scoring.criteria.append(PreferGroups.from_session(session))

        for key, criterion in (
            ('prefer_in_age_bracket', PreferInAgeBracket),
            ('prefer_organiser', PreferOrganiserChildren),
            ('prefer_admins', PreferAdminChildren),
        ):
            if settings.get(key):
                scoring.criteria.append(criterion.from_session(session))

        return scoring

    @property
    def settings(self):
        """Reverse of from_settings: flags for the optional criteria present."""
        present = {type(criterium) for criterium in self.criteria}
        flags = {}
        for key, criterion in (
            ('prefer_in_age_bracket', PreferInAgeBracket),
            ('prefer_organiser', PreferOrganiserChildren),
            ('prefer_admins', PreferAdminChildren),
        ):
            if criterion in present:
                flags[key] = True
        return flags
class PreferMotivated:
    """ Scores "motivated" bookings higher. A motivated booking is simply a
    booking with a higher priority (an attendee would favor a booking he's
    excited about.)
    """

    @classmethod
    def from_session(cls, session):
        # Stateless criterion; the session argument exists only for
        # interface symmetry with the other criteria.
        return cls()

    def __call__(self, booking):
        # The booking's priority *is* its score.
        return booking.priority
class PreferInAgeBracket:
    """ Scores bookings whose attendees fall into the age-bracket of the
    occasion higher.

    If the attendee falls into the age-bracket, the score is 1.0. Each year
    difference results in a penalty of 0.1, until 0.0 is reached.
    """

    def __init__(self, get_age_range, get_attendee_age):
        # Injected callables, so scoring is testable without a session.
        self.get_age_range = get_age_range
        self.get_attendee_age = get_attendee_age

    @classmethod
    def from_session(cls, session):
        """Build an instance whose lookups lazily cache query results."""
        attendees = None
        occasions = None

        def get_age_range(booking):
            # Cache the age range of every occasion in the booking's period
            # on first use.
            nonlocal occasions, session
            if occasions is None:
                occasions = {
                    o.id: o.age
                    for o in session.query(Occasion.id, Occasion.age)
                    .filter(Occasion.period_id == booking.period_id)}
            return (
                occasions[booking.occasion_id].lower,
                occasions[booking.occasion_id].upper - 1
            )

        def get_attendee_age(booking):
            # Cache all attendee ages on first use.
            nonlocal attendees, session
            if attendees is None:
                attendees = {a.id: a.age for a in session.query(
                    Attendee.id, Attendee.age)}
            return attendees[booking.attendee_id]

        return cls(get_age_range, get_attendee_age)

    def __call__(self, booking):
        min_age, max_age = self.get_age_range(booking)
        attendee_age = self.get_attendee_age(booking)

        # Chained comparison instead of `a <= x and x <= b`.
        if min_age <= attendee_age <= max_age:
            return 1.0

        # 0.1 penalty per year of distance to the nearest bracket edge.
        difference = min(
            abs(min_age - attendee_age),
            abs(max_age - attendee_age)
        )
        return 1.0 - min(1.0, float(difference) / 10.0)
class PreferOrganiserChildren:
    """ Scores bookings of children higher if their parents are organisers.

    This is basically an incentive to become an organiser. A child whose parent
    is an organiser gets a score of 1.0, if the parent is not an organiser
    a score 0.0 is returned.
    """

    def __init__(self, get_is_organiser_child):
        # Injected predicate, so scoring is testable without a session.
        self.get_is_organiser_child = get_is_organiser_child

    @classmethod
    def from_session(cls, session):
        """Build an instance that lazily caches the set of organisers."""
        organisers = None

        def get_is_organiser_child(booking):
            # Cache the usernames of everyone who organises an activity
            # with an occasion in the booking's period.
            nonlocal organisers
            if organisers is None:
                organisers = {
                    a.username
                    for a in session.query(Activity.username)
                    .filter(Activity.id.in_(
                        session.query(Occasion.activity_id)
                        .filter(Occasion.period_id == booking.period_id)
                        .subquery()
                    ))
                }
            return booking.username in organisers

        return cls(get_is_organiser_child)

    def __call__(self, booking):
        # Conditional expression instead of the legacy `and 1.0 or 0.0` trick.
        return 1.0 if self.get_is_organiser_child(booking) else 0.0
class PreferAdminChildren:
    """ Scores bookings of children higher if their parents are admins. """

    def __init__(self, get_is_association_child):
        # Injected predicate, so scoring is testable without a session.
        self.get_is_association_child = get_is_association_child

    @classmethod
    def from_session(cls, session):
        """Build an instance that lazily caches the set of active admins."""
        members = None

        def get_is_association_child(booking):
            nonlocal members
            if members is None:
                # SQLAlchemy filter: `== True` renders a SQL comparison,
                # do not "fix" it to `is True`.
                members = {
                    u.username for u in session.query(User)
                    .filter(User.role == 'admin')
                    .filter(User.active == True)
                }
            return booking.username in members

        return cls(get_is_association_child)

    def __call__(self, booking):
        # Conditional expression instead of the legacy `and 1.0 or 0.0` trick.
        return 1.0 if self.get_is_association_child(booking) else 0.0
class PreferGroups:
    """ Scores group bookings higher than other bookings. Groups get a boost
    by size:

    - 2 people: 1.0
    - 3 people: 0.8
    - 4 people: 0.6
    - more people: 0.5

    This preference gives an extra boost to unprioritised bookings, to somewhat
    level out bookings in groups that used no star (otherwise a group
    might be split up because someone didn't star the booking).

    Additionally a unique boost between 0.010000000 to 0.099999999 is given to
    each group depending on the group name. This should ensure that competing
    groups generally do not have the same score. So an occasion will generally
    prefer the members of one group over members of another group.
    """

    def __init__(self, get_group_score):
        # Injected lookup, so scoring is testable without a session.
        self.get_group_score = get_group_score

    @classmethod
    def from_session(cls, session):
        """Build an instance that lazily caches per-group scores."""
        group_scores = None

        def unique_score_modifier(group_code):
            # Deterministic per-group jitter in [0.01, 0.0999...], derived
            # from a hash of the group code.
            digest = hashlib.new(
                'sha1',
                group_code.encode('utf-8'),
                usedforsecurity=False
            ).hexdigest()[:8]

            number = int(digest, 16)
            return float('0.0' + str(number)[:8])

        def get_group_score(booking):
            nonlocal group_scores

            if group_scores is None:
                # Groups are codes shared by at least 2 bookings in the
                # booking's period. `!= None` renders SQL IS NOT NULL —
                # do not change it to `is not None`.
                query = session.query(Booking).with_entities(
                    Booking.group_code,
                    func.count(Booking.group_code).label('count')
                ).filter(
                    Booking.group_code != None,
                    Booking.period_id == booking.period_id
                ).group_by(
                    Booking.group_code
                ).having(
                    func.count(Booking.group_code) > 1
                )

                group_scores = {
                    r.group_code:
                    max(.5, 1.0 - 0.2 * (r.count - 2))
                    + unique_score_modifier(r.group_code)
                    for r in query
                }

            return group_scores.get(booking.group_code, 0)

        # Bug fix: the original returned the bare closure instead of an
        # instance, so __call__'s unprioritised-booking offset (and
        # class-based settings detection) never applied — inconsistent
        # with every sibling criterion's from_session.
        return cls(get_group_score)

    def __call__(self, booking):
        # Boost unprioritised bookings so starless group members keep up.
        offset = 0 if booking.priority else 1
        return self.get_group_score(booking) + offset
|
from model.user_account import UserAccount
class Guest(UserAccount):
    """User account fixed to the ``guest`` role/type with access level 2."""

    def __init__(self, name, pwd):
        # super() instead of an explicit base-class call (single inheritance,
        # identical behaviour, survives base-class renames).
        super().__init__(name, "guest", pwd, "guest", 2)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 22:28:27 2017
@author: Gavrilov
"""
class Coordinate(object):
    """A point in the 2D plane."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def distance(self, other):
        """Return the Euclidean distance between this point and *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        # sqrt of the squared differences (** 0.5 == square root)
        return (dx ** 2 + dy ** 2) ** 0.5
# Demo: the two equivalent ways of invoking a method.
c = Coordinate(3,4)
origin = Coordinate(0,0)
print(c.distance(origin)) # instance call: `c` is passed implicitly as `self`
# Same computation via the class: `self` is passed explicitly as the
# first positional argument.
print(Coordinate.distance(c, origin))
|
#!/usr/bin/env python
"""Implementation of soccer goal detection
Goal is represented by 2 orange/red cones (pylons)
"""
# For Python2/3 compatibility
from __future__ import print_function
from __future__ import division
import sys
import os
import math
import rospy
import angles
import tf
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from geometry_msgs.msg import TransformStamped
import cv2
#print(cv2.__version__)
import numpy as np
import xml.etree.ElementTree as ET
__author__ = "Eric Dortmans"
class GoalDetection:
    """This class detects a soccer goal consisting of 2 training cones (pylons)

    A transform is published from the camera to the goal.
    """

    def __init__(self, camera):
        self.process_image_setup()
        self.bridge = CvBridge()
        # large buff_size for lower latency
        self.image_subscriber = rospy.Subscriber("image_raw", Image, self.on_image_message,
                                                 queue_size=1, buff_size=2**24)
        self.transform_publisher = rospy.Publisher("goal", TransformStamped, queue_size=1)
        self.transform_broadcaster = tf.TransformBroadcaster()

    def to_cv2(self, image_msg):
        """Convert ROS image message to OpenCV image.

        Returns None when conversion fails (the original fell through and
        returned an unbound local, raising UnboundLocalError).
        """
        try:
            return self.bridge.imgmsg_to_cv2(image_msg, 'bgr8')
        except CvBridgeError as e:
            print(e)
            return None

    def to_imgmsg(self, image):
        """Convert OpenCV image to ROS image message (None on failure)."""
        try:
            return self.bridge.cv2_to_imgmsg(image, "bgr8")
        except CvBridgeError as e:
            print(e)
            return None

    def on_image_message(self, image_msg):
        """Process received ROS image message."""
        self.image = self.to_cv2(image_msg)
        if self.image is None:
            return  # conversion failed; skip this frame
        self.timestamp = image_msg.header.stamp
        self.frame_id = image_msg.header.frame_id
        self.process_image()

    def process_image_setup(self):
        """Setup for image processing.

        This code will run only once to setup image processing.
        """
        self.display = rospy.get_param('~display', True)
        self.goal_frame_id = rospy.get_param('~goal_frame_id', 'goal')
        # Optical center coordinates (detected lazily on the first frame)
        (self.center_x, self.center_y) = (None, None)
        # Goal coordinates
        (self.goal_x, self.goal_y, self.goal_theta) = (0, 0, 0)

    def process_image(self):
        """Detect the two pylons, compute the goal pose and publish it.

        This code is run for each image.
        """
        # Size of original image
        width = self.image.shape[1]
        height = self.image.shape[0]
        # Make copy of image for display purposes
        display_img = self.image.copy()
        # Determine optical center (`is None`, not `== None`)
        if self.center_x is None or self.center_y is None:
            Camera.detect_optical_center(self.image)  # optional
            self.center_x = int(round(Camera.center[0]))
            self.center_y = int(round(Camera.center[1]))
        # Draw crosshair
        north = (self.center_x, height-1)
        south = (north[0], 0)
        east = (width-1, self.center_y)
        west = (0, east[1])
        cv2.line(display_img, south, north, (0,255,0))
        cv2.line(display_img, west, east, (0,255,0))
        cv2.circle(display_img, (self.center_x, self.center_y), 0, (0, 255, 0), 5)
        # Detect soccer training cones (pylons) by their orange/red HSV range
        hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        color_min = np.array([ 0, 150, 150], np.uint8) # min HSV color
        color_max = np.array([20, 255, 255], np.uint8) # max HSV color
        blobs = Utils.detect_colored_blobs(hsv, color_min, color_max)
        #cv2.imshow('blobs', blobs)
        pylons = cv2.bitwise_and(self.image, self.image, mask=blobs)
        #cv2.imshow('pylons', pylons)
        contours = Utils.find_contours(blobs)
        if len(contours) > 1: # we have seen pylons
            cnts = Utils.largest_contours(contours, number=2)
            # Calculate and draw position of both goal posts (pylons)
            # OPTION1: Center of contour
            #pylon1 = Utils.center_of_contour(cnts[0])
            #pylon2 = Utils.center_of_contour(cnts[1])
            # OPTION2: Closest point on contour
            def closest_point(point, points):
                # index of the point in `points` nearest to `point`
                point = np.asarray(point)
                points = np.asarray(points)
                dist_2 = np.sum((points - point)**2, axis=1)
                return np.argmin(dist_2)
            pylon1_points = cnts[0].ravel().reshape((len(cnts[0]),2))
            pylon2_points = cnts[1].ravel().reshape((len(cnts[1]),2))
            pylon1 = pylon1_points[closest_point((self.center_x, self.center_y), pylon1_points)]
            pylon2 = pylon2_points[closest_point((self.center_x, self.center_y), pylon2_points)]
            # OPTION3: Center of rotated rectangle
            pylon1_rect = cv2.minAreaRect(cnts[0]) # rect = ( (center_x,center_y), (width,height), angle )
            # Bug fix: the second rectangle must come from the second
            # contour (the original reused cnts[0]).
            pylon2_rect = cv2.minAreaRect(cnts[1])
            pylon_radius = int(round((pylon1_rect[1][0] + pylon2_rect[1][0]) / 4.0))
            #pylon1_box = np.int0(cv2.boxPoints(pylon1_rect))
            #cv2.drawContours(display_img,[pylon1_box],0,(0,0,255),1)
            #pylon2_box = np.int0(cv2.boxPoints(pylon2_rect))
            #cv2.drawContours(display_img,[pylon2_box],0,(0,0,255),1)
            #pylon1 = np.int0(pylon1_rect[0])
            #pylon2 = np.int0(pylon2_rect[0])
            cv2.circle(display_img, tuple(pylon1), 0, (0, 255, 0), 5)
            cv2.circle(display_img, tuple(pylon2), 0, (0, 255, 0), 5)
            # Order the pylons left-to-right in the image
            if pylon1[0] > pylon2[0]:
                pylon1, pylon2 = pylon2, pylon1
            # Draw goal-line
            cv2.line(display_img, tuple(pylon1), tuple(pylon2), (0,255,0), 1)
            # Calculate the center of the goal in pixel coordinates
            goal = np.round(Utils.middle_between(pylon1, pylon2)).astype("int")
            goal_x = goal[0]
            goal_y = goal[1]
            # Draw line from optical center to goal center
            cv2.circle(display_img, tuple(goal), 0, (0, 0, 255), 5)
            cv2.line(display_img, (self.center_x, self.center_y), tuple(goal), (0,0,255), 1)
            # Calculate the goal center real world coordinates
            goal_relative_x = goal_x - self.center_x
            goal_relative_y = goal_y - self.center_y
            goal_rho, goal_phi = Utils.cart2pol(goal_relative_x, goal_relative_y)
            goal_rho += pylon_radius # correct for radius of object
            goal_real_phi = goal_phi
            goal_real_rho = Camera.pixels2meters(goal_rho)
            goal_x_cart, goal_y_cart = Utils.pol2cart(goal_real_rho, goal_real_phi)
            self.goal_x, self.goal_y = Camera.image2robot(goal_x_cart, goal_y_cart)
            # Calculate goal orientation (theta) from the goal-line normal
            goal_normal_relative = Utils.perpendicular((pylon1[0] - pylon2[0], pylon1[1] - pylon2[1]))
            goal_normal = (goal[0] + goal_normal_relative[0], goal[1] + goal_normal_relative[1])
            cv2.line(display_img, tuple(goal), tuple(goal_normal), (0,255,0), 1)
            x_axis = (0, -self.center_y)
            self.goal_theta = Utils.angle_between(x_axis, goal_normal_relative)
            #print("goal_theta", self.goal_theta)
            # Publish transform from camera to goal
            transform_msg = TransformStamped()
            transform_msg.header.stamp = rospy.Time.now()
            transform_msg.header.frame_id = self.frame_id
            transform_msg.child_frame_id = self.goal_frame_id
            transform_msg.transform.translation.x = self.goal_x
            transform_msg.transform.translation.y = self.goal_y
            transform_msg.transform.translation.z = 0.0
            quaternion = tf.transformations.quaternion_from_euler(0, 0, self.goal_theta)
            transform_msg.transform.rotation.x = quaternion[0]
            transform_msg.transform.rotation.y = quaternion[1]
            transform_msg.transform.rotation.z = quaternion[2]
            transform_msg.transform.rotation.w = quaternion[3]
            self.transform_publisher.publish(transform_msg)
            #self.transform_broadcaster.sendTransform(transform_msg)
            self.transform_broadcaster.sendTransform(
                (self.goal_x, self.goal_y, 0.0),
                (quaternion[0], quaternion[1], quaternion[2], quaternion[3]),
                rospy.Time.now(),
                self.goal_frame_id,
                self.frame_id
            )
        # Show augmented image
        if self.display:
            cv2.imshow("image", display_img)
            cv2.waitKey(1)
class Camera:
    """ Camera parameters

    NOTE: all parameters are stored as *class* attributes, so there is
    effectively a single shared camera configuration per process.
    """
    center = None # Optical center
    def __init__(self, params_file):
        """Load optical center and mapping polynomial from an XML file."""
        root = ET.parse(params_file).getroot()
        # camera/center/x,y
        center_x = float(root.findall("./center/x")[0].text)
        center_y = float(root.findall("./center/y")[0].text)
        Camera.center = np.array([center_x, center_y])
        # camera/coefficients/c0,c1,c2,c3
        c0 = float(root.findall("./coefficients/c0")[0].text)
        c1 = float(root.findall("./coefficients/c1")[0].text)
        c2 = float(root.findall("./coefficients/c2")[0].text)
        c3 = float(root.findall("./coefficients/c3")[0].text)
        Camera.coefficients = np.array([c0, c1, c2, c3])
    @classmethod
    def detect_optical_center(cls, image):
        """ detect optical center of omnivision camera

        Starts from the image center and refines it with the biggest
        Hough circle found (the mirror rim), if any. Updates Camera.center.
        """
        height, width = image.shape[:2]
        # first estimate
        center_x, center_y = width//2, height//2
        # try to give better estimate
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #gray = cv2.medianBlur(gray,5)
        #gray = Utils.find_edges(gray)
        #cv2.imshow("edges", gray)
        circles = Utils.find_circles(gray, param2=40, minDist=100, minRadius=180, maxRadius=300)
        r_max = 0
        if circles is not None:
            # find biggest circle
            for (x, y, r) in circles:
                if r > r_max:
                    r_max = r
                    center_x, center_y = x, y
            #display_img = image.copy()
            #cv2.circle(display_img, (center_x, center_y), r_max, (0, 255, 0), 1)
            #cv2.circle(display_img, (center_x, center_y), 0, (0, 255, 0), 3)
            #cv2.imshow("optical_center", display_img)
        Camera.center = np.array([center_x, center_y])
    @classmethod
    def pixels2meters(cls, rho):
        """ Mapping of image radial distance to real world radial distance.

        Evaluates the calibration polynomial c0 + c1*rho + c2*rho^2 + c3*rho^3.
        """
        polynome = np.array([1, rho, rho**2, rho**3])
        return polynome.dot(cls.coefficients)
    @staticmethod
    def image2robot(x, y):
        """ Transform from image to robot coordinates
                Image   Robot
                +---x     x
                |         |
                y     y---+
        """
        return -y, -x
class Utils:
    """ Utility methods
    """
    @staticmethod
    def cart2pol(x, y):
        """ Carthesian to Polar coordinates. Returns (rho, phi)."""
        rho = np.sqrt(x**2 + y**2)
        phi = np.arctan2(y, x)
        return (rho, phi)
    @staticmethod
    def pol2cart(rho, phi):
        """ Polar to Carthesian coordinates. Returns (x, y)."""
        x = rho * np.cos(phi)
        y = rho * np.sin(phi)
        return (x, y)
    @staticmethod
    def perpendicular(v):
        """ Vector perpendicular to input vector (rotated 90 deg CCW)."""
        vp = np.empty_like(v)
        vp[0] = -v[1]
        vp[1] = v[0]
        return vp
    @staticmethod
    def normalize(v):
        """ Normalize vector to unit magnitude
        """
        v = np.array(v)
        return v/np.linalg.norm(v)
    @staticmethod
    def middle_between(v1, v2):
        """ Middle between two vectors
        """
        v1 = np.array(v1)
        v2 = np.array(v2)
        vm = (v1 + v2) / 2.0
        return vm
    @staticmethod
    def angle_between(v1, v2):
        """ Signed angle between two vectors (shortest angular distance)."""
        v1 = np.array(v1)
        v2 = np.array(v2)
        ## Inner angle, no sign
        #cosang = np.dot(v1, v2)
        #sinang = np.linalg.norm(np.cross(v1, v2))
        #return np.arctan2(sinang, cosang)
        ## Inner angle, no sign
        #v1_u = Utils.normalize(v1)
        #v2_u = Utils.normalize(v2)
        #return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
        a1 = np.arctan2(v1[1],v1[0])
        a2 = np.arctan2(v2[1],v2[0])
        return angles.shortest_angular_distance(a2, a1)
    @staticmethod
    def find_contours(image, mode=cv2.RETR_EXTERNAL):
        """find contours in image

        The contour list is taken with [-2] so this works with OpenCV 2/4
        (which return 2-tuples) as well as OpenCV 3 (3-tuples).
        """
        cnts = cv2.findContours(image.copy(), mode, cv2.CHAIN_APPROX_SIMPLE)[-2]
        return cnts
    @staticmethod
    def largest_contours(cnts, number=1):
        """Select largest contour(s)
        """
        largest = sorted(cnts, key=cv2.contourArea, reverse=True)[:number]
        return largest
    @staticmethod
    def center_of_contour(cnt):
        """ Calculate centroid of contour; (0, 0) for a degenerate contour.

        Bug fix: the original divided by m00 *before* checking it for
        zero, raising ZeroDivisionError on degenerate contours.
        """
        moments = cv2.moments(cnt)
        if moments["m00"] != 0:
            center_x = int(moments["m10"] / moments["m00"])
            center_y = int(moments["m01"] / moments["m00"])
        else:
            center_x, center_y = 0, 0
        return center_x, center_y
    @staticmethod
    def detect_colored_blobs(image, color_min, color_max):
        """Threshold *image* to a binary mask of the given color range."""
        blobs = cv2.inRange(image, color_min, color_max)
        # If the top-left corner is "on", assume the mask is inverted.
        if (blobs[0, 0] == 255): blobs = cv2.bitwise_not(blobs)
        return blobs
    @staticmethod
    def find_edges(image):
        """find edges in image using auto-tuned Canny thresholds
        """
        sigma=0.33
        v = np.median(image)
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        edged = cv2.Canny(image, lower, upper)
        return edged
    @staticmethod
    def find_circles(image, minDist=1, param1=50, param2=30, minRadius=0, maxRadius=0):
        """Find circles in image via the Hough transform.

        Returns an array of integer (x, y, r) rows, or None.
        """
        # detect circles in the image
        circles = cv2.HoughCircles(image.copy(), cv2.HOUGH_GRADIENT, 1,
                                   minDist=minDist,
                                   param1=param1,
                                   param2=param2,
                                   minRadius=minRadius,
                                   maxRadius=maxRadius)
        # convert the (x, y) coordinates and radius of the circles to integers
        if circles is not None:
            circles = np.round(circles[0, :]).astype("int")
        return circles
def main(args):
    """ROS entry point: build the camera model and start goal detection."""
    rospy.init_node('goal_detection', anonymous=True)
    # Path to the camera calibration XML (private ROS parameter).
    camera_params = rospy.get_param('~camera_params', None)
    camera = Camera(camera_params)
    goal_detection = GoalDetection(camera)
    rospy.spin()  # blocks until shutdown; image callbacks do the work
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main(sys.argv)
|
import cv2
# Pencil-sketch effect via the "color dodge" technique:
# divide the greyscale image by an inverted, blurred copy of itself.
# read the image
img = cv2.imread("20190417_143055.jpg")
# resize the image to 500 x 500
img = cv2.resize(img, (500, 500))
# convert BGR to greyscale image
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# invert the grey image
grey_inv = 255 - grey
# add gaussian blur to it (large 51x51 kernel for a soft effect)
grey_inv_blur = cv2.GaussianBlur(grey_inv, (51, 51), 0)
# invert the blurred image
inv_blur = 255 - grey_inv_blur
# divide grey by inv_blur (scale keeps the result in 0..255)
sketch = cv2.divide(grey, inv_blur, scale=256.0)
cv2.imshow('image', img)
cv2.imshow('sketch', sketch)
cv2.waitKey(0)
import os.path
def download_file(url, filepath=None):
    """Download *url* to *filepath* and return the path written.

    *filepath* may be a full path, a directory (trailing slash — the file
    name is taken from the URL), or None (download into the current
    directory under the URL's file name).
    """
    # stdlib replacement for the original's undefined `requests` dependency
    import urllib.request

    if filepath:
        if filepath.endswith('/'):
            # trailing slash means "directory": derive the name from the URL
            filename = url.split('/')[-1]
            filepath = os.path.join(filepath, filename)
    else:
        filepath = url.split('/')[-1]
    # Stream in chunks so large files are never fully held in memory;
    # context managers close both the response and the output file.
    with urllib.request.urlopen(url) as response, open(filepath, 'wb') as fp:
        while True:
            chunk = response.read(1024)
            if not chunk:
                break
            fp.write(chunk)
    return filepath
#!/usr/bin/python3
# -*- coding:utf8 -*-
# Author : Arthur Yan
# Date : 2019-02-16 15:50:14
# Description : 百钱百鸡
# cocks 1-5
# hens 1-3
# chickens 3-1
# cocks + hens + chickens = 100
# 5*cocks + 3*hens + 1/3*chickens =100
cocks = 100 // 5
hens = 100 // 3
chickens = 100
for cock in range(cocks):
for hen in range(hens):
for chicken in range(chickens):
animal_sum = cock + hen + chicken
price_sum = 5*cock + 3*hen + 1/3*chicken
if (animal_sum == 100 and price_sum == 100):
print("cocks:{0},hens:{1},chicken:{2}"
.format(cock, hen, chicken))
continue
|
#character identification
ch=input("Enter a character:")
asc=ord(ch)
if asc>=48 and asc<=57:
print("%c is a digit"%(ch))
elif asc>=65 and asc<=90:
print("%c is a capital letter"%(ch))
elif asc>=97 and asc<=112:
print("%c is a small letter"%(ch))
else:
print("%c is a special symbol"%(ch))
|
def solution(x, y):
    """Encode a 31-step move string approaching (x, y).

    Each step i moves by +/- 2**(30-i) in both coordinates; the letter is
    chosen from the pair of signs: (-,-)=L, (+,+)=R, (-,+)=D, (+,-)=U.
    """
    moves = {(-1, -1): 'L', (1, 1): 'R', (-1, 1): 'D', (1, -1): 'U'}
    cur_x = cur_y = 0
    letters = []
    for i in range(31):
        step = 1 << (30 - i)  # 2**(30-i)
        # move toward the target in x
        if cur_x < x:
            cur_x += step
            sign_x = 1
        else:
            cur_x -= step
            sign_x = -1
        # move toward the target in y
        if cur_y < y:
            cur_y += step
            sign_y = 1
        else:
            cur_y -= step
            sign_y = -1
        letters.append(moves[(sign_x, sign_y)])
    return ''.join(letters)
# Driver: read N target points; all must share the same parity of x+y,
# otherwise no fixed step sequence can reach every point (-1 printed).
# Points are transformed to the rotated grid (x+y, x-y) before encoding.
N = int( input())
X = [0]*N
Y = [0]*N
x, y = map( int, input().split())
X[0] = x
Y[0] = y
evod = (x+y)%2  # parity of the first point; all others must match
Flag = True
for i in range(1,N):
    x, y = map( int, input().split())
    X[i] = x
    Y[i] = y
    if (x+y)%2 != evod:
        Flag = False
if Flag:
    # Step sizes 2**30 .. 2**0 (plus an extra 1-step for even parity).
    D = [2**i for i in range(30,-1,-1)]
    if evod == 1:
        print(31)
        print(' '.join(map(str,D)))
        for i in range(N):
            # encode in rotated coordinates (x+y, x-y)
            print( solution(X[i]+Y[i],X[i]-Y[i]))
    else:
        print(32)
        D.append(1)
        print(' '.join(map(str,D)))
        for i in range(N):
            # shift by (1, 0) first (the trailing 'R') to fix parity
            print( solution(X[i]+Y[i]-1,X[i]-1-Y[i]) + 'R')
else:
    print(-1)
|
from src.image_processing import histogram
#######
# Inputs
#######
grey_scale = 8  # number of grey levels
# 4x4 input image given as whitespace-separated grey values.
# NOTE(review): the matrix contains the value 8 although grey_scale = 8
# would usually imply levels 0..7 — verify against histogram.resolve.
matrix_str = """
4 5 5 7
7 5 7 8
4 5 6 5
8 6 5 7
"""
#######
# Solution
#######
if __name__ == '__main__':
    # Delegates to the project's histogram module.
    histogram.resolve(matrix_str, grey_scale)
|
#!/usr/bin/env
############################################
# exercise_8_basic.py
# Author: Paul Yang
# Date: June, 2016
# Brief: handling valueError exception
############################################
############################################
# print_file()
# open file by the filepath user input
# inputs: None
# returns: None
def print_file():
    """Prompt for a filename and pretty-print a `role:line` dialogue file.

    Lines without a ':' separator are skipped. The file is opened with a
    with-statement so it is closed even if printing fails (the original
    leaked the handle on error), and the bare `except: pass` is narrowed
    to the ValueError that a failed split raises.
    """
    #data = open("dialogue_chinese.txt", encoding="utf-8")
    filename = input("輸入要開啟的檔名:")
    with open(filename, encoding="utf-8") as data:
        for line in data:
            try:
                (role, line_spoken) = line.split(":", maxsplit=1)
            except ValueError:
                # malformed line with no separator: skip it
                continue
            print(role, end="")
            print("說: ", end="")
            print(line_spoken, end="")
import pytest
def test_endpoint(client):
    """Smoke test: POST a two-utterance message and check the response shape.

    `client` is a test-client fixture supplied by the project's conftest.
    """
    response = client.post(
        '/analyze_slack',
        content_type="application/json",
        # `__eou__` is the end-of-utterance delimiter used by the analyzer
        json={'text': 'test __eou__ another'}
    )
    payload = response.get_json()
    print(payload)
    # Each expected field must be present and be a list.
    for field, value_type in [
        ('conf_speech_acts', list),
        ('speech_acts', list),
        ('utterances', list)
    ]:
        assert field in payload
        assert isinstance(payload[field], value_type)
|
from gemlibapp import create_app # since this exists in __init__.py it can be found and imported
# Application entry point: build the Flask app via the package factory.
app = create_app()
if __name__ == "__main__":
    # Debug server for local development only — not for production.
    app.run(debug=True, host='localhost')
|
""" For use in dumping single frame ground truths of EuRoc Dataset
Adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/blob/0caec9ed0f83cb65ba20678a805e501439d2bc25/data/kitti_raw_loader.py
You-Yi Jau, yjau@eng.ucsd.edu, 2019
Rui Zhu, rzhu@eng.ucsd.edu, 2019
"""
from __future__ import division
import numpy as np
from pathlib import Path
from tqdm import tqdm
import scipy.misc
from collections import Counter
from pebble import ProcessPool
import multiprocessing as mp
# Use at most 80% of the available CPU cores for the dump worker pool.
ratio_CPU = 0.8
default_number_of_process = int(ratio_CPU * mp.cpu_count())
import os, sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import traceback
import coloredlogs, logging
logging.basicConfig()
logger = logging.getLogger()
coloredlogs.install(level="INFO", logger=logger)
import cv2
from kitti_tools.utils_kitti import (
load_velo_scan,
rectify,
read_calib_file,
transform_from_rot_trans,
scale_intrinsics,
scale_P,
)
import dsac_tools.utils_misc as utils_misc
# from utils_good import *
from glob import glob
from dsac_tools.utils_misc import crop_or_pad_choice
from utils_kitti import load_as_float, load_as_array, load_sift, load_SP
import yaml
DEEPSFM_PATH = "/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils/deepSfm"
sys.path.append(DEEPSFM_PATH)
import torch
from models.model_wrap import PointTracker
from models.model_wrap import SuperPointFrontend_torch
from kitti_odo_loader import KittiOdoLoader
from kitti_odo_loader import (
dump_sift_match_idx,
get_sift_match_idx_pair,
dump_SP_match_idx,
get_SP_match_idx_pair,
read_odo_calib_file,
)
class tum_seq_loader(KittiOdoLoader):
    def __init__(
        self,
        dataset_dir,
        img_height=375,
        img_width=1242,
        cam_ids=["00"],  # no usage in TUM
        get_X=False,
        get_pose=False,
        get_sift=False,
        get_SP=False,
        sift_num=2000,
        if_BF_matcher=False,
        save_npy=True,
    ):
        """Configure the TUM RGB-D sequence loader.

        dataset_dir   -- root directory containing the TUM sequences
        img_height/img_width -- output frame size (frames are zoomed to fit)
        cam_ids       -- unused for TUM (always forced to ["00"] below)
        get_X/get_pose/get_sift/get_SP -- which per-frame data to dump
        sift_num      -- max SIFT features per frame (when get_sift)
        if_BF_matcher -- brute-force vs FLANN matching (when get_sift)
        save_npy      -- dump .npy files instead of h5
        """
        # depth_size_ratio=1):
        # dir_path = Path(__file__).realpath().dirname()
        self.dataset_dir = Path(dataset_dir)
        self.img_height = img_height
        self.img_width = img_width
        self.cam_ids = ["00"]  # no use in TUM
        # assert self.cam_ids == ['02'], 'Support left camera only!'
        self.cid_to_num = {"00": 0, "01": 1, "02": 2, "03": 3}
        self.debug = False
        if self.debug:
            coloredlogs.install(level="DEBUG", logger=logger)  # original info
        if self.debug:
            ## small dataset for debugging
            self.train_seqs = ["rgbd_dataset_freiburg1_xyz"]
            self.test_seqs = ["rgbd_dataset_freiburg1_xyz"]
        else:
            ## dataset names
            self.train_seqs = [
                "rgbd_dataset_freiburg1_desk",
                "rgbd_dataset_freiburg1_room",
                "rgbd_dataset_freiburg2_desk",
                "rgbd_dataset_freiburg3_long_office_household",
            ]
            self.test_seqs = [
                "rgbd_dataset_freiburg1_desk2",
                "rgbd_dataset_freiburg2_xyz",
                "rgbd_dataset_freiburg3_nostructure_texture_far",
            ]
        self.get_X = get_X
        self.get_pose = get_pose
        self.get_sift = get_sift
        self.get_SP = get_SP
        self.save_npy = save_npy
        if self.save_npy:
            logging.info("+++ Dumping as npy")
        else:
            logging.info("+++ Dumping as h5")
        if self.get_sift:
            self.sift_num = sift_num
            self.if_BF_matcher = if_BF_matcher
            # very low contrast threshold so we reliably get sift_num features
            self.sift = cv2.xfeatures2d.SIFT_create(
                nfeatures=self.sift_num, contrastThreshold=1e-5
            )
            # self.bf = cv2.BFMatcher(normType=cv2.NORM_L2)
            # FLANN_INDEX_KDTREE = 0
            # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            # search_params = dict(checks = 50)
            # self.flann = cv2.FlannBasedMatcher(index_params, search_params)
            # self.sift_matcher = self.bf if BF_matcher else self.flann
        self.scenes = {"train": [], "test": []}
        if self.get_SP:
            self.prapare_SP()
        # no need two functions
        self.collect_train_folders()
        self.collect_test_folders()
def read_images_files_from_folder(self, drive_path, scene_data, folder="rgb"):
print(f"drive_path: {drive_path}")
## given that we have matched time stamps
arr = np.genfromtxt(f'{drive_path}/{folder}_filter.txt',dtype='str') # [N, 2(time, path)]
img_files = np.char.add(str(drive_path)+'/',arr[:,1])
img_files = [Path(f) for f in img_files]
img_files = sorted(img_files)
## no time stamps
# img_dir = os.path.join(drive_path, "")
# img_files = sorted(glob(img_dir + f"/{folder}/*.png"))
print(f"img_files: {img_files[0]}")
return img_files
def collect_train_folders(self):
for seq in self.train_seqs:
seq_dir = os.path.join(self.dataset_dir, seq)
self.scenes["train"].append(seq_dir)
def collect_test_folders(self):
for seq in self.test_seqs:
seq_dir = os.path.join(self.dataset_dir, seq)
self.scenes["test"].append(seq_dir)
    def load_image(self, scene_data, tgt_idx, show_zoom_info=True):
        """Load frame ``tgt_idx`` and resize it to (self.img_height, self.img_width).

        Returns:
            (img, (zoom_x, zoom_y), img_ori): the possibly-resized image, the
            zoom factors applied, and the original image. Returns
            (None, None, None) when the file does not exist.
        """
        # use different image filename
        img_file = Path(scene_data["img_files"][tgt_idx])
        if not img_file.is_file():
            logging.warning("Image %s not found!" % img_file)
            return None, None, None
        # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.3;
        # this code requires an old SciPy with Pillow installed. Consider
        # migrating to imageio/PIL.
        img_ori = scipy.misc.imread(img_file)
        if [self.img_height, self.img_width] == [img_ori.shape[0], img_ori.shape[1]]:
            # Already at the target resolution: zoom factor is identity.
            return img_ori, (1.0, 1.0), img_ori
        else:
            zoom_y = self.img_height / img_ori.shape[0]
            zoom_x = self.img_width / img_ori.shape[1]
            if show_zoom_info:
                logging.warning(
                    "[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d)."
                    % (
                        img_file,
                        img_ori.shape[0],
                        img_ori.shape[1],
                        zoom_y,
                        zoom_x,
                        self.img_height,
                        self.img_width,
                    )
                )
            img = scipy.misc.imresize(img_ori, (self.img_height, self.img_width))
            return img, (zoom_x, zoom_y), img_ori
def get_calib_file_from_folder(self, foldername):
cid = 1
cam_name = "freiburg"
for i in range(1, 4):
if f"{cam_name}{i}" in str(foldername):
cid = i
calib_file = f"{self.dataset_dir}/tum/TUM{cid}.yaml"
return calib_file
    # def collect_scene_from_drive(self, drive_path):
    def collect_scene_from_drive(self, drive_path, split="train"):
        """Gather everything needed to dump one TUM sequence.

        Collects the rgb/depth file lists, validates the images once (recording
        the zoom factor to the target resolution), loads intrinsics from the
        per-camera YAML, and reads KITTI-format ground-truth poses.

        Args:
            drive_path: directory of one sequence.
            split: unused here; kept for interface symmetry with other loaders.

        Returns:
            A list holding one scene_data dict per camera (a single RGB camera
            in practice), or [] when the first image fails to load.
        """
        # adapt for Euroc dataset
        train_scenes = []
        logging.info("Gathering info for %s..." % drive_path)
        for c in self.cam_ids:
            # NOTE(review): cid is hard-coded to "00" regardless of c — only
            # one camera is actually supported.
            scene_data = {
                "cid": "00",
                "cid_num": 0,
                "dir": Path(drive_path),
                "rel_path": Path(drive_path).name + "_" + "00",
            }
            # img_dir = os.path.join(drive_path, 'image_%d'%scene_data['cid_num'])
            # scene_data['img_files'] = sorted(glob(img_dir + '/*.png'))
            scene_data["img_files"] = self.read_images_files_from_folder(
                drive_path, scene_data, folder="rgb"
            )
            scene_data["depth_files"] = self.read_images_files_from_folder(
                drive_path, scene_data, folder="depth"
            )
            scene_data["N_frames"] = len(scene_data["img_files"])
            assert scene_data["N_frames"] != 0, "No file found for %s!" % drive_path
            scene_data["frame_ids"] = [
                "{:06d}".format(i) for i in range(scene_data["N_frames"])
            ]
            img_shape = None
            zoom_xy = None
            show_zoom_info = True
            # read images
            # First pass over the frames: verify every image loads and that all
            # shapes agree; remember the zoom factor of the (assumed uniform)
            # resize to the target resolution.
            for idx in tqdm(range(scene_data["N_frames"])):
                img, zoom_xy, _ = self.load_image(scene_data, idx, show_zoom_info)
                # print(f"zoom_xy: {zoom_xy}")
                show_zoom_info = False  # only log the resize once
                if img is None and idx == 0:
                    logging.warning("0 images in %s. Skipped." % drive_path)
                    return []
                else:
                    if img_shape is not None:
                        assert img_shape == img.shape, (
                            "Inconsistent image shape in seq %s!" % drive_path
                        )
                    else:
                        img_shape = img.shape
            # print(img_shape)
            scene_data["calibs"] = {
                "im_shape": [img_shape[0], img_shape[1]],
                "zoom_xy": zoom_xy,
                "rescale": True if zoom_xy != (1.0, 1.0) else False,
            }
            # Get geo params from the RAW dataset calibs
            # calib_file = os.path.join("tum/TUM1.yaml")
            # calib_file = os.path.join("/data/tum/calib/TUM1.yaml")
            calib_file = os.path.join(self.get_calib_file_from_folder(drive_path))
            logging.info(f"calibration file: {calib_file}")
            # calib_file = f"{scene_data['img_files'][0].str()}/../../sensor.yaml"
            P_rect_noScale, P_rect_scale = self.get_P_rect(
                calib_file, scene_data["calibs"]
            )
            P_rect_ori_dict = {c: P_rect_scale}
            intrinsics = P_rect_ori_dict[c][:, :3]
            logging.debug(f"intrinsics: {intrinsics}")
            # calibs_rects = self.get_rect_cams(intrinsics, P_rect_ori_dict[c])
            calibs_rects = {"Rtl_gt": np.eye(4)}  # only one camera, no extrinsics
            cam_2rect_mat = np.eye(4)  # extrinsics for cam2
            # The KITTI-style chain below is identity for TUM (single camera):
            # drive_in_raw = self.map_to_raw[drive_path[-2:]]
            # date = drive_in_raw[:10]
            # seq = drive_in_raw[-4:]
            # calib_path_in_raw = Path(self.dataset_dir)/'raw'/date
            # imu2velo_dict = read_calib_file(calib_path_in_raw/'calib_imu_to_velo.txt')
            # velo2cam_dict = read_calib_file(calib_path_in_raw/'calib_velo_to_cam.txt')
            # cam2cam_dict = read_calib_file(calib_path_in_raw/'calib_cam_to_cam.txt')
            # velo2cam_mat = transform_from_rot_trans(velo2cam_dict['R'], velo2cam_dict['T'])
            # imu2velo_mat = transform_from_rot_trans(imu2velo_dict['R'], imu2velo_dict['T'])
            # cam_2rect_mat = transform_from_rot_trans(cam2cam_dict['R_rect_00'], np.zeros(3))
            velo2cam_mat = np.eye(4)
            cam2body_mat = np.eye(3)
            scene_data["calibs"].update(
                {
                    "K": intrinsics,
                    "P_rect_ori_dict": P_rect_ori_dict,
                    "P_rect_noScale": P_rect_noScale,  # add for read and process 3d points
                    "cam_2rect": cam_2rect_mat,
                    "velo2cam": velo2cam_mat,
                    "cam2body_mat": cam2body_mat,
                }
            )
            scene_data["calibs"].update(calibs_rects)
            # Get pose (KITTI row-major 3x4 per line, pre-generated offline)
            gt_kitti_file = "groundtruth_filter.kitti"
            # if not (Path(drive_path) / gt_kitti_file).exists():
            # import subprocess
            # gt_file = "groundtruth_filter.txt"
            # assert (Path(drive_path) / gt_file).exists()
            # # process files
            # logging.info(f"generate kitti format gt pose: {drive_path}")
            # subprocess.run(f"evo_traj tum {str(Path(drive_path)/gt_file)} --save_as_kitti", shell=True, check=True) # https://github.com/MichaelGrupp/evo
            assert (
                Path(drive_path) / gt_kitti_file
            ).exists(), "kitti style of gt pose file not found, please run 'python process_poses.py --dataset_dir DATASET_DIR"
            poses = (
                np.genfromtxt(Path(drive_path) / gt_kitti_file)
                .astype(np.float32)
                .reshape(-1, 3, 4)
            )
            # print(f"poses before: {poses[:10]}")
            # ## invert camera poses of world coord to poses of camera coord
            # poses = np.array([np.linalg.inv(utils_misc.Rt_pad(pose))[:3] for pose in poses])
            # print(f"poses after: {poses[:10]}")
            # One pose per frame is required.
            assert scene_data["N_frames"] == poses.shape[0], (
                "scene_data[N_frames]!=poses.shape[0], %d!=%d"
                % (scene_data["N_frames"], poses.shape[0])
            )
            scene_data["poses"] = poses
            # extrinsic matrix for cameraN to this camera (identity here)
            scene_data["Rt_cam2_gt"] = scene_data["calibs"]["Rtl_gt"]
            logging.debug(f'scene_data["Rt_cam2_gt"]: {scene_data["Rt_cam2_gt"]}')
            train_scenes.append(scene_data)
        return train_scenes
def get_P_rect(self, calib_file, calibs):
# width, height = calib_data['resolution']
# cam_info.distortion_model = 'plumb_bob'
# D = np.array(calib_data['distortion_coefficients'])
# cam_info.R = [1, 0, 0, 0, 1, 0, 0, 0, 1]
calib_data = loadConfig(calib_file)
fu, fv, cu, cv = (
calib_data["Camera.fx"],
calib_data["Camera.fy"],
calib_data["Camera.cx"],
calib_data["Camera.cy"],
)
K = np.array([[fu, 0, cu], [0, fv, cv], [0, 0, 1]])
P_rect_ori = np.concatenate((K, [[0], [0], [0]]), axis=1)
# rescale the camera matrix
if calibs["rescale"]:
P_rect_scale = scale_P(
P_rect_ori, calibs["zoom_xy"][0], calibs["zoom_xy"][1]
)
else:
P_rect_scale = P_rect_ori
return P_rect_ori, P_rect_scale
    @staticmethod
    def load_velo(scene_data, tgt_idx, calib_K=None):
        """
        create point clouds from depth image, return array of points
        return:
        np [N, 3] (3d points)
        """
        depth_file = scene_data["depth_files"][tgt_idx]
        color_file = scene_data["img_files"][tgt_idx]
        # def get_point_cloud_from_images(color_file, depth_file):
        # """
        # will cause crashes!!!
        # """
        # import open3d as o3d # import open3d before torch to avoid crashes
        # depth_raw = o3d.io.read_image(depth_file)
        # color_raw = o3d.io.read_image(color_file)
        # rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format(
        # color_raw, depth_raw)
        # pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        # rgbd_image,
        # o3d.camera.PinholeCameraIntrinsic(
        # o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
        # # Flip it, otherwise the pointcloud will be upside down
        # pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # xyz_points = np.asarray(pcd.points)
        # return xyz_points
        def get_point_cloud_from_images(color_file, depth_file, calib_K=None):
            # Back-project every valid depth pixel through the pinhole model.
            # NOTE(review): pure-Python per-pixel loop — slow for full frames;
            # a vectorized NumPy version would be much faster.
            from PIL import Image
            depth = Image.open(depth_file)
            rgb = Image.open(color_file)
            points = []
            ## parameters
            if calib_K is None:
                # Defaults are the standard TUM freiburg intrinsics.
                focalLength = 525.0
                centerX = 319.5
                centerY = 239.5
            else:
                # Average fx/fy into a single focal length.
                focalLength = (calib_K[0, 0] + calib_K[1, 1]) / 2
                centerX = calib_K[0, 2]
                centerY = calib_K[1, 2]
            logging.debug(
                f"get calibration matrix for retrieving points: focalLength = {focalLength}, centerX = {centerX}, centerY = {centerY}"
            )
            # TUM depth PNGs store depth in units of 1/5000 m.
            scalingFactor = 5000.0
            for v in range(rgb.size[1]):
                for u in range(rgb.size[0]):
                    color = rgb.getpixel((u, v))
                    Z = depth.getpixel((u, v)) / scalingFactor
                    if Z == 0:
                        # Zero depth means "no measurement" — skip the pixel.
                        continue
                    X = (u - centerX) * Z / focalLength
                    Y = (v - centerY) * Z / focalLength
                    # points.append("%f %f %f %d %d %d 0\n"%(X,Y,Z,color[0],color[1],color[2]))
                    points.append([X, Y, Z])
            logging.debug(f"points: {points[:3]}")
            return np.array(points)
            pass
        ###
        if Path(color_file).is_file() is False or Path(depth_file).is_file() is False:
            logging.warning(
                f"color file {color_file} or depth file {depth_file} not found!"
            )
            return None
        xyz_points = get_point_cloud_from_images(
            color_file, depth_file, calib_K=calib_K
        )
        # xyz_points = np.ones((10,3)) ######!!!
        logging.debug(f"xyz: {xyz_points[0]}, {xyz_points.shape}")
        return xyz_points
def loadConfig(filename):
    """Load a YAML configuration file and return its parsed contents.

    Args:
        filename: path of the YAML file (e.g. a TUM camera calibration).

    Returns:
        The parsed document (typically a dict of scalar values).
    """
    import yaml

    with open(filename, "r") as f:
        # safe_load restricts parsing to plain YAML types; yaml.load without
        # an explicit Loader is deprecated and can construct arbitrary Python
        # objects from untrusted files.
        config = yaml.safe_load(f)
    return config
# calib_file = '/data/euroc/mav0/cam0/sensor.yaml'
# calib_data = loadConfig(calib_file)
# intrinsics = load_intrinsics(calib_data)
# transformation_base_camera = load_extrinsics(calib_data)
# print(f"height, width, K, D = {intrinsics}")
# print(f"transformation_base_camera: {transformation_base_camera}")
# Module is import-only; nothing runs when executed directly.
if __name__ == "__main__":
    pass
|
# -*- coding: utf-8 -*-
import numpy as np
from framework.modules import Module
class LossMSE(Module):
    """Mean-squared-error loss (sum reduction) for the mini framework."""

    def forward(self, output, target):
        """
        Carries out the forward pass for backpropagation.
        INPUT
        output: Tensor with output of the network
        target: Tensor with ground truth
        OUTPUT
        loss
        """
        # Cache the residual so backward() can reuse it; target is reshaped
        # to match the network output.
        residual = output.float() - target.float().view(output.size())
        self.diff = residual
        return residual.pow(2).sum()

    def backward(self):
        """
        Carries out the backward pass for backpropagation.
        OUTPUT
        Gradient of loss
        """
        # d/dx sum((x - t)^2) = 2 * (x - t)
        return 2 * self.diff
class CrossEntropyLoss(Module):
    """Binary cross-entropy loss for the mini framework."""

    def forward(self, output, target):
        """
        Carries out the forward pass for backpropagation.
        INPUT
        output: Tensor with output of the network
        target: Tensor with ground truth
        OUTPUT
        loss
        """
        self.target = target.float()
        self.output = output.float()
        # Standard BCE: -t*log(p) - (1-t)*log(1-p); nan_to_num guards the
        # log() overflow at p == 0 or p == 1.
        raw = -self.target * self.output.log() - \
            (1 - self.target) * (1 - self.output).log()
        return np.nan_to_num(raw).sum()

    def backward(self):
        """Carries out the backward pass for backpropagation
        OUTPUT
        Gradient of loss
        """
        # Gradient of BCE w.r.t. the (sigmoid) output simplifies to p - t.
        return self.output - self.target
|
#!/usr/bin/env python
"""Update the circulation manager server with new books from OPDS 2.0 import collections."""
import os
import sys
bin_dir = os.path.split(__file__)[0]
package_dir = os.path.join(bin_dir, "..")
sys.path.append(os.path.abspath(package_dir))
from core.scripts import OPDSImportScript
from core.model import ExternalIntegration
from core.opds2_import import OPDS2Importer, OPDS2ImportMonitor
import_script = OPDSImportScript(
importer_class=OPDS2Importer,
monitor_class=OPDS2ImportMonitor,
protocol=ExternalIntegration.OPDS2_IMPORT
)
import_script.run()
|
import argparse
from pathlib import Path
from cheffu.tokenize import tokenize
from cheffu.validate import validate
from cheffu.graph import generate_graph
from cheffu.shopping_list import shopping_list
from cheffu.format import format_standard
if __name__ == "__main__":
    # CLI: tokenize + validate a Cheffu recipe, optionally render a diagram.
    parser = argparse.ArgumentParser(description='Parses recipe files written in Cheffu')
    parser.add_argument('recipe_file_path',
                        metavar='RECIPE FILE PATH',
                        type=Path,
                        help='input recipe file path to process',
                        )
    parser.add_argument('--output-diagram',
                        action='store_true',
                        help='if specified, outputs a diagram of the recipe',
                        )
    parser.add_argument('--diagram-file-path',
                        metavar='DIAGRAM FILE PATH',
                        type=Path,
                        default=Path('./recipe.png'),
                        help='if output-diagram is specified, outputs the diagram to this specified file path; defaults to "./recipe.png"',
                        )
    args = parser.parse_args()

    # Parse pipeline: raw text -> tokens -> validated recipe graph.
    recipe_text = args.recipe_file_path.read_text()
    tokens = tokenize(recipe_text)
    recipe_dict, start_key = validate(tokens)
    if args.output_diagram:
        graph = generate_graph(recipe_dict, start_key)
graph.write_png(str(args.diagram_file_path)) |
#-*- encoding=utf8 -*-
#!/usr/bin/env python
import sys, operator, string,re,Queue,threading
# Python 2 script: multi-threaded word-frequency count over a novel.
path_to_stop_words = 'BasicData/stop_words.txt'  # comma-separated stop words
path_to_text = 'BasicData/Pride_And_Prejudice.txt'
# Shared queues: main thread produces words, workers consume them and each
# publishes its private frequency dict on freq_space when the queue drains.
word_space = Queue.Queue()
freq_space = Queue.Queue()
stop_words = set(open(path_to_stop_words).read().split(','))
# for w in stop_words:
# print w
def process_word():
    """Worker: drain word_space, tally non-stop-words, publish the tally.

    Keeps a private frequency dict and pushes it onto freq_space once the
    shared queue has been empty for one second.
    """
    local_freqs = {}
    while True:
        try:
            token = word_space.get(timeout=1)
        except Queue.Empty:
            # Queue drained: this worker is done.
            break
        if token in stop_words:
            continue
        local_freqs[token] = local_freqs.get(token, 0) + 1
    freq_space.put(local_freqs)
# Feed every lowercase word of 2+ letters into the shared work queue.
for word in re.findall('[a-z]{2,}',open(path_to_text).read().lower()):
    word_space.put(word)
# Five workers drain the queue concurrently.
workers = []
for i in range(5):
    workers.append(threading.Thread(target = process_word))
[t.start() for t in workers]
[t.join() for t in workers]
# Merge the per-worker frequency dicts into one table.
word_freqs = {}
while not freq_space.empty():
    freqs = freq_space.get()
    for (k,v) in freqs.iteritems():
        if k in word_freqs:
            # Word seen by several workers: sum both partial counts.
            count = sum(item[k] for item in [freqs,word_freqs])
        else:
            count = freqs[k]
        word_freqs[k] = count
# Print the 25 most frequent words.
for (w,c) in sorted(word_freqs.iteritems(),key=operator.itemgetter(1),reverse=True)[:25]:
    print w , ' - ', c
|
#!/home/mumaxbaby/anaconda3/envs/pmp/bin/python
"""
Author: Jialun Luo
Calculate time resolved field propagation of some photonic crystal structure
Note: on a different machine, check the #! statement at the beginning of the file
Parameters:
sidebankThickness,
separation - the distance between the centers of air cylinders of the center row
"""
import numpy as np
import meep as mp
import matplotlib.pyplot as plt
import matplotlib as mpl
import math
import argparse
import PostProcessingUtils as PPU
import sys
import subprocess
def setupSimulaion(eps=1, r=0.2, fcen=0.4, df=0.2, unitCellCountX=20, unitCellCountY=5, computeCellSizeX=20, computeCellSizeY=10, doFlux = True, geometryLattice=None, makeCavity=False, cavityUnitCellCount=2, pointSourceLocation=None, PMLThickness=1.0, sidebankThickness = 1.0, bridgeWidth = 1.0, separation = 2.0, defectY = math.sqrt(3), waveguideLineY = -math.sqrt(3), rRR = 0.21, RRShift=0.1
):
    """Assemble a Meep 2D simulation of a photonic-crystal bridge waveguide.

    Builds a dielectric bridge perforated by an air-hole lattice, adds a
    line-defect waveguide, optionally a Noda-style cavity, and a Gaussian
    Ey point source. Returns the configured mp.Simulation (not yet run).

    NOTE(review): several parameters (doFlux, sidebankThickness, separation)
    are currently unused in the body — confirm before relying on them.
    """
    computationCell = mp.Vector3(computeCellSizeX, computeCellSizeY)
    materialHBN = mp.Medium(epsilon=eps)
    dielectricMaterial = materialHBN
    # Template cylinders: air holes punched into the slab, and dielectric
    # plugs used to fill holes along the defect lines.
    airCylinder = mp.Cylinder(r, material=mp.air)
    hBNCylinder = mp.Cylinder(r, material=dielectricMaterial)
    # if(geometryLattice is None):
    # print('No lattice provided, setup triangle lattice...')
    # basis1 = mp.Vector3(math.sqrt(3)/2, 0.5)
    # basis2 = mp.Vector3(math.sqrt(3)/2, -0.5)
    #
    # geometryLattice = mp.Lattice( size = mp.Vector3(unitCellCountX, unitCellCountY),
    # basis1 = basis1,
    # basis2 = basis2)
    # The slab itself: an infinite block of dielectric of height bridgeWidth.
    hBNBridge = mp.Block(mp.Vector3(mp.inf, bridgeWidth, mp.inf), material = materialHBN)
    geometryAssembly = [hBNBridge]
    """ """
    # Replicate the air hole over every lattice site.
    airHoles = mp.geometric_objects_lattice_duplicates(geometryLattice, [airCylinder])
    for hole in airHoles:
        geometryAssembly.append(hole)
    """ make a defect line at ybasis = waveguideLineY"""
    # Re-fill one row of holes with dielectric to form the waveguide.
    for i in range(unitCellCountX):
        shift = math.ceil(unitCellCountX/2)-1
        geometryAssembly.append(mp.Cylinder(r, material=dielectricMaterial, center=mp.Vector3(1 * (i-shift), waveguideLineY)))
    """ change the center into cartesian coordinates for meep"""
    for geometricObject in geometryAssembly:
        geometricObject.center = mp.lattice_to_cartesian(geometricObject.center, geometryLattice)
    """ make a cavity at the 4th line below the waveguide line """
    """ Refer to Susumu Noda's high Q photonic crystal paper for the geometry idea """
    if(makeCavity):
        # Dielectric plugs form the cavity; two displaced air cylinders (rRR,
        # shifted outward by RRShift) terminate it on each side.
        for i in range(cavityUnitCellCount + 2):
            shift = math.ceil(cavityUnitCellCount / 2) - 1
            geometryAssembly.append(mp.Cylinder(r, material=dielectricMaterial, center=mp.Vector3(1 * (i - shift), defectY)))
        geometryAssembly.append(mp.Cylinder(rRR, material=mp.air, center=mp.Vector3(1 * (0 - shift) - RRShift, defectY)))
        geometryAssembly.append(mp.Cylinder(rRR, material=mp.air, center=mp.Vector3(1 * (cavityUnitCellCount + 1 - shift) + RRShift, defectY)))
    """ for finding my (0,0) coordinate... comment out when running actual simulation"""
    # geometryAssembly.append(mp.Cylinder(0.1, material=mp.air, center=mp.Vector3(0, defectY)))
    if (pointSourceLocation is None):
        """ if the source location is not specified"""
        pointSourceLocation = mp.Vector3(0, waveguideLineY)
    """ Use a Gaussian source to excite """
    excitationSource = [mp.Source(mp.GaussianSource(frequency=fcen,fwidth=df),
                                  component=mp.Ey,
                                  center=pointSourceLocation,
                                  size=mp.Vector3(0, 1))]
    pml_layers = [mp.PML(PMLThickness)]
    resolution = 20  # grid points per unit length
    sim = mp.Simulation(cell_size=computationCell,
                        boundary_layers=pml_layers,
                        geometry=geometryAssembly,
                        default_material=mp.air,
                        sources=excitationSource,
                        resolution=resolution)
    return sim
""" Debug tool"""
def trace(frame, event, arg):
    """sys.settrace hook: print every event with its source location.

    Returns itself so tracing continues into nested scopes.
    """
    location = f"{frame.f_code.co_filename}:{frame.f_lineno}"
    print(f"{event}, {location}")
    return trace
if __name__ == '__main__':
    # Parameter-sweep driver: build the photonic-crystal simulation for each
    # (frequency, cavity, rRR, RRShift) combination, run it, and dump flux
    # spectra / field snapshots under defaultResultFolder.
    # sys.settrace(trace)
    """ Set up argparser here"""
    # NOTE(review): the parser is created but no arguments are added or
    # parsed — all parameters below are hard-coded.
    parser = argparse.ArgumentParser(description = 'Configure and run meep on certain geometry')
    pythonScriptName = 'brd_wvg_cvt_diff_spc'
    PMLThickness = 1.0
    eps0 = 4.84  # slab permittivity
    ''' geometries '''
    r0 = 0.382  # lattice air-hole radius
    r1 = 0.25  # cavity end-hole radius (overwritten by the sweep)
    r1Shift = 0.1
    d1 = 0.1
    # f0 = 0.344086 # center frequency of the source
    framerate = 8  # field snapshots per source period
    unitCellCountX = 60
    unitCellCountY = 15
    simDomainSizeX = 40
    simDomainSizeY = 20
    bridgeWidthPadding = 1
    bridgeWidth = 15 * math.sqrt(3) / 2 + bridgeWidthPadding
    defectYSet = math.sqrt(3)
    waveguideY = 0
    """ Analysis parameters """
    nfreq = 4000 # number of frequencies at which to compute flux
    # nfreq = 500
    fluxFcen = 0.4
    fluxDF = 0.6
    harminvF0 = 0.25
    harminvDf = 0.2
    """ setup the geometry lattice """
    # Triangular lattice basis.
    basis1 = mp.Vector3(1, 0)
    basis2 = mp.Vector3(0.5, math.sqrt(3)/2)
    geometryLattice = mp.Lattice(size = mp.Vector3(unitCellCountX, unitCellCountY),
                                 basis1 = basis1,
                                 basis2 = basis2)
    """ run with flux calculation """
    # print(f'{mp.lattice_to_cartesian(mp.Vector3(0,1,0), geometryLattice)}')
    # Transmission monitor just inside the right PML.
    fluxCutline = mp.FluxRegion(center=mp.Vector3( simDomainSizeX/2 - 1 * PMLThickness - 0.5, waveguideY), size=mp.Vector3(0, 1))#, direction = mp.X)
    """ Parameter sweeps """
    cavityUnitCellCountQuery = np.arange(4, 5, 1)
    isMakingCavityQuery = [True, False]
    separationQuery = np.arange(1.65, 3, 0.01)
    """ Setups for printing stdout into a file"""
    # originalStdout = sys.stdout
    # The assignments below narrow the sweeps for this particular run.
    cavityUnitCellCount = 3
    cavityUnitCellCountQuery = [3]
    # cavityUnitCellCountQuery = [9]
    separationQuery = [1.56]
    # separationQuery = [1, 1.2 , 1.5, 2]
    # epsQuery = [5, 7, 9, 11, 13]
    # epsQuery = [13]
    refIsCalculated=False
    # exciteF0Query = np.arange(0.390, 0.490, 0.01)
    exciteF0Query = [0.4]
    df = 0.7 # bandwidth of the source (Gaussian frequency profile, 1 sigma frequency)
    rRRQuery = np.arange(0.25, 0.38, 0.01)
    RRShiftQuery = np.arange(0, 0.15, 0.01)
    harminvF0 = 0.35
    harminvDf = 0.15
    # Source placed at the left edge of the waveguide, outside the PML.
    ptSourceLocation = mp.Vector3(- (simDomainSizeX/2- 1 * PMLThickness) , waveguideY)
    # ptSourceLocation = mp.Vector3(- (simDomainSizeX/2 - 1.5 * PMLThickness), 0)
    isResonanceStudy = False
    defaultResultFolder = '/home/mumaxbaby/Documents/jialun/MPBLearn/results/meepTrigLatCylAirHole'
    sim = None
    for f0 in exciteF0Query:
        for isMakingCavity in isMakingCavityQuery:
            for r1 in rRRQuery:
                for r1Shift in RRShiftQuery:
                    """ End the current loop after one run of without holes (when isMakingCavity == False)"""
                    # NOTE(review): once the no-cavity reference has run, the
                    # whole process exits with status 10 — only one reference
                    # run is ever produced.
                    if (refIsCalculated):
                        # refIsCalculated = False
                        # break
                        exit(10)
                    if(isMakingCavity):
                        runDescription = f'with-cavity-{cavityUnitCellCount}_rRR-{r1:.3f}_RRShift-{r1Shift:.3f}_excite_fc-{f0:.3f}_bw-{df:.3f}_flux_fc-{fluxFcen:.3f}_df-{fluxDF:.3f}'
                    else:
                        refIsCalculated = True
                        runDescription = f'no-cavity_rRR-{r1:.3f}_RRShift-{r1Shift:.3f}_excite_fc-{f0:.3f}_bw-{df:.3f}_flux_fc-{fluxFcen:.3f}_df-{fluxDF:.3f}'
                    # Per-run output file names.
                    fieldFileBasename = f'{runDescription}_field'
                    epsMapFileBasename = f'{runDescription}_eps'
                    fluxFileBasename = f'{runDescription}_flux'
                    epsMapH5Filename = f'{defaultResultFolder}/{pythonScriptName}-{epsMapFileBasename}.h5'
                    fieldH5Filename = f'{defaultResultFolder}/{pythonScriptName}-{fieldFileBasename}.h5'
                    runLogFilename = f'{defaultResultFolder}/{runDescription}.log'
                    initLogFilename = f'{defaultResultFolder}/{runDescription}.initialization.log'
                    fluxDataFilename = f'{defaultResultFolder}/{fluxFileBasename}.csv'
                    sim = setupSimulaion(eps = eps0, r = r0, fcen = f0, df = df, unitCellCountX = unitCellCountX, unitCellCountY = unitCellCountY, geometryLattice=geometryLattice, computeCellSizeX=simDomainSizeX, computeCellSizeY=simDomainSizeY, makeCavity=isMakingCavity, cavityUnitCellCount=cavityUnitCellCount, bridgeWidth = bridgeWidth, pointSourceLocation=ptSourceLocation, defectY = defectYSet, waveguideLineY = waveguideY, rRR = r1, RRShift = r1Shift)
                    # sim = setupSimulaion(eps = eps0, r = r0, fcen = f0, df = df, unitCellCountX = unitCellCountX, unitCellCountY = unitCellCountY, geometryLattice=geometryLattice, computeCellSizeX=simDomainSizeX, computeCellSizeY=simDomainSizeY, makeCavity=isMakingCavity, cavityUnitCellCount=cavityUnitCellCount, bridgeWidth = bridgeWidth, separation = separation)
                    # sim.init_sim()
                    sim.use_output_directory(defaultResultFolder)
                    """ add_flux for calculate flux """
                    trans = sim.add_flux(fluxFcen, fluxDF, nfreq, fluxCutline)
                    if (isResonanceStudy):
                        sim.run()
                        break
                    # Run until the fields at the output port have decayed;
                    # the cavity run additionally records eps and Ey movies.
                    if (isMakingCavity) :
                        sim.run(
                            # mp.after_sources(mp.Harminv(mp.Ey, mp.Vector3(0, 0), harminvF0, harminvDf)),
                            mp.at_beginning(mp.to_appended(epsMapFileBasename, mp.output_epsilon)),
                            mp.to_appended(fieldFileBasename, mp.at_every(1 / f0 / framerate, mp.output_efield_y)),
                            # until=1)
                            # mp.during_sources(mp.in_volume(vol, mp.to_appended(f'{runDescription}_ez-slice', mp.at_every(0.4, mp.output_efield_z)))),
                            # until_after_sources = 500)
                            until_after_sources = mp.stop_when_fields_decayed(50, mp.Ey, mp.Vector3(simDomainSizeX/2 - PMLThickness - 0.5, 0), 1e-2))
                    else:
                        sim.run(
                            # mp.after_sources(mp.Harminv(mp.Ey, mp.Vector3(0, 0), harminvF0, harminvDf)),
                            mp.at_beginning(mp.to_appended(epsMapFileBasename, mp.output_epsilon)),
                            # mp.to_appended(fieldFileBasename, mp.at_every(1 / f0 / framerate, mp.output_efield_z)),
                            # until=1)
                            # mp.during_sources(mp.in_volume(vol, mp.to_appended(f'{runDescription}_ez-slice', mp.at_every(0.4, mp.output_efield_z)))),
                            until_after_sources = mp.stop_when_fields_decayed(50, mp.Ey, mp.Vector3(simDomainSizeX/2 - PMLThickness - 0.5, 0), 1e-2))
                    # print(f'Run description: {runDescription}')
                    # Redirect stdout so display_fluxes lands in the CSV.
                    # NOTE(review): reopening "/dev/stdout" is Linux-specific
                    # and does not restore the original sys.stdout object.
                    with open(fluxDataFilename, 'w') as csvrecord:
                        print(f'point stdout to {fluxDataFilename}')
                        sys.stdout = csvrecord
                        sim.display_fluxes(trans) # print out the flux spectrum
                    sys.stdout = open("/dev/stdout", "w")
                    # print('print back to the real stdout')
                    print('Flux data saved at')
                    print(fluxDataFilename)
                    print('or')
                    print(f'{fluxFileBasename}.csv')
                    """ closing log files """
                    # initLog.close()
                    # runLog.close()
                    """ Convert the eps map h5 file into a png file"""
PPU.PlotDielectricMap(epsMapH5Filename) |
#!/usr/bin/env python
import rospy
import tf
import threading
import time
from numpy import *
import sys
import std_msgs
class ViconTracker(object):
    """Looks up a named object's 2D pose (x, y, yaw) from Vicon via ROS tf.

    Expects a running tf tree publishing 'vicon/<name>/<name>' relative to
    'world'. rospy.init_node must be called by the user before construction.
    """

    # NOTE(review): these class attributes are never actually updated —
    # updatePose assigns to local names Xx/Yy/Oo, not to the class (see below).
    Xx = 0
    Yy = 0
    Oo = 0

    def __init__(self, name):
        #init_node()
        #rospy.init_node('Whatever')
        # tf frame of the tracked object; `name` must be a str.
        self.target = 'vicon/' + name + '/' + name
        self.x = 0
        self.y = 0
        self.o = 0  # yaw (rotation about z), radians
        self.t = tf.TransformListener()
        # Background thread performs the first (blocking) transform lookup.
        self.thread = threading.Thread(target=self.updatePose)
        self.thread.daemon = True
        self.thread.start()

    def updatePose(self):
        """Block until a world->target transform is available, then cache it."""
        #rospy
        #while True:
        #a = self.t.lookupTransform('world',self.target, rospy.Time(0))
        self.t.waitForTransform('world',self.target, rospy.Time(0), rospy.Duration(4.0))
        # while not rospy.is_shutdown():
        # try:
        # now = rospy.Time.now()
        # self.t.waitForTransform('world',self.target, now, rospy.Duration(4.0))
        a = self.t.lookupTransform('world',self.target, rospy.Time(0))
        self.x = a[0][0]
        self.y = a[0][1]
        euler = tf.transformations.euler_from_quaternion(a[1])
        self.o = euler[2]
        # NOTE(review): the three assignments below create locals that shadow
        # the class attributes Xx/Yy/Oo — they are dead stores.
        Xx = self.x
        Yy = self.y
        Oo = self.o

    def _stop(self):
        print( "Vicon pose handler quitting..." )
        self.thread.join()
        print( "Terminated." )

    def getPose(self, cached=False):
        """Return the current pose as array([x, y, yaw]).

        NOTE(review): `cached` is ignored — every call re-runs updatePose,
        which blocks on waitForTransform.
        """
        #print "({t},{x},{y},{o})".format(t=t,x=x,y=y,o=o)
        self.updatePose()
        return array([self.x, self.y, self.o])
def getViconData():
    """Demo entry point: poll one Vicon tracker and print its pose forever.

    Initializes its own ROS node, so it must not be combined with another
    init_node call in the same process.
    """
    rospy.init_node('XXX_listener')
    # The tracker name is concatenated into the tf frame string
    # ('vicon/' + name + '/' + name), so it must be a str. The previous
    # code passed the int 2, which raised TypeError in ViconTracker.__init__.
    a = ViconTracker(str(2))
    print(a.getPose())
    time.sleep(1)
    while True:
        time.sleep(0.5)
        b = a.getPose()
        print( b )
if __name__ == "__main__":
    # Stand-alone demo: track one Sphero and print its pose twice a second.
    #rospy.init_node('Hexbug_listener')
    rospy.init_node('XXX_listener')
    # Publisher is created but never used below — kept from an older version.
    pub = rospy.Publisher('vicon222',std_msgs.msg.Float32,queue_size=2)
    rate = rospy.Rate(12)
    '''startNum = int(sys.argv[-2])
    endNum = int(sys.argv[-1])
    N = endNum-startNum+1
    posData = zeros([N,2])
    while True:
    for i in range(N):
    posData[i,:] = ViconTracker(i+startNum).getPose()[0:2]
    pub.publish(posData[0,0])
    rospy.loginfo(posData)t
    rate.sleep()'''
    # helmet = ViconTracker('Helmet_1')
    sphero13 = ViconTracker('Sphero13')
    # print(helmet.getPose())
    print(sphero13.getPose())
    time.sleep(1)
    # Poll forever; each getPose() call blocks on a fresh tf lookup.
    while True:
        time.sleep(0.5)
        # print('helmet:',helmet.getPose())
        print('sphero13',sphero13.getPose())
|
import pandas as pd
import numpy as np
from ortools.linear_solver import pywraplp
import ortools
import torch
import torch.nn as nn
import torch.utils.data
import utils_new as ut
def sigmoid_inverse(x):
    """Logit function: the inverse of the sigmoid, log(x / (1 - x))."""
    return torch.log(x / (1 - x))
class MLP(nn.Module):
    """Three-hidden-layer ReLU MLP with a single sigmoid output unit.

    Maps a batch of D_in-dimensional features to probabilities in (0, 1).
    """

    def __init__(self, D_in, hidden):
        super(MLP, self).__init__()
        layers = [
            nn.Linear(D_in, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1), nn.Sigmoid(),
        ]
        # Keep the attribute name `MLP` so state_dict keys stay compatible.
        self.MLP = nn.Sequential(*layers)

    def forward(self, x):
        """Return predicted probabilities of shape (batch, 1)."""
        return self.MLP(x)
class PP_NN(object):
    """MLP classifier with fairness post-processing via group thresholds.

    Expects `data_set` rows ordered train | validation | test, split at
    index_train and index_validate; the sensitive attribute column is named
    by `sensitive_variable`. NOTE(review): hard-wired to CUDA — every model
    and tensor is moved with .cuda().
    """

    def __init__(self, data_set, index_train, index_validate, sensitive_variable):
        # Mini-batch size for the SGD loaders; the *_full loaders deliver an
        # entire split as a single batch.
        batch_size = 100
        data_set_train = ut.dataset_for_preprocessed_features_include_s(data_set.iloc[:index_train,:], sensitive_variable)
        data_set_valid = ut.dataset_for_preprocessed_features_include_s(data_set.iloc[index_train:index_validate,:], sensitive_variable)
        data_set_train_and_valid = ut.dataset_for_preprocessed_features_include_s(data_set.iloc[:index_validate,:], sensitive_variable)
        data_set_test = ut.dataset_for_preprocessed_features_include_s(data_set.iloc[index_validate:,:], sensitive_variable)
        self.dataloader_train = torch.utils.data.DataLoader(data_set_train, batch_size = batch_size, shuffle = True)
        self.dataloader_train_full = torch.utils.data.DataLoader(data_set_train, batch_size = len(data_set.iloc[:index_train,:]), shuffle = True)
        self.dataloader_valid = torch.utils.data.DataLoader(data_set_valid, batch_size = len(data_set.iloc[index_train:index_validate,:]), shuffle = False)
        self.dataloader_train_and_valid = torch.utils.data.DataLoader(data_set_train_and_valid, batch_size = batch_size, shuffle = True)
        self.dataloader_train_and_valid_full = torch.utils.data.DataLoader(data_set_train_and_valid, batch_size = len(data_set.iloc[:index_validate,:]), shuffle = True)
        self.dataloader_test = torch.utils.data.DataLoader(data_set_test, batch_size = len(data_set.iloc[index_validate:,:]), shuffle = False)
        # One column is the label; the rest are model features.
        self.feature_size = data_set.shape[1]-1

    def get_best_model(self, num_trials, hyperparameters):
        """Random-search the hidden width over `num_trials` trials and keep
        the model with the best validation accuracy in self.model."""
        def train(model, optimizer, batch):
            # One optimization step on one mini-batch (BCE loss).
            X, s, y = batch
            X, s, y = X.cuda(), s.cuda(), y.cuda()
            out = model(X)
            loss = criterion(out, y.unsqueeze(1))
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            return model, optimizer
        list_of_accuracies = []
        list_of_parameters = []
        list_of_models = []
        criterion = nn.BCELoss()
        for trial in range(num_trials):
            # NOTE(review): np.asscalar was removed in NumPy 1.23 — .item()
            # is the modern equivalent.
            hidden = np.asscalar(np.random.choice(hyperparameters['hidden'], 1))
            model = MLP(self.feature_size, hidden).cuda()
            optimizer = torch.optim.Adam(model.parameters())
            for i in range(15):  # fixed number of epochs per trial
                for batch in self.dataloader_train:
                    model, optimizer = train(model, optimizer, batch)
            # Score the trial by validation accuracy at threshold 0.5.
            batch = next(iter(self.dataloader_valid))
            X_valid, s_valid, y_valid = batch
            X_valid, s_valid, y_valid = X_valid.cuda(), s_valid.cuda(), y_valid.cuda()
            out = model(X_valid)
            list_of_accuracies.append((((out.squeeze()>0.5).int() == y_valid.int()).sum().float()/len(X_valid)).detach().cpu().numpy())
            list_of_parameters.append(hidden)
            list_of_models.append(model)
        best_parameter = list_of_parameters[np.argmax(list_of_accuracies)]
        print(best_parameter)
        #hidden = best_parameter
        model = list_of_models[np.argmax(list_of_accuracies)]
        self.model = model

    def get_thresholds_and_predict(self, fairness):
        """Fit group-specific decision thresholds on the training split under
        the given fairness criterion, then report statistics on all splits.

        Also attaches each training point's (approximate, gradient-based)
        distance to its group's decision boundary to the train statistics.
        """
        batch = next(iter(self.dataloader_train_full))
        X_train, s_train, y_train = batch
        X_train, s_train, y_train = X_train.cuda(), s_train.cuda(), y_train.cuda()
        # Gradients w.r.t. the inputs are needed for the boundary distances.
        X_train.requires_grad = True
        out = self.model(X_train).squeeze()
        predicted_probabilities_train_a = out[s_train == 0].detach().cpu().numpy()
        predicted_probabilities_train_b = out[s_train == 1].detach().cpu().numpy()
        threshold_a, threshold_b = ut.get_optimal_thresholds(predicted_probabilities_train_a, predicted_probabilities_train_b, y_train.cpu().numpy()*2-1, (s_train==0).cpu().numpy().astype(bool), (s_train==1).cpu().numpy().astype(bool), fairness)
        train_statistics = ut.get_statistics(y_hat = out.squeeze().detach().cpu().numpy(),
                                             y = y_train.cpu().numpy(),
                                             s = s_train.cpu().numpy(),
                                             threshold_s_0 = threshold_a,
                                             threshold_s_1 = threshold_b)
        # Work in logit space: distance = |logit(threshold) - logit(p)| / ||grad||.
        out = sigmoid_inverse(out)
        out.sum().backward()
        X_pos_gradient = X_train.grad
        norm_gradient = torch.sqrt(torch.sum((X_pos_gradient**2), axis= 1))
        threshold = torch.ones(X_train.size(0)).cuda()
        threshold[s_train == 0] = torch.tensor(threshold_a).cuda()
        threshold[s_train == 1] = torch.tensor(threshold_b).cuda()
        oben = torch.abs(sigmoid_inverse(threshold) - out).squeeze()
        # tiny avoids division by zero for vanishing gradients.
        db_to_boundary = (oben)/(norm_gradient+ np.finfo(np.float32).tiny)
        train_statistics['distances'] = db_to_boundary
        batch = next(iter(self.dataloader_valid))
        X_valid, s_valid, y_valid = batch
        X_valid, s_valid, y_valid = X_valid.cuda(), s_valid.cuda(), y_valid.cuda()
        out = self.model(X_valid).squeeze()
        validation_statistics = ut.get_statistics(y_hat = out.squeeze().detach().cpu().numpy(),
                                                  y = y_valid.cpu().numpy(),
                                                  s = s_valid.cpu().numpy(),
                                                  threshold_s_0 = threshold_a,
                                                  threshold_s_1 = threshold_b)
        batch = next(iter(self.dataloader_test))
        X_test, s_test, y_test = batch
        X_test, s_test, y_test = X_test.cuda(), s_test.cuda(), y_test.cuda()
        out = self.model(X_test).squeeze()
        test_statistics = ut.get_statistics(y_hat = out.squeeze().detach().cpu().numpy(),
                                            y = y_test.cpu().numpy(),
                                            s = s_test.cpu().numpy(),
                                            threshold_s_0 = threshold_a,
                                            threshold_s_1 = threshold_b)
        return({'train_statistics' : train_statistics, 'validation_statistics': validation_statistics, 'test_statistics': test_statistics})
|
"""
MetaGenScope-CLI is used to upload data sets to the MetaGenScope web platform.
"""
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.install import install
from metagenscope_cli import __version__
# Runtime dependencies installed with the package.
dependencies = [
    'click',
    'requests',
    'configparser',
    'pandas',
    'datasuper==0.9.0',
]
# datasuper is not on PyPI at this pin; fetched from its git develop branch.
dependency_links = [
    'git+https://github.com/dcdanko/DataSuper.git@develop#egg=datasuper-0.9.0',
]
def readme():
    """Read README.md and return its text for use as the long description."""
    with open('README.md') as fh:
        contents = fh.read()
    return contents
class VerifyVersionCommand(install):
    """Custom command to verify that the git tag matches our version.

    Used in CI: `python setup.py verify` fails the build when the CIRCLE_TAG
    environment variable is not 'v<__version__>'.
    """

    description = 'Verify that the git tag matches our version.'

    def run(self):
        expected = 'v{0}'.format(__version__)
        tag = os.getenv('CIRCLE_TAG')
        if tag != expected:
            # sys.exit with a string prints it to stderr and exits non-zero.
            message = 'Git tag: {0} does not match the version of this app: {1}'
            sys.exit(message.format(tag, __version__))
# Package metadata and build configuration.
setup(
    name='metagenscope',
    version=__version__,
    url='https://github.com/bchrobot/python-metagenscope',
    license='MIT',
    author='Benjamin Chrobot',
    author_email='benjamin.chrobot@alum.mit.edu',
    description='MetaGenScope-CLI is used to upload data sets to the MetaGenScope web platform.',
    long_description=readme(),
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=dependencies,
    dependency_links=dependency_links,
    # Console entry point: the `metagenscope` command.
    entry_points={
        'console_scripts': [
            'metagenscope = metagenscope_cli.cli:main',
        ],
    },
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    # `python setup.py verify` checks the git tag against __version__ (CI).
    cmdclass={
        'verify': VerifyVersionCommand,
    },
)
|
import requests
from bs4 import BeautifulSoup
import json
from urllib import request, parse
import pandas as pd
import os
import time
import shutil
import csv
from pprint import pprint
import pymongo as pm
import datetime
class MongoOperator:
    """Thin convenience wrapper around a pymongo client/database/collection."""

    def __init__(self, host, port, db_name, default_collection):  # mongo port: 27017
        self.client = pm.MongoClient(host=host, port=port)  # open the connection
        self.db = self.client.get_database(db_name)  # select the database
        self.collection = self.db.get_collection(default_collection)  # default collection

    def insert(self, item, collection_name=None):
        """Insert *item* into *collection_name* (or the default collection)."""
        if collection_name is not None:
            # BUG FIX: was get_collection(self.db) — passed the Database
            # object instead of the collection name.
            collection = self.db.get_collection(collection_name)
            collection.insert(item)
        else:
            self.collection.insert(item)

    def find(self, expression=None, collection_name=None):
        """Return a cursor over documents matching *expression* (None = all)."""
        if collection_name is not None:
            # BUG FIX: was get_collection(self.db) — see insert().
            collection = self.db.get_collection(collection_name)
        else:
            collection = self.collection
        if expression is None:
            return collection.find()
        return collection.find(expression)

    def get_collection(self, collection_name=None):
        """Return the named collection, or the default one when None."""
        if collection_name is None:
            return self.collection
        # BUG FIX: was self.get_collection(collection_name), which recursed
        # into itself forever; delegate to the underlying database instead.
        return self.db.get_collection(collection_name)
# Load every JSON file in the target folder into MongoDB (one collection
# per file, named after the file).
JsonFile_Path = r'E:\json'  # source directory
os.chdir(JsonFile_Path)  # switch the working directory there
file_list = os.listdir()  # every file name in this folder
file_json=[]
for file in file_list:  # keep only the .json files
    if '.json' in file:
        file_json.append(file)
for i in range(0, len(file_json)):  # process every JSON file
    # utf-8-sig strips a BOM if the file has one
    with open('E:/json/%s'%(file_json[i]), 'r', encoding="utf-8-sig") as f:
        result_data=json.load(f)
        # NOTE(review): a new MongoOperator (and client connection) is
        # created per file; the collection name is the file name itself.
        db = MongoOperator('10.120.26.31',27017,'te','%s'%(file_json[i]))
        db.insert(result_data)
# Echo the contents of the last collection written (db from the final loop
# iteration).
for item in db.find():
    print(item)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
将结果写入web接口
"""
import os
# Project root: two directory levels up from this file.
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# (Removed a stray `os.path.join(BASEDIR)` statement that discarded its
# result — it was a no-op.)

# Log file locations under <project root>/log/.
ERROR_LOG_FILE = os.path.join(BASEDIR, "log", 'error.log')
RUN_LOG_FILE = os.path.join(BASEDIR, "log", 'message.log')

# MQ_SERVER = "192.168.0.1"
# MQ_PORT = "1111"

# CMDB web API endpoint and client credentials.
API = "http://127.0.0.1:8000/cmdbapi"
APPID = 'Appid00002'
SECRETKEY = 'ThisIsSecuretKey0002'
VERSION = "v1"
KEY = '299095cc-1330-11e5-b06a-a45e60bec08b'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-18 00:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes invcategory.categoryName a nullable,
    # indexed CharField and invcategory.iconID a nullable IntegerField.
    # Do not edit the operations by hand — Django tracks this file's state.

    dependencies = [
        ('evesde', '0008_invcategory'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invcategory',
            name='categoryName',
            field=models.CharField(db_index=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='invcategory',
            name='iconID',
            field=models.IntegerField(null=True),
        ),
    ]
|
"""Communicate from server to raspberry pi"""
import socket
import sys
import queue
import serial
import syslog
import time
import math
import threading
'''from TopsidesGlobals import GLOBALS
#import topsidesComms
# Change IP addresses for a production or development environment
if ((len(sys.argv) > 1) and (sys.argv[1] == "--dev")):
ipSend = GLOBALS['ipSend-5-dev']
ipHost = GLOBALS['ipHost-5-dev']
else:
ipSend = GLOBALS['ipSend-4']
ipHost = GLOBALS['ipHost']
portSend = GLOBALS['portSend-5']
portHost = GLOBALS['portHost']
received = queue.Queue()
# Try opening a socket for communication
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
print("Failed To Create Socket")
sys.exit()
except Exception as e:
print("failed")
# Bind the ip and port of topsides to the socket and loop coms
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ipHost, portHost))
# Queue to hold send commands to be read by simulator
simulator = queue.Queue()'''
# This function sends data to the ROV
def sendData(inputData):
    """Send *inputData* to the ROV over UDP (transmission currently disabled)."""
    # NOTE(review): the actual send is commented out, so this is a no-op;
    # `s` and `portSend` come from the commented-out setup block above —
    # restore that block before re-enabling the sendto call.
    global s
    #s.sendto(inputData.encode('utf-8'), ("192.168.88.5", portSend))
# TESTING
def sendDataB(inputData):
    """Testing variant of sendData (transmission currently disabled)."""
    # NOTE(review): like sendData, but the commented-out send targets the
    # configured ipSend instead of a hard-coded address.
    global s
    #s.sendto(inputData.encode('utf-8'), (ipSend, portSend))
# This function is constantly trying to receive data from the ROV
def receiveData(flag):
    """Receive UDP messages from the ROV until "exit" arrives or *flag* is set.

    flag: a threading.Event-like object used to request shutdown from
    another thread (checked after each received message).
    """
    # NOTE(review): `s` (the socket) and `received` (the inbound queue) are
    # defined in the commented-out setup block above; this function raises
    # NameError until that block is restored.
    global s
    while True:
        outputData, addr = s.recvfrom(1024)  # blocking receive, 1 KiB max
        outputData = outputData.decode("utf-8")
        if (outputData == "exit"):  # sentinel message terminates the loop
            break
        received.put(outputData)  # hand the message to the consumer
        if flag.is_set():  # cooperative shutdown
            break
def putMessage(msg):
    """Send *msg* to the ROV and mirror it onto the simulator queue."""
    sendData(msg)
    # NOTE(review): `simulator` lives in the commented-out setup block above,
    # so this raises NameError until that block is restored. The short
    # timeout keeps the put from blocking if the queue were bounded.
    simulator.put(msg, timeout=0.005)
def runThruster(tData):
    """Issue one "runThruster.py <port> <value>" command per entry in *tData*.

    tData: mapping of thruster name -> power value.
    """
    # NOTE(review): GLOBALS comes from the commented-out TopsidesGlobals
    # import above — confirm it is in scope before use.
    for control in tData:
        val = tData[control]
        putMessage("runThruster.py " + str(GLOBALS["thrusterPorts"][control]) + " " + str(val))
    print("good")
# TODO:
# sway
# yaw
# pitch
# heave
# roll
def sway(power):
    """Command a sway maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per horizontal thruster via runThruster().
    """
    try:
        # Fix: bind the converted value. The original validated with
        # float(power) but kept the raw argument, so a numeric string passed
        # validation and then crashed on `-power`; None raised an uncaught
        # TypeError. The unused `good` flag is dropped.
        power = float(power)
    except (TypeError, ValueError):
        return False
    tData = {
        "fore-port-horz": power,
        "fore-star-horz": -power,
        "aft-port-horz": -power,
        "aft-star-horz": power,
    }
    print("Send command")
    runThruster(tData)
def yaw(power):
    """Command a yaw maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per horizontal thruster via runThruster().
    """
    try:
        # Fix: bind the converted value (see sway for the failure modes of
        # validating but not converting); also catch TypeError for None.
        power = float(power)
    except (TypeError, ValueError):
        return False
    tData = {
        "fore-port-horz": power,
        "fore-star-horz": power,
        "aft-port-horz": power,
        "aft-star-horz": -power,
    }
    print("Send command")
    runThruster(tData)
def heave(power):
    """Command a heave maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per vertical thruster via runThruster().
    """
    try:
        # Fix: bind the converted value; numeric strings previously crashed
        # on negation and None raised an uncaught TypeError.
        power = float(power)
    except (TypeError, ValueError):
        return False
    tData = {
        "fore-port-vert": power,
        "fore-star-vert": power,
        "aft-port-vert": -power,
        "aft-star-vert": -power,
    }
    print("Send command")
    runThruster(tData)
def pitch(power):
    """Command a pitch maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per vertical thruster via runThruster().
    """
    try:
        # Fix: bind the converted value; also catch TypeError for
        # non-string/non-numeric inputs such as None.
        power = float(power)
    except (TypeError, ValueError):
        return False
    tData = {
        "fore-port-vert": power,
        "fore-star-vert": power,
        "aft-port-vert": power,
        "aft-star-vert": power,
    }
    print("Send command")
    runThruster(tData)
def roll(power):
    """Command a roll maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per vertical thruster via runThruster().
    """
    try:
        # Fix: bind the converted value; numeric strings previously crashed
        # on negation and None raised an uncaught TypeError.
        power = float(power)
    except (TypeError, ValueError):
        return False
    tData = {
        "fore-port-vert": power,
        "fore-star-vert": -power,
        "aft-port-vert": -power,
        "aft-star-vert": power,
    }
    print("Send command")
    runThruster(tData)
def surge(power):
    """Command a surge maneuver at *power* (numeric or numeric string).

    Returns False when *power* cannot be converted to a number; otherwise
    dispatches one command per horizontal thruster via runThruster().
    """
    try:
        # Fix: bind the converted value; numeric strings previously crashed
        # on negation and None raised an uncaught TypeError.
        power = float(power)
    except (TypeError, ValueError):
        return False
    # NOTE(review): this sign pattern is byte-identical to sway()'s — for a
    # fore/aft surge one would expect a different pattern. Values preserved
    # as-is; confirm against the thruster layout.
    tData = {
        "fore-port-horz": power,
        "fore-star-horz": -power,
        "aft-port-horz": -power,
        "aft-star-horz": power,
    }
    print("Send command")
    runThruster(tData)
|
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import json
import logging
from unittest import mock
from unittest import TestCase
import requests
import PyPowerFlex
from PyPowerFlex import utils
class MockResponse(requests.Response):
    """Mock HTTP Response.

    Defines http replies from mocked calls to do_request().
    """

    def __init__(self, content, status_code=200):
        super(MockResponse, self).__init__()
        self._content = content
        self.status_code = status_code
        self.request = mock.MagicMock()

    def json(self, **kwargs):
        """Return the stored payload directly instead of parsing a body."""
        return self._content

    @property
    def text(self):
        """JSON-encode non-bytes payloads; defer to requests for raw bytes."""
        if isinstance(self._content, bytes):
            return super(MockResponse, self).text
        return json.dumps(self._content)
class PyPowerFlexTestCase(TestCase):
    """Base test case: wires a PowerFlexClient to canned HTTP responses."""

    # Pseudo-enum of response modes; each mode names a reply table below.
    RESPONSE_MODE = (
        collections.namedtuple('RESPONSE_MODE', 'Valid Invalid BadStatus')
        (Valid='Valid', Invalid='Invalid', BadStatus='BadStatus')
    )
    # Fallback reply for endpoints without a BadStatus entry of their own.
    BAD_STATUS_RESPONSE = MockResponse(
        {
            'errorCode': 500,
            'message': 'Test default bad status',
        }, 500
    )
    # Per-subclass endpoint -> reply overrides (checked before the defaults).
    MOCK_RESPONSES = dict()
    # Built-in endpoint -> reply tables, keyed by response mode.
    DEFAULT_MOCK_RESPONSES = {
        RESPONSE_MODE.Valid: {
            '/login': 'token',
            '/version': '3.5',
            '/logout': '',
        },
        RESPONSE_MODE.Invalid: {
            '/version': '2.5',
        },
        RESPONSE_MODE.BadStatus: {
            '/login': MockResponse(
                {
                    'errorCode': 1,
                    'message': 'Test login bad status',
                }, 400
            ),
            '/version': MockResponse(
                {
                    'errorCode': 2,
                    'message': 'Test version bad status',
                }, 400
            ),
            '/logout': MockResponse(
                {
                    'errorCode': 3,
                    'message': 'Test logout bad status',
                }, 400
            )
        }
    }
    # Current mode; name-mangled (class-private) and switched temporarily
    # via the http_response_mode() context manager below.
    __http_response_mode = RESPONSE_MODE.Valid

    def setUp(self):
        """Build the client under test and patch requests.get/post."""
        self.gateway_address = '1.2.3.4'
        self.gateway_port = 443
        self.username = 'admin'
        self.password = 'admin'
        self.client = PyPowerFlex.PowerFlexClient(self.gateway_address,
                                                  self.gateway_port,
                                                  self.username,
                                                  self.password,
                                                  log_level=logging.DEBUG)
        self.get_mock = self.mock_object(requests,
                                         'get',
                                         side_effect=self.get_mock_response)
        self.post_mock = self.mock_object(requests,
                                          'post',
                                          side_effect=self.get_mock_response)
        # Disable the real gateway version check during tests.
        utils.check_version = mock.MagicMock(return_value=False)

    def mock_object(self, obj, attr_name, *args, **kwargs):
        """Use python mock to mock an object attribute.

        Mocks the specified objects attribute with the given value.
        Automatically performs 'addCleanup' for the mock.
        """
        patcher = mock.patch.object(obj, attr_name, *args, **kwargs)
        result = patcher.start()
        self.addCleanup(patcher.stop)
        return result

    @contextlib.contextmanager
    def http_response_mode(self, mode):
        """Temporarily switch the response mode for the enclosed block."""
        previous_response_mode, self.__http_response_mode = (
            self.__http_response_mode, mode
        )
        yield
        self.__http_response_mode = previous_response_mode

    def get_mock_response(self, url, mode=None, *args, **kwargs):
        """side_effect for the patched requests.get/post: pick a canned reply.

        Lookup order: /login and /logout are short-circuited, then
        MOCK_RESPONSES[mode][path], then DEFAULT_MOCK_RESPONSES[mode][path],
        then the generic bad-status fallback (BadStatus mode only).
        """
        if mode is None:
            mode = self.__http_response_mode
        # Everything after '/api' in the URL is the endpoint key.
        api_path = url.split('/api')[1]
        try:
            if api_path == "/login":
                # NOTE(review): RESPONSE_MODE.Valid is the string 'Valid',
                # so this yields the single character 'V' (and 'l' below for
                # logout) — possibly DEFAULT_MOCK_RESPONSES[...] was
                # intended; confirm before relying on the token value.
                response = self.RESPONSE_MODE.Valid[0]
            elif api_path == "/logout":
                response = self.RESPONSE_MODE.Valid[2]
            else:
                response = self.MOCK_RESPONSES[mode][api_path]
        except KeyError:
            try:
                response = self.DEFAULT_MOCK_RESPONSES[mode][api_path]
            except KeyError:
                if mode == self.RESPONSE_MODE.BadStatus:
                    response = self.BAD_STATUS_RESPONSE
                else:
                    raise Exception(
                        'Mock API Endpoint is not implemented: [{}]{}'.format(
                            mode, api_path
                        )
                    )
        # Wrap bare payloads so every caller gets a MockResponse.
        if not isinstance(response, MockResponse):
            response = MockResponse(response, 200)
        response.request.url = url
        response.request.body = kwargs.get('data')
        return response
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.