repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
NickMonzillo/SmartCloud | SmartCloud/__init__.py | <filename>SmartCloud/__init__.py<gh_stars>1-10
from random import randint,choice
from os.path import isdir, isfile
from wordplay import tuplecount,separate, read_file
from utils import dir_freq, dir_list, read_dir, assign_colors, colorize, assign_fonts, fontsize
import pygame
class Cloud(object):
    '''A word-cloud image rendered onto a pygame surface.

    Words are placed one at a time at random positions, retrying until a
    spot is found that does not overlap a previously plotted word.  The
    surface grows automatically whenever a word cannot be placed.
    '''
    def __init__(self, width=500, height=500):
        pygame.init()
        pygame.font.init()
        self.width = width
        self.height = height
        self.cloud = pygame.Surface((width, height))
        # Rects of every plotted word, used for collision testing.
        self.used_pos = []
    def render_word(self, word, size, color):
        '''Creates a surface that contains a word.

        Stores the rendered surface in self.rendered_word and its pixel
        (width, height) in self.word_size for the placement methods.
        '''
        pygame.font.init()
        font = pygame.font.Font(None, size)
        self.rendered_word = font.render(word, 0, color)
        self.word_size = font.size(word)
    def plot_word(self, position):
        '''Blits the last rendered word on to the main cloud surface.'''
        posrectangle = pygame.Rect(position, self.word_size)
        self.used_pos.append(posrectangle)
        self.cloud.blit(self.rendered_word, position)
    def collides(self, position, size):
        '''Returns True if the word collides with another plotted word.

        NOTE(review): *size* is unused (kept for interface compatibility);
        the rect is always built from self.word_size, i.e. the word most
        recently passed to render_word().
        '''
        word_rect = pygame.Rect(position, self.word_size)
        return word_rect.collidelistall(self.used_pos) != []
    def expand(self, delta_width, delta_height):
        '''Makes the cloud surface bigger. Maintains all word positions.'''
        temp_surface = pygame.Surface((self.width + delta_width, self.height + delta_height))
        (self.width, self.height) = (self.width + delta_width, self.height + delta_height)
        temp_surface.blit(self.cloud, (0, 0))
        self.cloud = temp_surface
    def _place_rendered_word(self, max_count, expand_width, expand_height):
        '''Finds a free spot for the last rendered word and plots it.

        Grows the surface first if the word is larger than it, then tries
        random positions, expanding the cloud by (expand_width,
        expand_height) every time max_count attempts fail.  This is the
        placement loop formerly duplicated in directory_cloud/text_cloud.
        '''
        if self.width < self.word_size[0]:
            # If the word is bigger than the surface, expand the surface.
            self.expand(self.word_size[0] - self.width, 0)
        elif self.height < self.word_size[1]:
            self.expand(0, self.word_size[1] - self.height)
        position = [randint(0, self.width - self.word_size[0]),
                    randint(0, self.height - self.word_size[1])]
        loopcount = 0
        while self.collides(position, self.word_size):
            if loopcount > max_count:
                # If it can't find a position for the word, create a bigger cloud.
                self.expand(expand_width, expand_height)
                loopcount = 0
            position = [randint(0, self.width - self.word_size[0]),
                        randint(0, self.height - self.word_size[1])]
            loopcount += 1
        self.plot_word(position)
    def smart_cloud(self, input, max_text_size=72, min_text_size=12, exclude_words=True):
        '''Creates a word cloud using the input.

        Input can be a file, directory, or text.  Set exclude_words to
        True if you want to eliminate words that only occur once.
        '''
        self.exclude_words = exclude_words
        if isdir(input):
            self.directory_cloud(input, max_text_size, min_text_size)
        elif isfile(input):
            text = read_file(input)
            self.text_cloud(text, max_text_size, min_text_size)
        elif isinstance(input, basestring):  # Python 2: covers str and unicode
            self.text_cloud(input, max_text_size, min_text_size)
        else:
            # Single-argument print() works identically on Python 2 and 3.
            print('Input type not supported.')
            print('Supported types: String, Directory, .txt file')
    def directory_cloud(self, directory, max_text_size=72, min_text_size=12, expand_width=50, expand_height=50, max_count=100000):
        '''Creates a word cloud using files from a directory.

        The color of the words corresponds to the amount of documents the
        word occurs in.
        '''
        worddict = assign_fonts(tuplecount(read_dir(directory)), max_text_size, min_text_size, self.exclude_words)
        sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
        colordict = assign_colors(dir_freq(directory))
        for word in sorted_worddict:
            self.render_word(word, worddict[word], colordict[word])
            self._place_rendered_word(max_count, expand_width, expand_height)
    def text_cloud(self, text, max_text_size=72, min_text_size=12, expand_width=50, expand_height=50, max_count=100000):
        '''Creates a word cloud using plain text.  Colors are random.'''
        worddict = assign_fonts(tuplecount(text), max_text_size, min_text_size, self.exclude_words)
        sorted_worddict = list(reversed(sorted(worddict.keys(), key=lambda x: worddict[x])))
        for word in sorted_worddict:
            self.render_word(word, worddict[word], (randint(0, 255), randint(0, 255), randint(0, 255)))
            self._place_rendered_word(max_count, expand_width, expand_height)
    def display(self):
        '''Displays the word cloud to the screen until the window closes.'''
        pygame.init()
        # Store the screen in a private attribute: assigning to
        # self.display here used to clobber this method on the instance.
        self._screen = pygame.display.set_mode((self.width, self.height))
        self._screen.blit(self.cloud, (0, 0))
        pygame.display.update()
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    return
    def save(self, filename):
        '''Saves the cloud to an image file.'''
        pygame.image.save(self.cloud, filename)
|
NickMonzillo/SmartCloud | SmartCloud/wordplay.py | def separate(text):
    '''Takes text and separates it into a list of words.

    Each whitespace-separated token is stripped down to its ASCII letters
    (digits and punctuation removed); empty results are dropped and the
    surviving words are lowercased.  Returns a map() result — a list on
    Python 2, a lazy iterator on Python 3.
    '''
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    words = text.split()
    standardwords = []
    for word in words:
        newstr = ''
        for char in word:
            # Keep ASCII letters only, in either case.
            if char in alphabet or char in alphabet.upper():
                newstr += char
        if newstr != '':
            standardwords.append(newstr)
    return map(lambda x: x.lower(),standardwords)
def read_file(filename):
    '''Returns the entire contents of a text file as one string.'''
    with open(filename, 'r') as source:
        return source.read()
def eliminate_repeats(text):
    '''Returns a list of unique, lowercased words that occur in the text.

    Words are stripped down to their ASCII letters before comparison, and
    stopwords (read from stopwords.txt in the working directory) are
    eliminated.
    '''
    # Split the stopword file into whole words: the previous substring
    # test against the raw file text banned any word that merely appeared
    # inside another stopword.
    bannedwords = set(read_file('stopwords.txt').split())
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    standardwords = []
    for word in text.split():
        newstr = ''
        for char in word:
            if char in alphabet or char in alphabet.upper():
                newstr += char
        # Lowercase before the dedupe test: previously 'The' and 'the'
        # both survived and only collapsed in the final map(), leaving
        # duplicates in the "repeat-free" result.
        newstr = newstr.lower()
        if newstr != '' and newstr not in standardwords and newstr not in bannedwords:
            standardwords.append(newstr)
    return standardwords
def wordcount(text):
    '''Returns a dict mapping each non-stopword in the text to its count.'''
    # Whole-word stopword matching; the raw file text would also match
    # substrings of stopwords.
    bannedwords = set(read_file('stopwords.txt').split())
    # Renamed from 'wordcount', which shadowed this function's own name.
    counts = {}
    for word in separate(text):
        if word not in bannedwords:
            # dict.has_key() is Python-2-only; 'in'/get() work everywhere.
            counts[word] = counts.get(word, 0) + 1
    return counts
def tuplecount(text):
    '''Changes a dictionary into a list of tuples.

    Returns (word, count) pairs ordered from most to least frequent.
    '''
    worddict = wordcount(text)
    pairs = [(word, worddict[word]) for word in worddict.keys()]
    return list(reversed(sorted(pairs, key=lambda pair: pair[1])))
|
NickMonzillo/SmartCloud | setup.py | <gh_stars>1-10
# NOTE: distutils is deprecated in modern Python; setuptools offers a
# drop-in 'setup' replacement when upgrading.
from distutils.core import setup
setup(
    name='SmartCloud',
    packages=['SmartCloud'],
    author_email='<EMAIL>',
    url='https://github.com/NickMonzillo/SmartCloud',
    version='1.0.1',
    description='Text visualization using word frequencies.',
    author='<NAME>',
    install_requires=['Pygame'],
    # Fixed: this keyword was misspelled 'classifers', so the whole
    # classifier list was silently ignored by packaging tools.
    classifiers=[
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Text Processing',
        'License :: OSI Approved :: MIT License'
    ]
)
|
NickMonzillo/SmartCloud | SmartCloud/utils.py | <reponame>NickMonzillo/SmartCloud<gh_stars>1-10
from os import listdir
from wordplay import eliminate_repeats, read_file
def dir_freq(directory):
    '''Returns a list of (word, number of files it occurs in) tuples.

    Each file contributes at most one occurrence per word because
    eliminate_repeats() de-duplicates within a file.
    '''
    freqdict = {}
    for filename in dir_list(directory):
        filewords = eliminate_repeats(read_file(directory + '/' + filename))
        for word in filewords:
            # dict.has_key() is Python-2-only; get() works everywhere.
            freqdict[word] = freqdict.get(word, 0) + 1
    # items() already yields the (word, count) tuples built by hand before.
    return list(freqdict.items())
def dir_list(directory):
    '''Returns the list of all files in the directory.

    On failure, prints the error and returns None.
    '''
    try:
        return listdir(directory)
    except OSError as err:
        # WindowsError only exists on Windows (where it is a subclass of
        # OSError); catching OSError is portable and covers the same cases.
        print("Directory error: " + str(err))
def read_dir(directory):
    '''Returns the text of all files in a directory.

    Files are concatenated in dir_list() order, each followed by a space.
    '''
    pieces = []
    for filename in dir_list(directory):
        pieces.append(read_file(directory + '/' + filename))
        pieces.append(' ')
    return ''.join(pieces)
def assign_colors(dir_counts):
    '''Defines the color of a word in the cloud.

    dir_counts is a list of tuples in the form (word, occurrences).  The
    more files a word occurs in, the more red it appears in the cloud.
    '''
    # List comprehensions instead of map(): on Python 3 map() returns a
    # one-shot iterator, so max() exhausted it before min() and colorize
    # ever saw a value.
    frequencies = [count for (word, count) in dir_counts]
    words = [word for (word, count) in dir_counts]
    maxoccur = max(frequencies)
    minoccur = min(frequencies)
    colors = [colorize(count, maxoccur, minoccur) for count in frequencies]
    return dict(zip(words, colors))
def colorize(occurence, maxoccurence, minoccurence):
    '''A formula for determining colors.

    The most frequent word is pure red, the least frequent pure blue,
    and everything in between is scaled by its frequency.
    '''
    if occurence == maxoccurence:
        return (255, 0, 0)
    if occurence == minoccurence:
        return (0, 0, 255)
    red = int(float(occurence) / maxoccurence * 255)
    blue = int(float(minoccurence) / occurence * 255)
    return (red, 0, blue)
def assign_fonts(counts, maxsize, minsize, exclude_words):
    '''Defines the font size of a word in the cloud.

    counts is a list of tuples in the form (word, count).  When
    exclude_words is true, words that occur exactly once are dropped.
    '''
    if exclude_words:
        valid_counts = [pair for pair in counts if pair[1] != 1]
    else:
        valid_counts = counts
    # List comprehensions instead of map(): on Python 3 map() returns a
    # one-shot iterator, so max() consumed 'frequencies' and the font-size
    # pass then saw an empty sequence.
    frequencies = [count for (word, count) in valid_counts]
    words = [word for (word, count) in valid_counts]
    maxcount = max(frequencies)
    font_sizes = [fontsize(count, maxsize, minsize, maxcount) for count in frequencies]
    return dict(zip(words, font_sizes))
def fontsize(count, maxsize, minsize, maxcount):
    '''A formula for determining font sizes.

    Scales linearly with count up to maxsize, clamped below at minsize.
    '''
    scaled = int(maxsize - (maxsize) * ((float(maxcount - count) / maxcount)))
    return scaled if scaled >= minsize else minsize
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg01_drawID_outImg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
from PIL import Image
# Input image and the 4x4, 50-ID ArUco dictionary used for detection.
targetImg = "test-img02.png"
aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
outputImg = "edit01-" + targetImg[0:-4]+".png"
def arReader( argTargetImg , argOutputImg ):
    '''Detects ArUco markers in argTargetImg and saves a copy with the
    detected markers and their IDs drawn on, to argOutputImg.'''
    img = cv2.imread( argTargetImg )
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary) # detect the markers
    # draw ID
    aruco.drawDetectedMarkers(img, corners, ids, (0,255,0)) # draw onto the detected markers
    img = Image.fromarray(img)
    img.save( argOutputImg )
arReader( targetImg , outputImg )
|
zgw426/OpenCV_SamplesForMyself | opcvCapVideo05_drawAxis_outMP4.py | <filename>opcvCapVideo05_drawAxis_outMP4.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Detects ArUco markers in a video, draws each marker's 3D pose axes, and
shows/saves the annotated stream to outputVideo.'''
import cv2
import sys
import cv2.aruco as aruco
import numpy as np
#targetVideo = 0 # camera device
#targetVideo = "test-video.mp4" # video file
#targetVideo = "http://{IP Address}:8090/?action=stream" # MJPG-Streamer
targetVideo = "test-video.mp4"
cap = cv2.VideoCapture( targetVideo )
outputVideo = "editV05.mp4"
# Set AR Marker
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
# Source frame width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
# Source frame rate
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
# Writer used to save the annotated video
fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter( outputVideo, fmt, frame_rate, size)
parameters = aruco.DetectorParameters_create()
# CORNER_REFINE_NONE, no refinement. CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
# Intrinsic camera matrix and lens distortion coefficients from a prior
# calibration of the capturing camera.
cameraMatrix = np.array(
    [[1.42068235e+03,0.00000000e+00,9.49208512e+02],
    [0.00000000e+00,1.37416685e+03,5.39622051e+02],
    [0.00000000e+00,0.00000000e+00,1.00000000e+00]] )
distCoeffs = np.array( [1.69926613e-01,-7.40003491e-01,-7.45655262e-03,-1.79442353e-03, 2.46650225e+00] )
while cap.isOpened():
    ret, frame = cap.read()
    # Check if frame is not empty
    if frame is None :
        break
    # BUG FIX: the detector parameters were re-created (with defaults) on
    # every frame, which silently discarded the CORNER_REFINE_CONTOUR
    # setting configured above.
    corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
    frame = aruco.drawDetectedMarkers(frame, corners, ids)
    # Marker side length 0.05 (in the calibration's metric units).
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
    if ids is not None:
        for i in range( ids.size ):
            # 0.1 is the drawn axis length.
            aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # Output as video data
    writer.write(frame)
    if not ret:
        continue
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release everything:
writer.release()
cap.release()
cv2.destroyAllWindows()
|
zgw426/OpenCV_SamplesForMyself | opcv_outputARmark01.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ↓ $ pip install opencv-contrib-python
import cv2
aruco = cv2.aruco
# 4x4 markers, dictionary of up to 50 IDs.
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
def arGenerator():
    '''Generates a single ArUco marker image (ID 0, 150x150 px) as ar.png.'''
    fileName = "ar.png"
    generator = aruco.drawMarker(dictionary, 0, 150) # ID number 0, 150x150 pixels
    cv2.imwrite(fileName, generator)
    # Removed: an unused cv2.imread() of the file that was just written.
arGenerator()
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg07_drawIMG_outImg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
import cv2
targetImg = "test-img02.png"
mappingImg = "test-mapping.png"
aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
outputImg = "edit07-" + targetImg[0:-4]+".png"
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50) # AR marker dictionary
parameters = aruco.DetectorParameters_create()
def arReader():
    '''Detects ArUco markers in targetImg and perspective-warps mappingImg
    onto each detected marker, saving the composited image to outputImg.'''
    img = cv2.imread( targetImg ) # image containing the AR markers
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    im_src = cv2.imread( mappingImg ) # image to map onto the markers
    im_src = cv2.cvtColor(im_src, cv2.COLOR_BGR2RGB)
    # Detect the AR markers
    ## type(ids) = <class 'numpy.ndarray'> when markers were detected
    ## type(ids) = <class 'NoneType'> when no marker was detected
    ## corners: the four corner coordinates of each detected marker
    corners, ids, _ = aruco.detectMarkers(img, aruco_dict, parameters=parameters)
    if np.all(ids != None):
        # Loop once per detected AR marker
        for c in corners :
            # Marker corners in detection order.
            x1 = (c[0][0][0], c[0][0][1])
            x2 = (c[0][1][0], c[0][1][1])
            x3 = (c[0][2][0], c[0][2][1])
            x4 = (c[0][3][0], c[0][3][1])
            size = im_src.shape
            pts_dst = np.array([x1, x2, x3, x4])
            # The four corners of the source image, matched to the marker.
            pts_src = np.array(
                [
                    [0,0],
                    [size[1] - 1, 0],
                    [size[1] - 1, size[0] -1],
                    [0, size[0] - 1 ]
                ],dtype=float
            )
            # Homography from the source image onto the marker quad.
            h, status = cv2.findHomography(pts_src, pts_dst)
            temp = cv2.warpPerspective(im_src.copy(), h, (img.shape[1], img.shape[0]))
            # Black out the marker area, then add the warped image into it.
            cv2.fillConvexPoly(img, pts_dst.astype(int), 0, 16)
            img = cv2.add(img , temp)
        aruco.drawDetectedMarkers(img, corners, ids, (255,0,0)) # outline the detected markers over the mapped image
    img = Image.fromarray(img)
    img.save( outputImg )
arReader()
|
zgw426/OpenCV_SamplesForMyself | opcv_outputARmark02.py | <reponame>zgw426/OpenCV_SamplesForMyself
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
# Pull in the ArUco library
aruco = cv2.aruco
# 4x4 markers, using the dictionary with ID numbers up to 50
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
def main():
    '''Writes ten ArUco marker images, ar00.png through ar09.png.'''
    # Repeat ten times to produce ten markers
    for i in range(10):
        ar_image = aruco.drawMarker(dictionary, i, 150) # i: ID number, 150x150 pixels.
        fileName = "ar" + str(i).zfill(2) + ".png" # build file names like "ar0x.png"
        cv2.imwrite(fileName, ar_image) # save the marker image
if __name__ == "__main__":
    main()
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg08_drawAxis_outImg.py | <reponame>zgw426/OpenCV_SamplesForMyself<filename>opcvCapImg08_drawAxis_outImg.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
from PIL import Image
import numpy as np
targetImg = "test-img02.png"
aruco = cv2.aruco
outputImg = "edit08-" + targetImg[0:-4]+".png"
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
# CORNER_REFINE_NONE, no refinement. CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
# Intrinsic camera matrix and lens distortion coefficients from a prior
# calibration of the capturing camera.
cameraMatrix = np.array(
    [[1.42068235e+03,0.00000000e+00,9.49208512e+02],
    [0.00000000e+00,1.37416685e+03,5.39622051e+02],
    [0.00000000e+00,0.00000000e+00,1.00000000e+00]] )
distCoeffs = np.array( [1.69926613e-01,-7.40003491e-01,-7.45655262e-03,-1.79442353e-03, 2.46650225e+00] )
def arReader():
    '''Detects ArUco markers in targetImg and draws each marker's 3D pose
    axes onto the image, saving the annotated copy to outputImg.'''
    img = cv2.imread( targetImg )
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NOTE(review): 'parameters' (with CORNER_REFINE_CONTOUR) is configured
    # above but not passed to detectMarkers here - confirm intent.
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary) # detect the markers
    # draw ID
    #aruco.drawDetectedMarkers(img, corners, ids, (0,255,0)) # draw onto the detected markers
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.05, cameraMatrix, distCoeffs)
    if ids is not None:
        for i in range( ids.size ):
            # 0.1 is the drawn axis length.
            aruco.drawAxis(img, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1)
    img = Image.fromarray(img)
    img.save( outputImg )
arReader()
|
zgw426/OpenCV_SamplesForMyself | opcvCapVideo04_drawText_outMP4.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Detects ArUco markers in a video and labels each marker's four corners
A-D, showing and saving the annotated stream to outputVideo.'''
import cv2
import sys
import cv2.aruco as aruco
import numpy as np
#targetVideo = 0 # camera device
#targetVideo = "test-video.mp4" # video file
#targetVideo = "http://{IP Address}:8090/?action=stream" # MJPG-Streamer
targetVideo = "test-video.mp4"
cap = cv2.VideoCapture( targetVideo )
outputVideo = "editV04.mp4"
# Set AR Marker
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
# Source frame width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
# Source frame rate
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
# Writer used to save the annotated video
fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter( outputVideo, fmt, frame_rate, size)
parameters = aruco.DetectorParameters_create()
# CORNER_REFINE_NONE, no refinement. CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_CONTOUR
while cap.isOpened():
    ret, frame = cap.read()
    # Check if frame is not empty
    if frame is None :
        break
    # BUG FIX: the detector parameters were re-created (with defaults) on
    # every frame, which silently discarded the CORNER_REFINE_CONTOUR
    # setting configured above.
    corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
    frame = aruco.drawDetectedMarkers(frame, corners, ids)
    for i, corner in enumerate( corners ):
        points = corner[0].astype(np.int32)
        # cv2.putText(image, text, position (bottom-left of the text),
        #             font face, font scale, color, thickness)
        cv2.putText(frame, str("A"), tuple(points[0]), cv2.FONT_HERSHEY_PLAIN, 1,(0,0,0), 1)
        cv2.putText(frame, str("B"), tuple(points[1]), cv2.FONT_HERSHEY_PLAIN, 1,(0,0,0), 1)
        cv2.putText(frame, str("C"), tuple(points[2]), cv2.FONT_HERSHEY_PLAIN, 1,(0,0,0), 1)
        cv2.putText(frame, str("D"), tuple(points[3]), cv2.FONT_HERSHEY_PLAIN, 1,(0,0,0), 1)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # Output as video data
    writer.write(frame)
    if not ret:
        continue
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release everything:
writer.release()
cap.release()
cv2.destroyAllWindows()
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg04_drawRectangle_outImg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
from PIL import Image
import numpy as np
targetImg = "test-img02.png"
aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
outputImg = "edit04-" + targetImg[0:-4]+".png"
def arReader( argTargetImg , argOutputImg ):
    '''Detects ArUco markers and draws a colored rectangle between each
    pair of adjacent marker corners, saving the result to argOutputImg.'''
    img = cv2.imread( argTargetImg )
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary) # detect the markers
    for i, corner in enumerate( corners ):
        points = corner[0].astype(np.int32)
        # One rectangle per marker edge, each in a different color.
        cv2.rectangle(img,tuple(points[0]),tuple(points[1]),(200, 0, 0),3)
        cv2.rectangle(img,tuple(points[1]),tuple(points[2]),( 0,200, 0),3)
        cv2.rectangle(img,tuple(points[2]),tuple(points[3]),( 0, 0,200),3)
        cv2.rectangle(img,tuple(points[3]),tuple(points[0]),(100,100, 0),3)
    img = Image.fromarray(img)
    img.save( argOutputImg )
arReader( targetImg , outputImg )
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg02_drawText_outImg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
from PIL import Image
import numpy as np
targetImg = "test-img02.png"
aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
outputImg = "edit02-" + targetImg[0:-4]+".png"
def arReader( argTargetImg , argOutputImg ):
    '''Detects ArUco markers, outlines each marker, and labels every
    corner with its pixel coordinates, saving the result.'''
    img = cv2.imread( argTargetImg )
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary) # detect the markers
    # draw ID
    aruco.drawDetectedMarkers(img, corners, ids, (0,255,0)) # draw onto the detected markers
    for i, corner in enumerate( corners ):
        points = corner[0].astype(np.int32)
        # Marker outline
        cv2.polylines(img, [points], True, (255,0,0))
        # cv2.putText(image, text, position (bottom-left of the text),
        #             font face, font scale, color, thickness)
        cv2.putText(img, str(points[0]), tuple(points[0]), cv2.FONT_HERSHEY_PLAIN, 1,(100,0,100), 2)
        cv2.putText(img, str(points[1]), tuple(points[1]), cv2.FONT_HERSHEY_PLAIN, 1,(0,255,0), 2)
        cv2.putText(img, str(points[2]), tuple(points[2]), cv2.FONT_HERSHEY_PLAIN, 1,(0,0,255), 2)
        cv2.putText(img, str(points[3]), tuple(points[3]), cv2.FONT_HERSHEY_PLAIN, 1,(255,0,0), 2)
    img = Image.fromarray(img)
    img.save( argOutputImg )
arReader( targetImg , outputImg )
|
zgw426/OpenCV_SamplesForMyself | opcvCapImg06_drawPolylines_outImg.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
from PIL import Image
import numpy as np
targetImg = "test-img02.png"
aruco = cv2.aruco
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
outputImg = "edit06-" + targetImg[0:-4]+".png"
def arReader( argTargetImg , argOutputImg ):
    '''Detects ArUco markers and draws a polyline through each marker's
    corners (edges plus diagonals), saving the result to argOutputImg.'''
    img = cv2.imread( argTargetImg )
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, dictionary) # detect the markers
    for i, corner in enumerate( corners ):
        points = corner[0].astype(np.int32)
        # [Open CV] cv2.polylines (polygons)
        # http://labs.eecs.tottori-u.ac.jp/sd/Member/oyamada/OpenCV/html/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
        # Vertex sequence traces the four edges and both diagonals.
        pts = np.array(
            [
                tuple(points[0]),
                tuple(points[1]),
                tuple(points[2]),
                tuple(points[3]),
                tuple(points[0]),
                tuple(points[2]),
                tuple(points[3]),
                tuple(points[1]),
            ], np.int32)
        pts = pts.reshape((-1,1,2))
        cv2.polylines(img,[pts],True,(0,255,255))
    img = Image.fromarray(img)
    img.save( argOutputImg )
arReader( targetImg , outputImg )
|
zgw426/OpenCV_SamplesForMyself | opcvCapVideo07_drawVideo_outMP4.py | <reponame>zgw426/OpenCV_SamplesForMyself
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import sys
import cv2.aruco as aruco
import numpy as np
#targetVideo = 0 # camera device
#targetVideo = "test-video.mp4" # video file
#targetVideo = "http://{IP Address}:8090/?action=stream" # MJPG-Streamer
targetVideo = "test-video.mp4"
#mappingVideo= 0 # camera device
#mappingVideo= "test-mapping.mp4" # video file
#mappingVideo= "http://{IP Address}:8090/?action=stream" # MJPG-Streamer
mappingVideo= "test-mapping.mp4"
outputVideo= "editV07.mp4"
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50) # AR marker dictionary
cap = cv2.VideoCapture(targetVideo)
# Source frame width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
# Source frame rate
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
# Writer used to save the composited video
fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter( outputVideo, fmt, frame_rate, size)
# fn counts frames so the mapping video can be looped.
fn = 0
map_vid = cv2.VideoCapture(mappingVideo)
while cap.isOpened():
    fn += 1
    # Loop the mapping video when it runs out of frames.
    if fn >= map_vid.get(cv2.CAP_PROP_FRAME_COUNT):
        fn = 0
        map_vid.set(cv2.CAP_PROP_POS_FRAMES, fn)
    ret_v, im_src = map_vid.read()
    # Capture frame-by-frame
    ret, frame = cap.read()
    if frame is None :
        break
    scale_percent = 100 # percent of original size
    width = int(frame.shape[1] * scale_percent / 100)
    height = int(frame.shape[0] * scale_percent / 100)
    dim = (width, height)
    frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
    # NOTE(review): org_frame aliases frame (no copy), so the fills below
    # also modify frame.
    org_frame = frame
    # Check if frame is not empty
    if not ret_v:
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    parameters = aruco.DetectorParameters_create()
    corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
    if np.all(ids != None):
        # One mapping per detected marker.
        for c in corners :
            x1 = (c[0][0][0], c[0][0][1])
            x2 = (c[0][1][0], c[0][1][1])
            x3 = (c[0][2][0], c[0][2][1])
            x4 = (c[0][3][0], c[0][3][1])
            im_dst = frame
            size = im_src.shape
            pts_dst = np.array([x1, x2, x3, x4])
            # Four corners of the mapping frame, matched to the marker quad.
            pts_src = np.array(
                [
                    [0,0],
                    [size[1] - 1, 0],
                    [size[1] - 1, size[0] -1],
                    [0, size[0] - 1 ]
                ],dtype=float
            )
            h, status = cv2.findHomography(pts_src, pts_dst)
            #project corners into frame
            temp = cv2.warpPerspective(im_src.copy(), h, (org_frame.shape[1], org_frame.shape[0]))
            # Black out the marker area, then add the warped frame into it.
            cv2.fillConvexPoly(org_frame, pts_dst.astype(int), 0, 16)
            org_frame = cv2.add(org_frame, temp)
        cv2.imshow('frame', org_frame)
        # Output as video data
        # NOTE(review): this writes 'frame' (blacked-out markers, no
        # overlay) while the display shows 'org_frame' - confirm which
        # image is meant to be saved.
        writer.write(frame)
    else:
        cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release everything:
writer.release()
cap.release()
cv2.destroyAllWindows()
|
zgw426/OpenCV_SamplesForMyself | opcvCapVideo02_drawID_outMP4.py | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Detects ArUco markers in a video, draws the detected markers and their
IDs, and shows/saves the annotated stream to outputVideo.'''
import cv2
import sys
import cv2.aruco as aruco
#targetVideo = 0 # camera device
#targetVideo = "test-video.mp4" # video file
#targetVideo = "http://{IP Address}:8090/?action=stream" # MJPG-Streamer
targetVideo = "test-video.mp4"
outputVideo = "editV02.mp4"
cap = cv2.VideoCapture( targetVideo )
# Set AR Marker
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
# Source frame width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
# Source frame rate
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
# Writer used to save the annotated video
fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
writer = cv2.VideoWriter( outputVideo, fmt, frame_rate, size)
# Hoisted out of the loop: default parameters never change per frame.
parameters = aruco.DetectorParameters_create()
while cap.isOpened():
    ret, frame = cap.read()
    # Check if frame is not empty
    if frame is None :
        break
    corners, ids, _ = aruco.detectMarkers(frame, aruco_dict, parameters=parameters)
    frame = aruco.drawDetectedMarkers(frame, corners, ids)
    scale_percent = 100 # percent of original size
    width = int(frame.shape[1] * scale_percent / 100)
    height = int(frame.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # Output as video data
    writer.write(frame)
    if not ret:
        continue
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release everything (removed a leftover debug print here):
writer.release()
cap.release()
cv2.destroyAllWindows()
|
ayushpetigara/Guardian-Angel | app.py | <filename>app.py
from flask import Flask, render_template, request, url_for, redirect, Markup, jsonify, make_response, send_from_directory, session
import os
import shlex
is_prod = os.environ.get('IS_HEROKU', None)
# In-memory location trail shared between the walker and guardian pages.
# NOTE(review): module-level state, so this supports one walk per process.
longitudeHistory = [None]
latitudeHistory = [None]
if is_prod == None:
    # Local development: API keys come from an untracked keys.py module.
    from keys import *
else:
    API_KEY = os.environ.get('GOOGLE_KEY', None)
    AGORA_KEY = os.environ.get('AGORA_KEY', None)
    FIREBASE_KEY = os.environ.get('FIREBASE_KEY', None)
app = Flask(__name__, static_url_path='/static')
@app.route('/', methods=['GET'])
def index():
    '''Landing page.'''
    return render_template('index.html', API_KEY = API_KEY)
@app.route('/walker', methods=['GET'])
def select_screen():
    '''Page used by the person walking home.'''
    return render_template('walk.html', API_KEY = API_KEY, AGORA_KEY=AGORA_KEY)
@app.route('/sendText', methods=['GET'])
def send_text():
    '''Sends the guardian-request SMS through the messagebird CLI.'''
    message = """Chris has requested a guardian angel to overlook his walk home. Check out: https://guardianangels.herokuapp.com/mySite?person=Chris&location=Home"""
    # Quote the message: unquoted, the shell split it on whitespace and
    # only the first word ever reached --body.
    os.system("lib messagebird.sms.create --recipient 18646097067 --body {}".format(shlex.quote(message)))
    return {"status": True}
@app.route('/longLat', methods=['GET'])
def get_long_lat():
    '''Records one (long, lat) sample reported by the walker's browser.'''
    longitude = request.args.get('long')
    latitude = request.args.get('lat')
    print("Long: {} | Lat: {}".format(longitude, latitude))
    longitudeHistory.append(longitude)
    latitudeHistory.append(latitude)
    return jsonify({"status": True})
@app.route('/guardian', methods=['GET'])
def get_guardian():
    '''Guardian view: renders the recorded location history on a map.'''
    longitude = longitudeHistory
    latitude = latitudeHistory
    return render_template("guardian.html", LATITUDE=latitude, LONGITUDE=longitude, AGORA_KEY=AGORA_KEY)
@app.route('/mySite', methods=['GET'])
def my_site():
    '''Public page linked from the SMS.'''
    return render_template("mySite.html", API_KEY = API_KEY, AGORA_KEY=AGORA_KEY)
@app.route('/audio', methods=['GET'])
def get_audio():
    '''Audio-call page.'''
    return render_template("audio.html", AGORA_KEY=AGORA_KEY)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
|
Saevon/syncrae | management/commands/runsyncrae.py | from django.core.management.base import BaseCommand
# optparse AND argparse???
from django.conf import settings
from webdnd.syncrae.config.log import setup as logging_setup
from webdnd.syncrae.websocket import get_app
import argparse
import logging
import tornado.ioloop
def cmdline(args=None, version=''):
    '''Parses Syncrae's command-line arguments.

    Returns the argparse namespace; option dests are named after the
    Django settings they override (SYNCRAE_*).
    '''
    parser = argparse.ArgumentParser(
        prog='Syncrae',
    )
    # ArgumentParser(version=...) was removed in Python 3; an explicit
    # version action is the compatible spelling of the same behaviour.
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s ' + version,
    )
    parser.add_argument(
        '-l', '--log-level',
        dest='SYNCRAE_LOG_LEVEL',
        nargs=1,
        action='store',
        default=False,
        help='Changes the lowest logged level',
        metavar='level',
    )
    parser.add_argument(
        '-f', '--log-file',
        dest='SYNCRAE_LOG_FILE',
        nargs=1,
        action='store',
        default=False,
        help='Selects where to output the logs',
        metavar='file',
    )
    parser.add_argument(
        '-o', '--host', '--open',
        dest='SYNCRAE_HOST',
        action='store_true',
        default=False,
        help='Enables outside hosting (0.0.0.0)',
    )
    # Argument Parsing
    out = parser.parse_args(args)
    return out
class Command(BaseCommand):
    '''
    Runs the Syncrae (tornado) websocket server, configured from the
    command line and Django settings.

    NOTE(review): the original docstring said "Removes expired tokens",
    which does not match what handle()/runserver() do below.
    '''
    can_import_settings = True
    def run_from_argv(self, argv):
        '''Parses our own flags, copies them into Django settings, then
        delegates to handle().'''
        # Get rid of 'manage.py', 'syncrae'
        argv = argv[2:]
        args = cmdline(args=argv, version=settings.SYNCRAE_VERSION)
        if args.SYNCRAE_LOG_FILE:
            settings.SYNCRAE_LOG_FILE = args.SYNCRAE_LOG_FILE[0]
        level = args.SYNCRAE_LOG_LEVEL
        if level:
            level = level[0]
            # Numeric levels (e.g. "10") become ints; non-numeric names
            # pass through as strings.
            try:
                level = int(level)
            except ValueError:
                pass
            settings.SYNCRAE_LOG_LEVEL = level
        settings.SYNCRAE_HOST = '0.0.0.0' if args.SYNCRAE_HOST else '127.0.0.1'
        self.handle()
    def handle(self, *args, **options):
        '''Configures logging, then blocks in the tornado IO loop.'''
        logging_setup(level=settings.SYNCRAE_LOG_LEVEL)
        self.runserver()
    def runserver(self):
        '''Starts the tornado application and runs until interrupted.'''
        import tornado.options
        # Intro Message
        import textwrap
        logging.info(textwrap.dedent("""
        Syncrae ~ v%(version)s
        Debug: %(debug)s
        Server is running at http://%(host)s:%(port)s/
        Quit the server with CTRL-C.
        """ % {
            'version': settings.SYNCRAE_VERSION,
            'debug': settings.DEBUG,
            'port': settings.SYNCRAE_PORT,
            'host': settings.SYNCRAE_HOST,
        }))
        get_app({
            'debug': settings.DEBUG,
            'cookie_secret': settings.SECRET_KEY,
            'host': settings.SYNCRAE_HOST,
        }).listen(settings.SYNCRAE_PORT)
        # Allow Ctrl-C to stop the server without the error traceback
        try:
            tornado.ioloop.IOLoop.instance().start()
        except (KeyboardInterrupt, SystemExit):
            tornado.ioloop.IOLoop.instance().stop()
            # Remove the ^C that appears on the same line as your terminal input
            print("")
        finally:
            tornado.ioloop.IOLoop.instance().close()
|
Saevon/syncrae | events/queue.py | <gh_stars>0
from collections import defaultdict
from syncrae.events.event import Event
from syncrae.utils.decorators import cascade
class Queue(object):
    '''A broadcast queue that fans an Event out to every listener.

    Queue.get(id) acts as a factory, keeping one instance per id in the
    class-level `queues` registry; a queue removes itself from the
    registry when its last listener is dropped.
    '''
    queues = {}
    def __init__(self, id):
        self._all = set()
        self.id = id
        # Make sure the factory still works
        self.queues[id] = self
    @classmethod
    def get(cls, id):
        '''Returns the queue for *id*, creating it on first use.'''
        if id not in cls.queues:
            return cls(id)
        return cls.queues[id]
    @classmethod
    def remove(cls, id):
        '''Drops *id* from the registry if it is present.'''
        if id in cls.queues:
            del cls.queues[id]
    @cascade
    def write_message(self, topic, data):
        '''Sends an Event(topic, data) to every listener of *topic*.'''
        listeners = self.listeners(topic)
        event = Event(topic, data)
        # send message to all listeners
        for listener in listeners:
            event.write_message(listener)
    def listeners(self, topic):
        '''Returns the set of listeners interested in *topic*.

        Returns a copy: subclasses (e.g. CampaignQueue.listeners) extend
        the result with update(), which previously mutated the live _all
        set and permanently leaked topic listeners into the broadcast set.
        '''
        return set(self._all)
    @cascade
    def listen(self, obj):
        '''Subscribes *obj* to every topic on this queue.'''
        self._all.add(obj)
    @cascade
    def drop(self, obj):
        '''Unsubscribes *obj*, removing the queue once it is empty.'''
        self._all.remove(obj)
        if self.is_empty():
            self.remove(self.id)
    def is_empty(self):
        '''True when no listeners remain.'''
        return len(self._all) == 0
class CampaignQueue(Queue):
    """Queue for one campaign: besides the all-topic listeners it supports
    per-topic subscriptions and tracks connected users by user id."""

    queues = {}

    def __init__(self, id):
        super(CampaignQueue, self).__init__(id)
        # topic -> set of listeners subscribed only to that topic.
        self._listeners = defaultdict(set)
        # user id -> websocket handler.
        self._users = {}

    def listeners(self, topic):
        # Copy before extending: the parent may hand back its internal
        # listener set, and updating that in place would permanently
        # subscribe per-topic listeners to every message.
        listeners = set(super(CampaignQueue, self).listeners(topic))
        listeners.update(self._listeners[topic])
        return listeners

    @cascade
    def listen(self, obj, topics=True):
        """Subscribe *obj*: topics=True means every topic, otherwise an
        iterable of topic names."""
        self._users[obj.user.id] = obj
        if topics is True:
            super(CampaignQueue, self).listen(obj)
        else:
            for topic in topics:
                self._listeners[topic].add(obj)

    @cascade
    def drop(self, obj, topics=True):
        """Unsubscribe *obj* (and from the listed topics, if given);
        deregister the queue once fully empty."""
        super(CampaignQueue, self).drop(obj)
        self._users.pop(obj.user.id)
        if topics is not True:
            for topic in topics:
                self._listeners[topic].remove(obj)
        if self.is_empty():
            self.remove(self.id)

    def is_empty(self):
        parent = super(CampaignQueue, self).is_empty()
        return (parent and len(self._listeners) == 0)

    def users(self):
        """Return the uid -> handler mapping of connected users."""
        return self._users
class ChatQueue(Queue):
    '''
    A Private chat between n people
    '''

    queues = {}

    def __init__(self, id):
        super(ChatQueue, self).__init__(id)

    @staticmethod
    def id(users):
        # Canonical chat id: participant ids, sorted, dash-joined.
        return '-'.join(str(uid) for uid in sorted(users))

    @cascade
    def listen(self, obj):
        super(ChatQueue, self).listen(obj)
        # Announce the (re)opened chat to everyone currently in it.
        # Uniquify the userids, prevents problems with multiple listeners in a chat
        present = list(set(u.user.id for u in self._all))
        expected = list(set(self.id.split('-')))
        self.write_message('/chat/open', {
            'chatid': self.id,
            'users': present,
            'expected': expected,
        })

    @cascade
    def drop(self, obj):
        super(ChatQueue, self).drop(obj)
        self.write_message('/chat/close', {
            'chatid': self.id,
        })
class CharacterQueue(Queue):
    # Per-character event queue: its own registry, behavior inherited
    # unchanged from Queue.
    queues = {}
|
Saevon/syncrae | events/event.py | <gh_stars>0
from django.conf import settings
from syncrae.utils.decorators import cascade
import json
import logging
logging = logging.getLogger()
class Event(object):
    """A topic + data payload that serializes itself to JSON once and can
    be written to any number of websocket listeners."""

    topic = None
    data = None

    def __init__(self, topic, data, err=False):
        """*err*, when not False, is a Syncrae error code: the code and its
        human-readable message are folded into the payload."""
        self.topic = topic
        self.data = data
        # JSON cache, built lazily by the .json property.
        self.__json = None

        if err is not False:
            self.data['err_code'] = err
            self.data['err_msg'] = settings.SYNCRAE_ERR_CODES[err]

        logging.info(
            'New Message'
            + ' - < %s >' % self.topic
            + ('' if not err else ' - ERR: < %s > %s' % (
                err, settings.SYNCRAE_ERR_CODES[err]
            ))
        )

    @property
    def json(self):
        """Serialized {topic, data}; computed once and cached."""
        if self.__json is None:
            self.__json = json.dumps({
                'topic': self.topic,
                'data': self.data,
            })
        return self.__json

    @cascade
    def write_message(self, listener):
        """Best-effort delivery to one listener; failures are logged, never
        raised, so one dead socket cannot break a broadcast."""
        try:
            listener.write_message(self.json)
        except BaseException:
            logging.exception('Error sending message - < %s >' % self.topic)
|
Saevon/syncrae | terminal.py | from django.conf import settings
from syncrae.events.event import Event
from webdnd.player.models.roll import roll_text
import logging
logging = logging.getLogger('')
class Terminal(object):
    """Server-side handler for the in-game terminal: parses a command
    line, dispatches to a term_* method, and writes results back over the
    websocket handler *web*."""

    def __init__(self, web):
        # The websocket handler used to reach the client.
        self.web = web

    def __call__(self, data):
        """Dispatch one raw command payload ({'cmd': ...}) from the client."""
        full_cmd = data.get('cmd')
        cmd = full_cmd.split()
        if len(cmd) == 0:
            return
        elif cmd[0] not in Terminal.COMMANDS:
            Event('/terminal/result', {
                'level': 'error',
                'text': 'Invalid Command: `%s`' % full_cmd,
            }).write_message(self)
            return

        # Tuple RHS is evaluated before assignment, so cmd[1:] still sees
        # the original token list.
        cmd, args = cmd[0], ' '.join([] if len(cmd) <= 1 else cmd[1:])

        # Return the command to the client to state that it was recieved
        Event('/terminal/result', {
            'cmd': True,
            'level': 'cmd',
            'text': full_cmd,
        }).write_message(self)

        # only log accepted commands
        logging.info('New Command - %s' % full_cmd)

        handler = 'term_' + Terminal.COMMANDS[cmd]['handler']
        if hasattr(self, handler):
            getattr(self, handler)(args)
            return
        else:
            # Fix: this previously interpolated self.__full, an attribute
            # Terminal never sets (it only exists on EventWebsocket), so
            # reaching this branch raised AttributeError instead of logging.
            logging.error('Invalid Handler for cmd: < %s > - %s' % (full_cmd, handler))
            return

    def write_message(self, json):
        # Events call this on their target; forward to the real websocket.
        self.web.write_message(json)

    def start(self):
        # Hook invoked asynchronously when the terminal is attached; no-op.
        pass

    def terminal_write(self, text, level='info', err=False):
        """Write one line of output to the client terminal."""
        Event('/terminal/result', {
            'level': level,
            'text': text or ' ',  # never send an empty line
        }, err=err).write_message(self)

    def terminal_err(self, err, level=None):
        """Send an error event for Syncrae error code *err*."""
        data = {}
        if level is not None:
            data['level'] = level
        Event('/terminal/error', data, err=err).write_message(self)

    ##############################################
    # Actual commands
    ##############################################
    COMMANDS = {
        'colors': {
            'handler': 'color_test',
        },
        'echo': {
            'handler': 'echo',
        },
        'error': {
            'handler': 'error',
        },
        'roll': {
            'handler': 'roll',
        }
    }

    def term_color_test(self, cmd):
        """`colors`: print a sample line in every supported level."""
        self.terminal_write('Color Test:')
        levels = ['cmd', 'normal', 'info', 'warn', 'error', 'critical', 'muted']
        for level in levels:
            self.terminal_write(level=level, text=" >> %s" % level)

    def term_echo(self, cmd):
        """`echo <text>`: repeat the arguments back verbatim."""
        self.terminal_write(cmd, level='normal')

    def term_error(self, cmd):
        """`error <code>`: emit the error event for a known error code."""
        if len(cmd) > 1 and cmd in settings.SYNCRAE_ERR_CODES:
            self.terminal_err(level='error', err=cmd)
        else:
            self.terminal_write('Invalid err code.', level='error')

    def term_roll(self, cmd):
        """`roll <dice>`: evaluate a dice expression and print the result."""
        self.terminal_write(roll_text(cmd))
|
Saevon/syncrae | config/log.py | <reponame>Saevon/syncrae<filename>config/log.py
from logutils.colorize import ColorizingStreamHandler
import logging
# Customize error colors
# levels to (background, foreground, bold/intense)
ColorizingStreamHandler.level_map = {
    logging.DEBUG: (None, 'cyan', False),
    # Use default color for info messages
    logging.INFO: (None, '', False),
    logging.WARNING: (None, 'magenta', False),
    logging.ERROR: (None, 'red', True),
    # Critical is the only level that also recolors the background.
    logging.CRITICAL: ('red', 'white', True),
}
def setup(level=None):
    """(Re)configure the root logger with a single colorizing handler.

    *level* may be a logging level constant or a (case-insensitive) level
    name; invalid values are silently ignored and the default level kept.
    """
    logger = logging.getLogger()

    # Fix: iterate over a copy — removing handlers from the live
    # logger.handlers list while iterating it skips every other handler.
    for h in list(logger.handlers):
        logger.removeHandler(h)

    # basestring only exists on Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(level, string_types):
        level = level.upper()

    if level:
        try:
            logger.setLevel(level)
        except TypeError:
            # Logging here actually doesn't work ??
            # logger.warn('Bad Log level was passed in %(err)s', err)
            # Bad log level passed in
            pass

    logger.addHandler(ColorizingStreamHandler())
|
Saevon/syncrae | websocket.py | <gh_stars>0
from django.conf import settings
from django.contrib.auth import get_user
from django.utils.importlib import import_module
from webdnd.player.models.roll import roll_text
from webdnd.player.models.campaigns import Campaign
from events.queue import CampaignQueue, ChatQueue, CharacterQueue
from functools import wraps
from syncrae.events.event import Event
from syncrae.session import Session
from syncrae.terminal import Terminal
import logging
import simplejson
import tornado.ioloop
import tornado.web
import tornado.websocket
logging = logging.getLogger('')
class RequestDummy(object):
    """Minimal stand-in for a Django HttpRequest: carries only the session,
    which is all django.contrib.auth.get_user reads."""
    def __init__(self, session):
        self.session = session
class EventWebsocket(tornado.websocket.WebSocketHandler):
all = {}
@staticmethod
def get(uid):
return EventWebsocket.all.get(uid, False)
@staticmethod
def remove(uid):
return EventWebsocket.all.pop(uid)
def async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
# wraps the function to pass the given args
@wraps(func)
def wrapper():
return func(*args, **kwargs)
# Use tornado's async timer, set to now
tornado.ioloop.IOLoop.instance().add_callback(wrapper)
def get_current_user(self):
engine = import_module(settings.SESSION_ENGINE)
sessionid = self.get_cookie(settings.SESSION_COOKIE_NAME)
self.webdnd_session = engine.SessionStore(sessionid)
request = RequestDummy(self.webdnd_session)
return get_user(request)
def open(self):
user = self.get_current_user()
if not user or not user.is_authenticated():
return self.reject()
self.user = user
self.session = Session.get(self.user.id)
if self.session is None:
self.session = Session(self)
else:
self.session.listen(self)
# Store this in the uid->EventWebsocket dict
# TODO: can't store more than one uid->socket pair
# thus you can't have multiple tabs
EventWebsocket.all[self.user.id] = self
cid = self.webdnd_session.get('cid')
# Only editing a Character
if not cid:
pass
self.campaign = Campaign.objects.get(id=cid)
self.player = self.campaign.players.get(user=self.user)
self.queue = CampaignQueue.get(cid)
self.queue.listen(self)
self.chats = set()
for player in self.campaign.players.exclude(user__id=self.user.id):
uid = player.user.id
self.new_chat(uid)
# send player starting values
for player in self.campaign.players.all():
Event('/session/update', {
'uid': player.user.id,
'color': player.color,
'name': player.user.name,
# TODO: status
}).write_message(self)
# Send the 'New user' event
self.queue.write_message('/session/update', {
'uid': self.user.id,
'status': 'online',
})
self.terminal = Terminal(self)
self.async(self.terminal.start)
def on_close(self):
if hasattr(self, 'session') and not self.session is None:
self.session.drop(self)
if hasattr(self, 'queue') and not self.queue is None:
self.queue.drop(self)
# Make sure the campaign group knows you logged out
self.queue.write_message('/session/update', {
'status': 'offline',
'uid': self.user.id,
})
if hasattr(self, 'chats') and not self.chats is None:
for chat in self.chats:
chat.drop(self)
def on_message(self, raw_message):
try:
message = simplejson.loads(raw_message)
topic = message['topic']
data = message['data']
except BaseException:
logging.exception('Cannot parse message: ' + raw_message)
self.handle_topic(topic, data)
def new_chat(self, uid):
id = ChatQueue.id([self.user.id, uid])
chat = ChatQueue.get(id)
self.chats.add(chat)
chat.listen(self)
other = EventWebsocket.get(uid)
if other:
chat.listen(other)
##############################################
# Topic Handling Code
##############################################
def handle_topic(self, topic, data):
self.__full = topic
topic = topic.strip().strip('/')
parts = topic.split('/')
# call every base handler
for l in range(len(parts) - 1):
sub_topic = '/%s' % '/'.join(parts[:l + 1])
self.call(sub_topic, data)
# Final Handler is special, this is the one that actually counts
# if it fails we use the default handler
if not self.call(self.__full, data):
self.hdl_default(self.__full, data)
def call(self, topic, data):
'''
calls the relevant handler
'''
if not topic in EventWebsocket.TOPICS.keys():
return False
handler = 'hdl_' + EventWebsocket.TOPICS[topic]
if hasattr(self, handler):
getattr(self, handler)(data)
return True
else:
logging.error('Invalid Handler for topic: < %s > - %s:%s' % (self.__full, topic, handler))
return False
##############################################
# Topic Handlers
##############################################
TOPICS = {
'/messages': 'message',
'/messages/new': 'msg_new',
'/terminal/command': 'terminal',
}
def reject(self):
'''
Occurs when the session is rejected
'''
Event('/session/error', {
'error': 'Your gameplay key was wrong, go back to the campaign and try again.',
}, err='5101').write_message(self)
self.close()
def hdl_default(self, topic, data):
# emit event to all listeners
# using the original full topic
self.queue.write_message(topic, data)
def hdl_msg_new(self, data):
chatid = data.get('chatid')
if chatid and chatid != 'campaign':
ChatQueue.get(chatid).write_message('/messages/new', data)
else:
data['chatid'] = 'campaign'
self.hdl_default('/messages/new', data)
def hdl_message(self, data):
data['name'] = self.user.name
data['msg'] = roll_text(data['msg'])
def hdl_terminal(self, data):
return self.terminal(data)
# Process-wide singleton Application, created lazily by get_app().
application = None


def get_app(settings):
    """Return the singleton tornado Application, building it on first use
    with the given tornado settings dict."""
    global application
    if application is None:
        routes = [
            # Application URI's
            (r'/event', EventWebsocket),
        ]
        application = tornado.web.Application(routes, **settings)
    return application
|
Saevon/syncrae | config/settings.py | # Syncrae Application settings
# Port the Tornado websocket server listens on.
SYNCRAE_PORT = 8888
SYNCRAE_TORNADO_SETTINGS = {
    'port': SYNCRAE_PORT,
}

# format: 'major.minor.bug name'
SYNCRAE_VERSION = '0.3.0 BETA'

# Logging configuration (level name and optional output file).
SYNCRAE_LOG_LEVEL = 'INFO'
SYNCRAE_LOG_FILE = None

SYNCRAE_ERR_CODES = {
    # All err codes start with a '5' to represent Syncrae
    # And are a 4 digit number
    # Digit two represents one of the following groups
    # The remaining two digits are a simple unique identifier
    # 1: User error
    '5101': 'Not Logged In',
}

# Local settings for Syncrae
# Any name defined in syncrae.local_settings overrides the defaults above;
# the module is optional, so a missing file is ignored.
try:
    from syncrae.local_settings import *
except ImportError:
    pass
|
Saevon/syncrae | session.py | import logging
logging = logging.getLogger()
class Session(object):
    '''
    Persists a session across tabs, so you can have multiple websockets
    across different browsers/tabs etc.
    '''

    # uid -> Session registry shared by the whole process.
    all = {}

    STATUSES = {
        'on': 'Online',
        'off': 'Offline',
    }

    def __init__(self, listener):
        self.user = listener.user
        self.status = 'off'
        self.listeners = set()
        # listen() flips the status to 'on' and records the first socket.
        self.listen(listener)
        Session.all[self.user.id] = self
        self.id = self.user.id
        logging.info('New Session - %s:%s' % (self.user.id, self.user.name))

    @staticmethod
    def get(uid):
        return Session.all.get(uid)

    @staticmethod
    def remove(uid):
        # Marks the session offline but keeps it in the registry, so a
        # reconnecting tab picks the same Session object back up.
        session = Session.get(uid)
        if session is None:
            return
        session.set_status('off')
        logging.info('Logged Out - %s %s' % (session.user.id, session.user.name))

    def set_status(self, status):
        self.status = status

    def write_message(self, event):
        '''
        Gives a message to every open window for this session
        '''
        for socket in self.listeners:
            event.write_message(socket)

    def listen(self, listener):
        # The first listener brings the session online.
        if self.status == 'off':
            self.set_status('on')
        self.listeners.add(listener)

    def drop(self, listener):
        self.listeners.discard(listener)
        if not self.listeners:
            Session.remove(self.id)

    @property
    def json(self):
        return {
            'id': self.user.id,
            'name': self.user.name,
            'status': Session.STATUSES[self.status],
        }
|
Saevon/syncrae | utils/decorators.py | <filename>utils/decorators.py
from functools import wraps
def cascade(func):
    """
    class method decorator, always returns the
    object that called the method
    """
    @wraps(func)
    def chained(self, *args, **kwargs):
        # Run the wrapped method for its side effects only; hand the
        # instance back so calls can be chained fluently.
        func(self, *args, **kwargs)
        return self
    return chained
|
Saevon/syncrae | webdnd_api.py | <reponame>Saevon/syncrae
from settings import settings
from utils.decorators import cascade
import logging
import simplejson
import urllib
import urllib2
class Api(object):
    """Blocking client for the WebDnD HTTP API: issues one authenticated
    GET on construction and exposes the parsed JSON response."""

    WEBDND = 'localhost:9000'

    def __init__(self, key, endpoint, data=None):
        self.__key = key
        self.__endpoint = endpoint
        self.__error = False
        self.request(data)

    @cascade
    def request(self, data=None):
        """Perform the GET; on any failure record a normalized error
        payload via _error() instead of raising."""
        if data is None:
            data = {}
        data['key'] = self.__key

        headers = {
            'AUTHORIZATION': 'Basic %s' % settings.WEBDND_AUTH,
        }
        url_values = urllib.urlencode(data)
        req = urllib2.Request('http://%(WEBDND)s/api/%(endpoint)s?%(url_values)s' % {
            'WEBDND': Api.WEBDND,
            'endpoint': self.__endpoint,
            'url_values': url_values,
        }, headers=headers)

        try:
            response = urllib2.urlopen(req)
        # Fix: use the py2.6+/py3 'except ... as err' form; the old comma
        # syntax is a SyntaxError on Python 3 and inconsistent with the
        # rest of the codebase (e.g. config/log.py).
        except urllib2.HTTPError as err:
            # HTTP Error
            self._error(code=err.code, text=err.msg)
        except urllib2.URLError as err:
            # Failed to Reach the Server
            # NOTE(review): assumes err.reason is an (errno, message)
            # tuple; on some failures it is a plain string — confirm.
            self._error(text=err.reason[1], code=err.reason[0])
        else:
            out = response.read()
            try:
                self.__response = simplejson.loads(out)
            except simplejson.JSONDecodeError:
                self._error(text='JSON Decode Error')

    def _error(self, code=0, text=''):
        """Record a single-error response in the standard API shape."""
        self.__error = True
        self.__response = {
            'errors': [{
                'code': code,
                'text': text,
                'location': self.__endpoint,
            }]
        }

    @property
    def has_error(self):
        # Logs the error list as a side effect of the check.
        if self.__error:
            logging.error(self.errors)
        return self.__error

    @property
    def errors(self):
        return self.__response['errors']

    @property
    def response(self):
        return self.__response['output']

    @property
    def paging(self):
        return self.__response['paging']
|
JanuszBedkowski/observation_equations | codes/python-scripts/point-to-point-metrics/point_to_point_source_to_landmark_rodrigues_wc.py | <filename>codes/python-scripts/point-to-point-metrics/point_to_point_source_to_landmark_rodrigues_wc.py
from sympy import *
import sys
sys.path.insert(1, '..')
from tait_bryan_R_utils import *
from rodrigues_R_utils import *
from quaternion_R_utils import *
# Symbolic unknowns: landmark position, source point, and the sensor pose
# (translation px/py/pz + Rodrigues rotation vector sx/sy/sz).  The
# commented alternatives select Tait-Bryan or quaternion parameterizations.
x_L, y_L, z_L = symbols('x_L y_L z_L')
x_s, y_s, z_s = symbols('x_s y_s z_s')
px, py, pz = symbols('px py pz')
#om, fi, ka = symbols('om fi ka')
sx, sy, sz = symbols('sx sy sz')
#q0, q1, q2, q3 = symbols('q0 q1 q2 q3')

position_symbols = [px, py, pz]
#orientation_symbols = [om, fi, ka]
orientation_symbols = [sx, sy, sz]
#orientation_symbols = [q0, q1, q2, q3]
landmark_symbols = [x_L, y_L, z_L]
all_symbols = position_symbols + orientation_symbols + landmark_symbols

# Source point in homogeneous coordinates, transformed by the 4x4 pose
# matrix; the homogeneous row is sliced off afterwards.
point_Landmark = Matrix([x_L, y_L, z_L]).vec()
point_source = Matrix([x_s, y_s, z_s, 1]).vec()
#transformed_point_source = (matrix44FromTaitBryan(px, py, pz, om, fi, ka) * point_source)[:-1,:]
transformed_point_source = (matrix44FromRodrigues(px, py, pz, sx, sy, sz) * point_source)[:-1,:]
#transformed_point_source = (matrix44FromQuaternion(px, py, pz, q0, q1, q2, q3) * point_source)[:-1,:]

# Residual: the transformed source point should coincide with the landmark.
target_value = Matrix([0,0,0]).vec()
model_function = transformed_point_source-point_Landmark
delta = target_value - model_function
delta_jacobian=delta.jacobian(all_symbols)

print(delta)
print(delta_jacobian)

# Emit the residual and its 3x9 Jacobian as inline C++ (Eigen) functions.
with open("point_to_point_source_to_landmark_rodrigues_wc_jacobian.h",'w') as f_cpp:
    f_cpp.write("inline void point_to_point_source_to_landmark_rodrigues_wc(double &delta_x, double &delta_y, double &delta_z, double px, double py, double pz, double sx, double sy, double sz, double x_s, double y_s, double z_s, double x_L, double y_L, double z_L)\n")
    f_cpp.write("{")
    f_cpp.write("delta_x = %s;\n"%(ccode(delta[0,0])))
    f_cpp.write("delta_y = %s;\n"%(ccode(delta[1,0])))
    f_cpp.write("delta_z = %s;\n"%(ccode(delta[2,0])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void point_to_point_source_to_landmark_rodrigues_wc_jacobian(Eigen::Matrix<double, 3, 9, Eigen::RowMajor> &j, double px, double py, double pz, double sx, double sy, double sz, double x_s, double y_s, double z_s)\n")
    f_cpp.write("{")
    for i in range (3):
        for j in range (9):
            f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(delta_jacobian[i,j])))
    f_cpp.write("}")
|
JanuszBedkowski/observation_equations | codes/python-scripts/example_func/example_func_ax_plus_b_eq_y_jacobian.py | <reponame>JanuszBedkowski/observation_equations
from sympy import *
# Simple worked example: observation equation for fitting y = a*x + b,
# with unknown line parameters a and b.
x, y, a, b = symbols('x y a b')
all_symbols = [a,b]

# Residual of the linear model and its 1x2 Jacobian w.r.t. [a, b].
target_value = y
model_function = a*x + b
obs_eq = Matrix([target_value - model_function]).vec()
obs_eq_jacobian = obs_eq.jacobian(all_symbols)

print(model_function)
print(obs_eq)
print(obs_eq_jacobian)
print(latex(model_function))
print(latex(obs_eq_jacobian))

# Emit the model, residual and Jacobian as inline C++ (Eigen) functions.
with open("example_func_ax_plus_b_eq_y_jacobian.h",'w') as f_cpp:
    f_cpp.write("inline void example_func_ax_plus_b(double &y, double x, double a, double b)\n")
    f_cpp.write("{")
    f_cpp.write("y = %s;\n"%(ccode(model_function)))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void observation_equation_example_func_ax_plus_b_eq_y(double &delta, double x, double y, double a, double b)\n")
    f_cpp.write("{")
    f_cpp.write("delta = %s;\n"%(ccode(obs_eq[0,0])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void observation_equation_example_func_ax_plus_b_eq_y_jacobian(Eigen::Matrix<double, 1, 2> &j, double x, double y, double a, double b)\n")
    f_cpp.write("{")
    for i in range (1):
        for j in range (2):
            f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(obs_eq_jacobian[i,j])))
    f_cpp.write("}")
|
JanuszBedkowski/observation_equations | codes/python-scripts/point-to-point-metrics/point_to_point_source_to_landmark_tait_bryan_wc_cov.py | <filename>codes/python-scripts/point-to-point-metrics/point_to_point_source_to_landmark_tait_bryan_wc_cov.py
from sympy import *
import sys
sys.path.insert(1, '..')
from tait_bryan_R_utils import *
from rodrigues_R_utils import *
from quaternion_R_utils import *
# Symbolic unknowns: landmark, source point, and the pose (translation +
# Tait-Bryan angles om/fi/ka).  Alternatives for Rodrigues / quaternion
# parameterizations are kept commented out.
x_L, y_L, z_L = symbols('x_L y_L z_L')
x_s, y_s, z_s = symbols('x_s y_s z_s')
px, py, pz = symbols('px py pz')
om, fi, ka = symbols('om fi ka')
#sx, sy, sz = symbols('sx sy sz')
#q0, q1, q2, q3 = symbols('q0 q1 q2 q3')

position_symbols = [px, py, pz]
orientation_symbols = [om, fi, ka]
#orientation_symbols = [sx, sy, sz]
#orientation_symbols = [q0, q1, q2, q3]
landmark_symbols = [x_L, y_L, z_L]
source_point_symbols = [x_s, y_s, z_s]
# beta: the estimated pose parameters; x: the measured point coordinates.
beta_symbols = position_symbols + orientation_symbols
x_symbols = source_point_symbols + landmark_symbols

point_Landmark = Matrix([x_L, y_L, z_L]).vec()
point_source = Matrix([x_s, y_s, z_s, 1]).vec()
transformed_point_source = (matrix44FromTaitBryan(px, py, pz, om, fi, ka) * point_source)[:-1,:]
#transformed_point_source = (matrix44FromRodrigues(px, py, pz, sx, sy, sz) * point_source)[:-1,:]
#transformed_point_source = (matrix44FromQuaternion(px, py, pz, q0, q1, q2, q3) * point_source)[:-1,:]

target_value = Matrix([0,0,0]).vec()
model_function = transformed_point_source-point_Landmark
delta = target_value - model_function

# Second derivatives of the squared residual, used for covariance
# propagation: Hessian w.r.t. beta, and the mixed beta/x derivative.
sum=Matrix([delta[0,0]*delta[0,0]+delta[1,0]*delta[1,0]+delta[2,0]*delta[2,0]]).vec()
d2sum_dbeta2=sum.jacobian(beta_symbols).jacobian(beta_symbols)
d2sum_dbetadx=sum.jacobian(beta_symbols).jacobian(x_symbols)

# Emit both 6x6 matrices as inline C++ (Eigen) functions.
with open("point_to_point_source_to_landmark_tait_bryan_wc_cov.h",'w') as f_cpp:
    f_cpp.write("inline void point_to_point_source_to_landmark_tait_bryan_wc_d2sum_dbeta2(Eigen::Matrix<double, 6, 6, Eigen::RowMajor> &d2sum_dbeta2, double px, double py, double pz, double om, double fi, double ka, double x_s, double y_s, double z_s, double x_L, double y_L, double z_L)\n")
    f_cpp.write("{")
    for i in range (6):
        for j in range (6):
            f_cpp.write("d2sum_dbeta2.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(d2sum_dbeta2[i,j])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void point_to_point_source_to_landmark_tait_bryan_wc_d2sum_dbetadx(Eigen::Matrix<double, 6, 6, Eigen::RowMajor> &d2sum_dbetadx, double px, double py, double pz, double om, double fi, double ka, double x_s, double y_s, double z_s, double x_L, double y_L, double z_L)\n")
    f_cpp.write("{")
    for i in range (6):
        for j in range (6):
            f_cpp.write("d2sum_dbetadx.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(d2sum_dbetadx[i,j])))
    f_cpp.write("}")
    f_cpp.write("\n")
|
JanuszBedkowski/observation_equations | codes/python-scripts/plot-csv.py | <gh_stars>1-10
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import argparse
# try:
# import seaborn as sns
# sns.set(style='whitegrid',font_scale=1.2)
# except:
# print("Run:\nsudo pip3 install seaborn")
# Defaults for manual runs; the CLI entry point below overrides the input
# via --input.  NOTE(review): this module-level SIZE (14,10) is never
# passed through — plot_csv uses its own default (8,6); confirm intent.
fname='sample-data-to-plot.csv'
delim=','
SIZE=(14,10)
def plot_csv(fname, output, title=None, ylabel='', delim=',', SIZE=(8,6), ymax=None):
    """Plot every column of a CSV (first column = x axis) and save to *output*.

    The first row is treated as the header: header[0] becomes the x-axis
    label and the remaining entries become the line labels.
    """
    print(fname)
    with open(fname)as f:
        data = f.read().split('\n')
    # Drop the trailing empty entry left by a final newline.
    if len(data[-1])==0:
        data=data[:-1]
    data=[_.split(delim) for _ in data]
    header=data[0]
    xlabel=header[0]
    header=header[1:]
    data=data[1:]
    # Transpose to column-major: row 0 = x values, remaining rows = series.
    data=np.array([[float(__) for __ in _] for _ in data]).transpose()
    xs=data[0]
    data=data[1:]
    plt.figure(figsize=SIZE)
    for d,h in zip(data,header):
        plt.plot(xs,d,label=h)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel, rotation=0)
    if title is not None:
        plt.title(title)
    plt.legend()
    if ymax is not None:
        # plt.ylim(-float(ymax),float(ymax))
        plt.ylim(0,float(ymax))
    plt.grid(True)
    # plt.axis('equal')
    plt.tight_layout()
    plt.savefig(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--title', default=None, help='plot title; default: no title')
parser.add_argument('-d', '--delim', default=',', help='CSV delimiter, default: ,')
parser.add_argument('--ymax', default=None, help='ymax')
parser.add_argument('-y', '--ylabel', default=None, help='Label for Y axis; default: no label')
parser.add_argument('-i', '--input', help='CSV, comma-separated; delimiter can be changed using --delim',required=True)
parser.add_argument('-o', '--output', help='Output file.eps',required=True)
args = parser.parse_args()
plot_csv(args.input, args.output, title=args.title, ylabel=args.ylabel, delim=delim, ymax=args.ymax)
|
RaeedAhmed/pyaastream | src/pyaastream/terms.py | <gh_stars>0
# Video-container file extensions treated as streamable; matched by
# substring against webtorrent's file listing in main.display().
formats = [
    ".mkv",
    ".mk3d",
    ".mp4",
    ".mov",
    ".qt",
    ".asf",
    ".wmv",
    ".avi",
    ".mxf",
    ".m2p",
    ".ps",
    ".ts",
    ".tsv",
    ".m2ts",
    ".mts",
    ".vob",
    ".evo",
    ".3gp",
    ".3g2",
    ".f4v",
    ".flv",
    ".ogv",
    ".ogx",
    # NOTE(review): ".Org" looks like a typo — matching is case-sensitive
    # and ".ogm" was likely intended; confirm before changing.
    ".Org",
    ".webm",
    ".rmvb",
    ".divx",
]
RaeedAhmed/pyaastream | src/pyaastream/__init__.py | <reponame>RaeedAhmed/pyaastream
"""Torrent/Magnet browser and handler script"""
__version__ = "0.4.5"
|
RaeedAhmed/pyaastream | src/pyaastream/main.py | <reponame>RaeedAhmed/pyaastream<gh_stars>0
import math
import platform
import re
import shlex
import shutil
import subprocess
import urllib.parse
import urllib.request
from typing import NamedTuple
from urllib.error import HTTPError, URLError
from bs4 import BeautifulSoup as bs
from sty import ef, fg, rs
from pyaastream.terms import formats
# type aliases
url = str
html = str
soup = bs

# cli display codes
LOADING = 0
SEARCH = 1
RESULTS = 2
FILES = 3

# change functions if on win
DOS = platform.system() == "Windows"

BASE = "https://nyaa.si"
TEMPFILE = "tmp.torrent"
TEMPDIR = "webtorrent_tmp"
# Prefer mpv when installed, otherwise fall back to vlc.
PLAYER = "mpv" if shutil.which("mpv") else "vlc"
class Torrent(NamedTuple):
    """One search-result row scraped from nyaa."""
    link: str      # torrent detail-page URL
    title: str
    manifest: str  # .torrent manifest URL
    size: str
    date: str
    seeders: int
class Prompt:
    """Module-wide UI state shared between cli() and display().

    Used as a plain namespace (class attributes only, never instantiated).
    """
    query: str
    torrents: list[Torrent]
    torrent: int
    files: list[str]
    # -1 means "no file picked yet".  NOTE(review): cli() later stores a
    # *string* index here; the sentinel comparison still works but the
    # type is inconsistent — confirm.
    file_index = -1
    show_all_files: bool = False
    show_all_torrents: bool = False
# text styling
def key(command: str):
    """Render *command* with its hotkey bracketed and highlighted."""
    if command.isdigit():
        hotkey, tail = command, ""
    else:
        hotkey, tail = command[0], command[1:]
    return "".join([fg(220), ef.bold, ef.dim, "[", hotkey, "]", rs.all, tail])
def title(title: str, columns: int, offset=0):
    """Teal-colored title, ellipsis-truncated to fit the terminal width."""
    limit = columns - offset
    text = title if len(title) <= limit else title[:limit - 3] + "..."
    return fg(43) + text + rs.all
def info(info: list[str], torrent: Torrent):
    """Grey, tab-indented 'attr: value' summary line for *torrent*.

    *info* names Torrent fields to display, in order.
    """
    # Comprehension instead of the manual append loop.
    labels = [f"{attr}: {getattr(torrent, attr)}" for attr in info]
    return fg.da_grey + "\t" + ", ".join(labels) + rs.all
def header(header: str, columns=1000):
    """Magenta header line, ellipsis-truncated to *columns* characters."""
    text = header if len(header) <= columns else header[:columns - 3] + "..."
    return fg(161) + text + rs.all
def construct_url() -> url:
    """Prompt for a query (stored on Prompt) and build the nyaa search URL."""
    Prompt.query = input(f"Search {BASE}: ")
    # Filters: no filter, English-translated anime, sorted by seeders desc.
    params = urllib.parse.urlencode({
        "f": 1,
        "c": "1_2",
        "s": "seeders",
        "o": "desc",
        "q": Prompt.query,
    })
    return f"{BASE}?{params}"
def http_get(link: url) -> soup | None:
    """GET *link*; return parsed soup for HTML responses, else None.

    Network failures are printed and swallowed (returns None), which the
    caller detects as an AttributeError on the missing soup.
    """
    try:
        data = urllib.request.urlopen(link)
        if data.info().get_content_subtype() == "html":
            html = data.read().decode("utf-8")
            return bs(html, "lxml")
    except HTTPError as e:
        print(f"Error {e.code}\n{e.read()}")
    except URLError as e:
        # Fix: grammar in the user-facing message ("Failed to connected").
        print(f"Failed to connect because of {e.reason}")
def get_torrents(html: bs) -> list[Torrent]:
    """Scrape the nyaa results table into Torrent records (seeders > 0 only).

    Cells are indexed positionally per row: 1=name, 2=links, 3=size,
    4=date, 5=seeders; the last <a> in the name cell is the detail link.
    """
    display(LOADING)
    if not html:
        print("Could not load page")
        exit()
    data = [entry.find_all("td") for entry in html.tbody.find_all("tr")]
    torrents = [
        Torrent(
            link=BASE + datum[1].select("a")[-1].get("href"),
            title=datum[1].select("a")[-1].text,
            manifest=BASE + datum[2].select("a")[0].get("href"),
            size=datum[3].text or "-",
            date=datum[4].text.split(" ")[0] or "-",
            seeders=int(datum[5].text),
        )
        for datum in data
    ]
    return list(filter(lambda torrent: torrent.seeders > 0, torrents))
def fetch_files(manifest: str) -> list[str]:
    """Download the .torrent manifest and list its files via webtorrent.

    Returns webtorrent's listing lines of the form "<index> <filename>".
    """
    display(LOADING)
    urllib.request.urlretrieve(manifest, TEMPFILE)
    output = (
        subprocess.run(
            shlex.split(f"webtorrent{'.cmd'*DOS} {TEMPFILE} -s -o {TEMPDIR}"),
            capture_output=True,
        )
        .stdout.decode("utf-8")
        .splitlines()
    )
    # Keep only the lines that begin with a numeric file index.
    return [line for line in output if re.match("^[0-9]+ ", line)]
def clear():
    """Clear the terminal screen, cross-platform."""
    # Use the module-level DOS flag for consistency instead of re-querying
    # platform.system() on every call.
    command = "cmd /c cls" if DOS else "tput reset"
    subprocess.run(shlex.split(command))
def display(context: int) -> None:
    """Redraw the screen for one UI state (LOADING, SEARCH, RESULTS or
    FILES), sized to the current terminal."""
    clear()
    term_size = shutil.get_terminal_size()
    if context == SEARCH:
        print(header("Ctrl-C to exit"))
    if context == LOADING:
        # Vertically and horizontally center the loading message.
        pad = "\n" * (round(term_size.lines / 2))
        print(f"{pad}{'Loading...'.center(term_size.columns)}{pad[:-1]}")
    if context == RESULTS:
        print(header(f"Search results for '{Prompt.query}':"))
        # Limit to about half a screen unless "show all" is toggled.
        torrents = (
            Prompt.torrents
            if Prompt.show_all_torrents
            else Prompt.torrents[: (math.floor(term_size.lines / 2) - 2)]
        )
        for index, torrent in enumerate(torrents):
            print(
                f"{key(str(index)):28}{title(torrent.title, term_size.columns, offset=6)}")
            print(info(["size", "date", "seeders"], torrent))
    if context == FILES:
        torrent_title = Prompt.torrents[int(Prompt.torrent)].title
        print(header(torrent_title, term_size.columns))
        print(header(f"Page: {Prompt.torrents[int(Prompt.torrent)].link}"))
        # Hide non-video files unless "show all" is toggled.
        files = (
            Prompt.files
            if Prompt.show_all_files
            else [file for file in Prompt.files if any(fmt in file for fmt in formats)]
        )
        for file in files:
            index, file_name = file.split(
                " ")[0], " ".join(file.split(" ")[1:])
            print(key(index), title(file_name, term_size.columns, offset=6))
def cli() -> None:
    """Interactive loop: search -> pick torrent -> pick file -> stream."""
    if DOS:
        # used to init escape codes on windows cmd
        subprocess.run("", shell=True)
    while True:
        display(SEARCH)
        try:
            Prompt.torrents = get_torrents(http_get(construct_url()))
        except AttributeError:
            # http_get returned None (network failure); re-prompt.
            continue
        while True:
            display(RESULTS)
            torrent_index = input(
                f"{key('back')}, {key('show all')}, or Choose torrent: ")
            if torrent_index.isdigit() and int(torrent_index) in range(len(Prompt.torrents)):
                Prompt.torrent = int(torrent_index)
                Prompt.files = fetch_files(
                    Prompt.torrents[Prompt.torrent].manifest)
            elif torrent_index == "b":
                break
            elif torrent_index == "s":
                Prompt.show_all_torrents = not Prompt.show_all_torrents
                continue
            else:
                continue
            # Reached only after a valid torrent pick (other branches
            # continue/break above): inner file-selection loop.
            while True:
                display(FILES)
                last_picked = f" ({Prompt.file_index})" if Prompt.file_index != -1 else ""
                file_index = input(
                    f"{key('back')}, {key('show all')}, or Choose file{last_picked}: ")
                if file_index.isdigit() and int(file_index) in range(len(Prompt.files)):
                    Prompt.file_index = file_index
                    stream_file(file_index)
                elif file_index == "b":
                    Prompt.file_index = -1
                    break
                elif file_index == "s":
                    Prompt.show_all_files = not Prompt.show_all_files
                else:
                    continue
def stream(target, choice=" "):
try:
subprocess.run(
shlex.split(
f'webtorrent{".cmd"*DOS} {target} -o {TEMPDIR}{choice} --{PLAYER}'
)
)
except KeyboardInterrupt:
print("Stopping stream")
def stream_file(file_index):
    # Stream a single file (by webtorrent index) from the cached manifest.
    return stream(target=TEMPFILE, choice=f" -s {file_index} ")

def stream_uri(uri):
    # Stream directly from a quoted magnet/torrent URI.
    return stream(target=f'"{uri}"')
def direct():
    """Prompt for a magnet/torrent URI and stream it immediately, always
    cleaning up the temp download directory."""
    try:
        stream_uri(input("Enter uri: "))
    except KeyboardInterrupt:
        exit()
    finally:
        shutil.rmtree(TEMPDIR, ignore_errors=True)
def main():
    """Entry point: run the interactive CLI, always cleaning up temp files
    and leaving the terminal cleared."""
    try:
        cli()
    except KeyboardInterrupt:
        exit()
    finally:
        shutil.rmtree(TEMPDIR, ignore_errors=True)
        try:
            # NOTE(review): os is reached through shutil's internal import
            # (shutil.os); an explicit `import os` would be cleaner.
            shutil.os.unlink(TEMPFILE)
        except FileNotFoundError:
            pass
        clear()

if __name__ == "__main__":
    main()
|
0x71d3/text-prob | src/text_prob/__init__.py | from .gpt2_prob import GPT2Prob
|
0x71d3/text-prob | src/text_prob/gpt2_prob.py | from typing import Optional
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoTokenizer, AutoModelForCausalLM
class GPT2Prob:
    """Scores a text's probability under a pretrained causal LM, optionally
    word-segmenting Japanese input first (MeCab/JumanDIC or Juman++)."""

    def __init__(
        self,
        pretrained_model_name_or_path: str,
        do_segmentation: Optional[bool] = False,
        use_mecab: Optional[bool] = False
    ) -> None:
        self.tokenizer = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            pretrained_model_name_or_path
        )
        self.do_segmentation = do_segmentation
        self.use_mecab = use_mecab

        # Segmentation backends are optional extras; import lazily and
        # fail with a clear message when missing.
        if not do_segmentation:
            return
        if use_mecab:
            try:
                from fugashi import GenericTagger
            except ImportError:
                raise ImportError('fugashi is required for segmentation.')
            try:
                import jumandic
            except ImportError:
                raise ImportError('JumanDIC is required for segmentation.')
            self.tagger = GenericTagger('-Owakati ' + jumandic.MECAB_ARGS)
        else:
            try:
                import zenhan
            except ImportError:
                raise ImportError('zenhan is required for normalization.')
            self.zenhan = zenhan
            try:
                from pyknp import Juman
            except ImportError:
                raise ImportError('PyKNP is required for segmentation.')
            self.jumanpp = Juman()

    def _segment(self, text: str) -> str:
        """Return *text* split into whitespace-separated words."""
        if self.use_mecab:
            return self.tagger.parse(text)
        # Juman++ expects full-width input; normalize first.
        analysis = self.jumanpp.analysis(self.zenhan.h2z(text))
        return ' '.join(m.midasi for m in analysis.mrph_list())

    def _get_loss(
        self,
        lm_logits: torch.Tensor,
        labels: torch.Tensor
    ) -> torch.Tensor:
        """Per-token cross-entropy of next-token predictions, unreduced."""
        # Shift so position i predicts token i+1.
        logits = lm_logits[..., :-1, :].contiguous()
        targets = labels[..., 1:].contiguous()
        criterion = CrossEntropyLoss(reduction='none')
        return criterion(
            logits.view(-1, logits.size(-1)),
            targets.view(-1)
        )

    def _get_log_prob(
        self,
        loss: torch.Tensor,
        length_penalty: float
    ) -> torch.Tensor:
        """Length-normalized total log-probability of the sequence."""
        return -loss.sum() / (loss.numel() ** length_penalty)

    def __call__(
        self,
        text: str,
        return_log_prob: Optional[bool] = False,
        length_penalty: Optional[float] = 1.0
    ) -> float:
        """Return the (log-)probability of *text* under the model."""
        if self.do_segmentation:
            text = self._segment(text)
        encoded = self.tokenizer(text, return_tensors='pt')
        with torch.no_grad():
            outputs = self.model(**encoded)
        loss = self._get_loss(outputs.logits, encoded['input_ids'])
        log_prob = self._get_log_prob(loss, length_penalty)
        if return_log_prob:
            return log_prob.item()
        return log_prob.exp().item()
|
0x71d3/text-prob | tests/test_gpt2_prob.py | import pytest
from text_prob import GPT2Prob
def test_gpt2_prob():
    """Pre-segmented input must reproduce the reference probability exactly."""
    sentence = '人間 と 同 程度 に 言語 を 理解 する こと の できる 人工 知能 システム に ついて 研究 して い ます 。'
    scorer = GPT2Prob('nlp-waseda/gpt2-small-japanese-wikipedia')
    assert scorer(sentence) == 0.04528525471687317
@pytest.mark.parametrize('use_mecab', [False, True])
def test_gpt2_prob_segmentation(use_mecab):
    """Raw input run through either segmenter must match the pre-segmented score."""
    sentence = '人間と同程度に言語を理解することのできる人工知能システムについて研究しています。'
    scorer = GPT2Prob(
        'nlp-waseda/gpt2-small-japanese-wikipedia',
        do_segmentation=True,
        use_mecab=use_mecab,
    )
    assert scorer(sentence) == 0.04528525471687317
|
Gunjanph/GSchedular | Scheduler_Test/oopinsta.py | import os
import requests,sys,webbrowser,bs4
import urllib
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# Chrome is configured to emulate a mobile device: Instagram only exposes the
# in-browser upload flow in its mobile layout.
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
mobile_emulation = { "deviceName": "Galaxy S5" }
# chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
# driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub',
#           desired_capabilities = chrome_options.to_capabilities())
# Instagram credentials. NOTE(review): the password here is a scrubbed
# placeholder — supply real credentials via environment/config, never commit them.
username="autopostscheduler"
password="<PASSWORD>"
class IGbot:
    """Minimal Instagram bot driving the mobile web UI through Selenium.

    Bug fixes vs. the original:
      * the constructor was named ``_init_`` (single underscores), so Python
        never called it and instances had no attributes;
      * ``login`` and ``upload`` were defined without ``self``, so every
        ``self.driver`` reference raised NameError.
    """

    def __init__(self, username, password):
        """Store credentials and start a mobile-emulating Chrome driver."""
        self.username = username
        self.password = password
        # chromedriver binary is expected next to this script.
        self.driver = webdriver.Chrome(executable_path=r"./chromedriver", options=chrome_options)

    def login(self):
        """Log in to Instagram, dismissing the save-info and notification prompts."""
        self.driver.get("https://www.instagram.com/")
        # Landing page "Log In" button.
        login_button_1 = self.driver.find_element_by_xpath("//button[text() = 'Log In']")
        login_button_1.click()
        # Wait until the credential form has rendered.
        while len(self.driver.find_elements_by_xpath("//input[@name='username']")) == 0:
            time.sleep(1)
            print('Loading...')
        # Fill in username and password.
        username_input = self.driver.find_element_by_xpath("//input[@name='username']")
        username_input.send_keys(self.username)
        password_input = self.driver.find_element_by_xpath("//input[@name='password']")
        password_input.send_keys(self.password)
        # NOTE(review): these generated CSS class names are brittle and will
        # break whenever Instagram redeploys its frontend.
        login_button_2 = self.driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")
        login_button_2[1].click()
        time.sleep(5)
        # "Save your login info?" dialog — dismiss it.
        while len(self.driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")) == 0:
            time.sleep(1)
            print('Save Information Loading...')
        element = self.driver.find_element_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")
        element.click()
        # "Turn on notifications?" dialog — choose Not Now.
        while len(self.driver.find_elements_by_xpath("//button[@class='aOOlW HoLwm ']")) == 0:
            time.sleep(1)
            print('Notification Loading...')
        element = self.driver.find_element_by_xpath("//button[@class='aOOlW HoLwm ']")
        element.click()
        print("Login Compleat")

    def upload(self):
        """Open the new-post dialog; image selection is delegated to an autokey macro."""
        upload_button = self.driver.find_element_by_xpath("//div[@class='q02Nz _0TPg']")
        upload_button.click()
        # element.send_keys(os.getcwd()+"/Test1.png")
        # Selenium cannot drive the native file picker here, so an external
        # autokey script performs the image selection.
        os.system('autokey-run -s select_image')
|
Gunjanph/GSchedular | run.py | from flask import Flask, render_template, url_for, flash, redirect
from forms import LoginForm
# Flask application serving the login page below.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load it from the environment in production.
app.config['SECRET_KEY'] = 'qwertyuiop'
@app.route("/")
@app.route('/login.html', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
flash(f'Login Successful for {form.username.data}!')
return redirect(url_for('home'))
return render_template('login.html', form=form)
if __name__ == '__main__':
    # Development server only; disable debug mode before deploying.
    app.run(debug =True)
Gunjanph/GSchedular | insta.py | <reponame>Gunjanph/GSchedular
import os
import requests,sys,webbrowser,bs4
import urllib
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# One-shot script: log in to Instagram via the mobile-emulated web UI and open
# the upload dialog. Mobile emulation is required because the upload button
# only exists in Instagram's mobile layout.
chrome_options = Options()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
mobile_emulation = { "deviceName": "Galaxy S5" }
# chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
# driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub',
#           desired_capabilities = chrome_options.to_capabilities())
# chromedriver binary is expected next to this script.
driver = webdriver.Chrome(executable_path=r"./chromedriver",options=chrome_options)
driver.get("https://www.instagram.com/")
# NOTE(review): the password is a scrubbed placeholder — inject real
# credentials via environment/config, never commit them.
username="autopostscheduler"
password="<PASSWORD>"
# Landing page "Log In" button.
element = driver.find_element_by_xpath("//button[text() = 'Log In']")
element.click()
# Poll until the credential form has rendered.
while(len(driver.find_elements_by_xpath("//input[@name='username']"))==0):
    time.sleep(1)
    print('Loading...')
username_button = driver.find_element_by_xpath("//input[@name='username']")
username_button.send_keys(username)
pass_button = driver.find_element_by_xpath("//input[@name='password']")
pass_button.send_keys(password)
# NOTE(review): generated CSS class names below are brittle and change often.
while(len(driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']"))==0):
    time.sleep(1)
    print('Loading...')
element = driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")
element[1].click()
time.sleep(5)
# "Save your login info?" dialog — dismiss.
while(len(driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']"))==0):
    time.sleep(1)
    print('sqdOP Loading...')
element = driver.find_element_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")
element.click()
# "Turn on notifications?" dialog — choose Not Now.
while(len(driver.find_elements_by_xpath("//button[@class='aOOlW HoLwm ']"))==0):
    time.sleep(1)
    print('aOOlW Loading...')
element=driver.find_element_by_xpath("//button[@class='aOOlW HoLwm ']")
element.click()
# Open the new-post dialog and point it at a local test image.
element=driver.find_element_by_xpath("//div[@class='q02Nz _0TPg']")
element.click()
element.send_keys(os.getcwd()+"/Test1.png")
|
Gunjanph/GSchedular | heroku_Test/app/script.py | from datetime import datetime
import time

# Print the current time once per minute, at the top of the minute
# (seconds == 0). `mint` remembers the last minute reported so the
# multiple polls inside second 0 print only once.
mint = datetime.now().minute
while 1:
    now = datetime.now().time()
    if now.second == 0 and now.minute != mint:
        print(now)
        mint = now.minute
    # Bug fix: the original loop busy-waited and pinned a CPU core. A short
    # sleep keeps sub-second polling resolution (so second 0 is never missed)
    # while yielding the processor.
    time.sleep(0.2)
|
Gunjanph/GSchedular | Scheduler_Test/test.py | from selenium import webdriver
username = "Your Username"
password = "<PASSWORD>"
getdriver = ("https://www.instagram.com/accounts/login/")
driver = webdriver.Firefox()
driver.get(getdriver)
driver.find_element_by_xpath("//input[@name='username']").send_keys(username)
driver.find_element_by_xpath("//input[@name='password']").send_keys(password)
driver.find_element_by_xpath("//button[contains(.,'Log in')]").click() |
Gunjanph/GSchedular | Scheduler_Test/jj.py | import requests,sys,webbrowser,bs4
import urllib
import re
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# Scrape jjfoodservice.com for each product code listed in the input CSV and
# write one enriched row per product match to out2.csv. Runs headless Chrome.
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
# Input CSV: product codes are expected in column index 1.
f=open('JJ Food Enfield.csv')
l=f.read()
l=l.split('\n')
# NOTE(review): neither `f` nor `out` is ever closed; `out` relies on
# interpreter exit to flush — consider `with open(...)` blocks.
out=open('out2.csv','w+')
out.write('Supplier Product Code,Base Price,On-Shelf Price, Promo Price ,Type of Promo ,R.S.P. ,P.O.R.,Other,,,,Description,Category,SKU,Description2, Pack Size\n')
# Skip the header row of the input file.
for ele in l[1:]:
    try:
        line=ele.split(',')
        print(line[1])
        driver = webdriver.Chrome(executable_path=r"./chromedriver",chrome_options=chrome_options)
        driver.get("https://www.jjfoodservice.com/search?b=EN-MW&q="+line[1]+"&size=12&sortType=search")
        time.sleep(2)
        # Grab the fully rendered DOM (the page is built client-side).
        r = driver.execute_script("return document.documentElement.outerHTML")
        soup = BeautifulSoup(r,'html.parser')
        dabbe=soup.find_all('div',{'class':'sc-fihHvN esyGev'})
        c=2
        # Retry with increasing waits (2s, 4s, ... up to 10s) if no result
        # cards were found — the page may not have finished rendering.
        # NOTE(review): each retry spawns a fresh Chrome without quitting the
        # previous one, leaking browser processes.
        while(len(dabbe)==0):
            print(c)
            driver = webdriver.Chrome(executable_path=r"./chromedriver",chrome_options=chrome_options)
            driver.get("https://www.jjfoodservice.com/search?b=EN-MW&q="+line[1]+"&size=12&sortType=search")
            time.sleep(c)
            r = driver.execute_script("return document.documentElement.outerHTML")
            soup = BeautifulSoup(r,'html.parser')
            dabbe=soup.find_all('div',{'class':'sc-fihHvN esyGev'})
            if c==10:
                break
            c+=2
        driver.quit()
        print(len(dabbe))
        # Fields default to empty strings when absent from the page.
        promo=''
        description=''
        description2=''
        category=''
        basePrice=''
        onPrice=''
        for dabba in dabbe:
            # check for Type of Promo badge on the result card
            offer=dabba.find_all('span',{'class':'sc-ekulBa iUnWbq'})
            if(len(offer)==1):
                promo=offer[0].text
            elif (len(offer)>1):
                print(ele,len(offer))
            # description (product title); bare excepts keep the scrape
            # best-effort when a card lacks a field
            d=dabba.find('h1',{'class':'sc-crNyjn esoVxW'})
            try:
                description=d.text
            except:
                print(d.text)
            try:
                description2=description.split('-')[-1]
            except:
                print(d)
            # category label (taken from the page, not the card)
            cat=soup.find('label',{'class':'sc-fqCOlO kYvrne'})
            try:
                category=cat.text
            except:
                print(cat)
            # prices: index 1 is the base price, index 0 the on-shelf price
            p=soup.find_all('div',{'class':'sc-ivVeuv exHQsL'})
            try:
                basePrice=p[1].text.split(':')[1]
            except:
                print(p)
            try:
                onPrice=p[0].text.split(':')[1]
            except:
                print(p)
            # append one CSV row for this product card
            out.write(line[1]+','+basePrice+','+onPrice+',,'+promo+',,,,,,,'+description+','+category+',,'+description2+',\n')
    except:
        print('error')
|
mspector/expertise | expertise/evaluators/precision_at_m.py | from operator import itemgetter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from openreview_matcher.evals import base_evaluator
from openreview_matcher import utils
matplotlib.style.use('ggplot')
class Evaluator(base_evaluator.Evaluator):
    """
    An Evaluator instance that evaluates
    precision_at_m =
        (number of papers reviewers bid positively on in top M) /
        (total number of papers retrieved)
    This evaluation method requires us to look at the bids, so we import
    them from somewhere in the __init__() method
    """
    def __init__(self, params=None):
        # params must provide "data_path" (pickled object containing
        # "bids_by_forum") and "m_values" (cutoffs for precision@M).
        datapath = params["data_path"]
        self.m_values = params["m_values"]
        self.data = utils.load_obj(datapath)
        self.bids_by_forum = self.data["bids_by_forum"]
    def evaluate(self, ranklists):
        """
        Evaluate the model using a ranked list. Either you can evaluate using a single ranked list or
        evaluate against each individual query and average their precision scores
        Arguments
            @ranklists: a list of tuples.
            The 0th index of the tuple contains the forum ID of the rank of the list being evaluated
            The 1st index of the tuple contains a list of reviewer IDS, in order of expertise score
        Returns
            a generator object that yields an array of scores for each ranked list. If only one score
            is need, return the score in an array by itself
        """
        # Delegates to the single-rank evaluation path.
        return self.evaluate_using_single_rank(ranklists)
    def evaluate_using_individual_queries(self, ranklists):
        """ Evaluate using individual query ranks """
        for forum, rank_list in ranklists:
            scores = []
            for m in self.m_values:
                positive_labels = ["I want to review", "I can review"]
                # NOTE(review): .encode('utf-8') yields bytes on Python 3, so
                # the membership test against (str) reviewer ids would always
                # fail there — this looks like Python 2 era code; verify.
                positive_bids = [bid.signatures[0].encode('utf-8') for bid in self.bids_by_forum[forum] if bid.tag in positive_labels]
                relevant_reviewers = [1 if reviewer_id in positive_bids else 0 for reviewer_id in rank_list]
                precision = self.precision_at_m(relevant_reviewers, m)
                scores.append(precision)
            yield forum, scores
    def setup_ranked_list(self, rank_list):
        """
        Setup the single ranked list for a model
        Combines all of the individual query ranks into one single rank
        """
        new_rank_list = []
        for forum, rank_list in rank_list:
            for reviewer_score in rank_list:
                # Entries are "reviewer;score" strings.
                reviewer = reviewer_score.split(";")[0]
                score = float(reviewer_score.split(";")[1])
                has_bid = self.reviewer_has_bid(reviewer, forum) # filter for reviewers that gave a bid value
                if has_bid:
                    new_rank_list.append((reviewer, score, forum))
        # Highest expertise score first.
        ranked_reviewers = sorted(new_rank_list, key=itemgetter(1), reverse=True)
        return ranked_reviewers
    def reviewer_has_bid(self, reviewer, paper):
        """ Returns True if the reviewer bid on that 'paper' """
        paper_bids = self.bids_by_forum[paper]
        # NOTE(review): the trailing [0] inspects only the FIRST bid on the
        # paper, not whether *this* reviewer appears anywhere in the list —
        # `any(...)` was probably intended. Also reviewer.decode assumes a
        # bytes reviewer id (Python 2 era). Confirm before changing.
        has_bid = [True if bid.signatures[0] == reviewer.decode("utf-8") else False for bid in paper_bids][0]
        return has_bid
    def get_bid_for_reviewer_paper(self, reviewer, paper):
        """
        Gets the bid for the reviewer and the paper
        Returns 0 if the bid is not relevant and 1 if the bid is relevant
        """
        positive_labels = ['I want to review', 'I can review']
        paper_bids = self.bids_by_forum[paper]
        bid_value = [1 if bid.tag in positive_labels else 0 for bid in paper_bids if
                     bid.signatures[0] == reviewer.decode('utf-8')]
        if len(bid_value) > 0:
            return bid_value[0]
        else:
            # Reviewer never bid on this paper: treat as not relevant.
            return 0
    def evaluate_using_single_rank(self, rank_list):
        """
        Evaluate against a single ranked list computed by the model
        """
        ranked_reviewers = self.setup_ranked_list(rank_list)
        scores = []
        positive_bids = 0
        for reviewer, score, forum in ranked_reviewers:
            bid = self.get_bid_for_reviewer_paper(reviewer, forum)
            if bid == 1:
                positive_bids +=1
        # Precision@m for every prefix of the combined ranking.
        for m in range(1, len(ranked_reviewers) + 1):
            topM = ranked_reviewers[0: m]
            topM = map(lambda reviewer: (reviewer[0], self.get_bid_for_reviewer_paper(reviewer[0], reviewer[2])), topM)
            pos_bids_from_topM = [bid for bid in topM if bid[1] == 1]
            precision = float(len(pos_bids_from_topM)) / float(m) # precision => relevant bids retrieved / # of retrieved
            scores.append((m, precision))
        return scores
    def precision_at_m(self, ranked_list, m):
        """
        Computes precision at M
        Arguments:
            ranked_list: ranked list of reviewers for a forum where each entry is either a 0 or 1
                1 - reviewer that reviewer wanted to bid
                0 - reviewer did not want to bid
            m: cuttoff value
        Returns:
            A float representing the precision
        """
        topM = np.asarray(ranked_list)[:m] != 0
        return np.mean(topM)
    def graph_precision_values(self, precision_values):
        """ Graph the recall values against M values """
        # NOTE(review): axis labels and filename say "Recall" although the
        # values plotted are precision scores — confirm intended naming.
        fig, ax = plt.subplots()
        df_recall = pd.DataFrame({
            '@M': range(1, len(precision_values)+1),
            'Recall': precision_values
        })
        ax = df_recall.plot.line(x="@M", y="Recall", ax=ax)
        ax.set_title("Recall Curve", y=1.08)
        ax.set_ylabel("Recall")
        fig.savefig("results/figures/{0}".format("recall_curve_bow_avg"), dpi=200)
mspector/expertise | expertise/test/__main__.py | '''
'''
import argparse
import os
import json
from collections import OrderedDict
import expertise
from expertise.config import ModelConfig
def test_model(args):
    """Load the config at args.config_path, run the model's test routine, and persist the updated config."""
    cfg_path = os.path.abspath(args.config_path)
    with open(cfg_path) as handle:
        raw = json.load(handle, object_pairs_hook=OrderedDict)
    cfg = ModelConfig(**raw)
    # The config names the model; any extra CLI args are forwarded verbatim.
    model = expertise.load_model(cfg.model)
    cfg = model.test(cfg, *args.additional_params)
    cfg.save(cfg_path)
# CLI entry point: python -m expertise.test <config_path> [model-specific args...]
parser = argparse.ArgumentParser()
parser.add_argument('config_path', help="a config file for a model")
# Everything after config_path is passed through to the model's test().
parser.add_argument('additional_params', nargs=argparse.REMAINDER)
args = parser.parse_args()
test_model(args)
|
mspector/expertise | expertise/preprocess/textrank/textrank_chunks.py | from expertise.preprocess.textrank import TextRank
def keyphrases(text):
    """Run TextRank in noun-chunk mode over *text* and return its keyphrase strings."""
    ranker = TextRank()
    ranker.analyze(text, chunks=True)
    # keyphrases() yields (phrase, score) pairs; keep only the phrases.
    return [phrase for phrase, _score in ranker.keyphrases()]
|
mspector/expertise | expertise/models/tfidf/setup_tfidf.py | import os
import argparse
import expertise
# Baseline configuration for the TF-IDF model; CLI flags below may override
# the dataset directory, and the saved JSON config can override anything.
default_config = {
    'name': '',
    'dataset': {
        'directory': ''
    },
    'experiment_dir': './',
    'min_count_for_vocab': 1,
    'num_processes': 4,
    'random_seed': 9,
    'max_num_keyphrases': 25,
    'max_seq_length': 512,
    'do_lower_case': True,
    'embedding_aggregation_type': 'all',
    'batch_size': 32,
    'use_cuda': False,
    'kp_setup_dir': '',
    'tfidf_model': ''
}
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset_dir')
    parser.add_argument('-c', '--config_path', default='./config-tfidf.json', help="a config file for a model")
    args = parser.parse_args()
    if args.dataset_dir:
        default_config.update({'dataset': {'directory': args.dataset_dir}})
    # Build the config from defaults, write it out, then re-read the file so
    # any values already present in config_path take precedence.
    config = expertise.config.ModelConfig()
    config.update(**default_config)
    config.save(args.config_path)
    config.update_from_file(args.config_path)
    config.save(args.config_path)
    print(config)
    dataset = expertise.dataset.Dataset(**config.dataset)
    # Create <experiment_dir>/setup and record it on the config.
    experiment_dir = os.path.abspath(config.experiment_dir)
    setup_dir = os.path.join(experiment_dir, 'setup')
    if not os.path.exists(setup_dir):
        os.mkdir(setup_dir)
    config.update(setup_dir=setup_dir)
    config.save(args.config_path)
    # Deterministic train/dev/test split of submissions, then dump the
    # bid-derived test labels for later evaluation.
    (train_set_ids,
     dev_set_ids,
     test_set_ids) = expertise.utils.split_ids(list(dataset.submission_ids), seed=config.random_seed)
    bids_by_forum = expertise.utils.get_bids_by_forum(dataset)
    test_labels = expertise.utils.format_bid_labels(test_set_ids, bids_by_forum)
    expertise.utils.dump_jsonl(os.path.join(setup_dir, 'test_labels.jsonl'), test_labels)
|
mspector/expertise | expertise/models/tfidf/tfidf.py | from __future__ import absolute_import, print_function, unicode_literals
import sys, os
import re
from collections import defaultdict
from collections import Counter
from gensim.similarities import SparseMatrixSimilarity
from gensim.models import TfidfModel
from gensim import corpora
import ipdb
class Model():
    """TF-IDF expertise model over keyphrase archives.

    Builds a shared gensim dictionary/TF-IDF model from the union of all
    paper and reviewer keyphrase archives passed to the constructor.
    """

    def __init__(self, kp_archives_by_paperid, kp_archives_by_userid):
        """Index every token list from both archives and fit the TF-IDF weights."""
        self.dictionary = corpora.Dictionary()
        # self.bow_by_userid = defaultdict(Counter)
        # self.bow_by_paperid = defaultdict(Counter)
        self.all_documents = []
        self.kp_archives_by_paperid = kp_archives_by_paperid
        self.kp_archives_by_userid = kp_archives_by_userid
        for archive in self.kp_archives_by_paperid.values():
            for token_list in archive:
                self.dictionary.add_documents([token_list])
                self.all_documents += [token_list]
        for archive in self.kp_archives_by_userid.values():
            for token_list in archive:
                self.dictionary.add_documents([token_list])
                self.all_documents += [token_list]
        self.corpus_bows = [self.dictionary.doc2bow(doc) for doc in self.all_documents]
        self.tfidf = TfidfModel(self.corpus_bows)

    def fit(self):
        """
        Fit the TFIDF model
        each argument should be a list of lists, where each inner list is a list of keyphrases.
        e.g.
        submission_kps = [
            ['deep_learning', 'natural_language_processing'],
            ['neural_imaging', 'fmri', 'functional_magnetic_resonance']
        ]
        """
        self.bow_archives_by_paperid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \
            for userid, archive in self.kp_archives_by_paperid.items()}
        self.bow_archives_by_userid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \
            for userid, archive in self.kp_archives_by_userid.items()}
        flattened_archives = [
            bow for archive in self.bow_archives_by_paperid.values() for bow in archive]
        self.index = SparseMatrixSimilarity(
            [self.tfidf[bow] for bow in flattened_archives],
            num_features=len(self.dictionary)
        )

    def predict(self, note_record):
        """
        predict() should return a list of openreview user IDs, in descending order by
        expertise score in relation to the test record.
        Arguments
            @test_record: a note record (dict) representing the note to rank against.
            Testing records should have a "forum" field. This means that the record
            is identified in OpenReview by the ID listed in that field.
        Returns
            a list of reviewer IDs in descending order of expertise score
        """
        # Bug fix: `.iteritems()` is Python 2 only; the rest of this class
        # already uses the Python 3 `.items()` API.
        # NOTE(review): self.bow_by_userid is never initialized (its assignment
        # is commented out in __init__), so this method still raises
        # AttributeError — it likely should iterate bow_archives_by_userid.
        scores = [(signature, self.score(signature, note_record)) for signature, _ in self.bow_by_userid.items()]
        rank_list = [signature for signature, score in sorted(scores, key=lambda x: x[1], reverse=True)]
        return rank_list

    def score(self, reviewer_tokens, paper_tokens):
        """
        Returns a score from 0.0 to 1.0, representing the degree of fit between the paper and the reviewer
        """
        paper_bow = [(t[0], t[1]) for t in self.dictionary.doc2bow(paper_tokens)]
        reviewer_bow = [(t[0], t[1]) for t in self.dictionary.doc2bow(reviewer_tokens)]
        # Sparse TF-IDF vectors as {term_index: weight}, defaulting to 0.
        forum_vector = defaultdict(lambda: 0, {idx: score for (idx, score) in self.tfidf[paper_bow]})
        reviewer_vector = defaultdict(lambda: 0, {idx: score for (idx, score) in self.tfidf[reviewer_bow]})
        # Dot product of the two sparse vectors.
        return sum([forum_vector[k] * reviewer_vector[k] for k in forum_vector])
|
mspector/expertise | expertise/config/core.py | from collections import OrderedDict
import json
import random
import os
import pickle
import pkgutil
import expertise
class ModelConfig(object):
    """Mutable configuration bag.

    Settings are kept in an insertion-ordered mapping and mirrored onto the
    instance as attributes, so `config.foo` works after `update(foo=...)`.
    """

    def __init__(self, **kwargs):
        """Start empty, then absorb any keyword arguments."""
        self._config = {}
        self.update(**kwargs)

    def __repr__(self):
        # Pretty-printed JSON view of the current settings.
        return json.dumps(self._config, indent=4)

    def update(self, **kwargs):
        """Merge *kwargs* over the current settings and expose each key as an attribute."""
        merged = OrderedDict({**self._config, **kwargs})
        self._config = merged
        for key, value in merged.items():
            setattr(self, key, value)

    def save(self, outfile):
        """Write the configuration as indented JSON to *outfile*."""
        with open(outfile, 'w') as sink:
            json.dump(self._config, sink, indent=4, separators=(',', ': '))

    def update_from_file(self, file):
        """Load JSON from *file* (order-preserving) and merge it into this config."""
        path = os.path.abspath(file)
        with open(path) as handle:
            loaded = json.load(handle, object_pairs_hook=OrderedDict)
        self.update(**loaded)
|
mspector/expertise | expertise/models/centroid/centroid.py | <filename>expertise/models/centroid/centroid.py
import numpy as np
import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss
from expertise import utils
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
class Model(torch.nn.Module):
    """Centroid model: a paper or reviewer is represented as the average of
    its keyword embeddings; fit is scored by the dot product of the two
    averaged vectors."""
    def __init__(self, config, vocab):
        # config must provide: embedding_dim, use_cuda, batch_size (and, for
        # scoring, dev_batch_size).
        super(Model, self).__init__()
        self.config = config
        self.vocab = vocab
        # Index 0 is reserved as padding; hence len(vocab)+1 rows.
        self.embedding = nn.Embedding(len(vocab)+1, config.embedding_dim, padding_idx=0)
        # Vector of ones (used for loss)
        if self.config.use_cuda:
            self.ones = torch.ones(config.batch_size, 1).cuda()
        else:
            self.ones = torch.ones(config.batch_size, 1)
        self._bce_loss = BCEWithLogitsLoss()
    def compute_loss(self, batch_source, pos_result, neg_result, batch_lengths, pos_len, neg_len):
        """ Compute the loss (BPR) for a batch of examples
        :param batch_source: a batch of source keyphrase indices (list of lists)
        :param pos_result: True aliases of the Mentions
        :param neg_result: False aliases of the Mentions
        :param batch_lengths: a list of sample lengths, one for each sample in the batch (list of lists)
        :param pos_len: lengths of positives
        :param neg_len: lengths of negatives
        :return:
        """
        batch_size = len(batch_source)
        # B by dim
        source_embed = self.embed(batch_source, batch_lengths)
        # B by dim
        pos_embed = self.embed(pos_result, pos_len)
        # B by dim
        neg_embed = self.embed(neg_result, neg_len)
        # BCE-with-logits against a target of 1 on the margin
        # (source·pos - source·neg), i.e. positives should outscore negatives.
        loss = self._bce_loss(
            utils.row_wise_dot(source_embed , pos_embed )
            - utils.row_wise_dot(source_embed , neg_embed ),
            self.ones[:batch_size])
        return loss
    def score_pair(self, source, target, source_len, target_len):
        """
        :param source: Batchsize by Max_String_Length
        :param target: Batchsize by Max_String_Length
        :return: Batchsize by 1
        """
        source_embed = self.embed_dev(source, source_len)
        target_embed = self.embed_dev(target, target_len)
        scores = utils.row_wise_dot(source_embed, target_embed)
        return scores
    def embed(self, keyword_lists, keyword_lengths):
        """
        :param keyword_lists: Numpy array - Batch_size by max_num_keywords - integers corresponding to keywords in the vocabulary.
        :param keyword_lengths: numpy array - batch_size by 1
        :return: batch_size by embedding dim
        """
        kw_indices = torch.from_numpy(keyword_lists).long()
        kw_lengths = torch.from_numpy(keyword_lengths)
        if self.config.use_cuda:
            kw_indices = kw_indices.cuda()
            kw_lengths = kw_lengths.cuda()
        # get all the embeddings for each keyword
        # B x L x d
        embeddings = self.embedding(kw_indices)
        # make sure that we don't divide by zero
        kw_lengths[kw_lengths == 0] = 1
        # for each sample within the batch, find the average of all of that sample's keyword embeddings
        summed_emb = torch.sum(embeddings, dim=1)
        averaged = torch.div(summed_emb, kw_lengths.float())
        # B x 1 x d
        return averaged
    def embed_dev(self, keyword_lists, keyword_lengths, print_embed=False, batch_size=None):
        """
        :param keyword_lists: Batch_size by max_num_keywords
        :return: batch_size by embedding dim
        """
        # Identical to embed(); the extra parameters are accepted for API
        # compatibility but currently unused.
        return self.embed(keyword_lists, keyword_lengths)
    def score_dev_test_batch(self,
        batch_queries,
        batch_query_lengths,
        batch_targets,
        batch_target_lengths,
        batch_size
        ):
        # Score query/target pairs; batch_size only selects whether the
        # default dev path or the explicit-batch-size path is taken.
        if batch_size == self.config.dev_batch_size:
            source_embed = self.embed_dev(batch_queries, batch_query_lengths)
            target_embed = self.embed_dev(batch_targets, batch_target_lengths)
        else:
            source_embed = self.embed_dev(batch_queries, batch_query_lengths, batch_size=batch_size)
            target_embed = self.embed_dev(batch_targets, batch_target_lengths, batch_size=batch_size)
        scores = utils.row_wise_dot(source_embed, target_embed)
        # NaN != NaN, so this replaces any NaN scores with 0.
        scores[scores != scores] = 0
        return scores
def generate_predictions(config, model, batcher):
    """
    Use the model to make predictions on the data in the batcher
    :param model: Model to use to score reviewer-paper pairs
    :param batcher: Batcher containing data to evaluate (a DevTestBatcher)
    :return: generator of {'source_id', 'target_id', 'label', 'score'} dicts,
        two per sample (one positive, one negative)
    """
    for idx, batch in enumerate(batcher.batches(batch_size=config.dev_batch_size)):
        if idx % 100 == 0:
            print('Predicted {} batches'.format(idx))
        batch_queries = []
        batch_query_lengths = []
        batch_query_ids = []
        batch_targets = []
        batch_target_lengths = []
        batch_target_ids = []
        batch_labels = []
        batch_size = len(batch)
        # Each sample contributes two scoring rows: its positive target
        # (label 1) and its negative target (label 0), sharing the same query.
        for data in batch:
            # append a positive sample
            batch_queries.append(data['source'])
            batch_query_lengths.append(data['source_length'])
            batch_query_ids.append(data['source_id'])
            batch_targets.append(data['positive'])
            batch_target_lengths.append(data['positive_length'])
            batch_target_ids.append(data['positive_id'])
            batch_labels.append(1)
            # append a negative sample
            batch_queries.append(data['source'])
            batch_query_lengths.append(data['source_length'])
            batch_query_ids.append(data['source_id'])
            batch_targets.append(data['negative'])
            batch_target_lengths.append(data['negative_length'])
            batch_target_ids.append(data['negative_id'])
            batch_labels.append(0)
        # NOTE(review): batch_size is wrapped in np.asarray here although
        # score_dev_test_batch compares it to an int — verify intent.
        scores = model.score_dev_test_batch(
            np.asarray(batch_queries),
            np.asarray(batch_query_lengths),
            np.asarray(batch_targets),
            np.asarray(batch_target_lengths),
            np.asarray(batch_size)
        )
        if type(batch_labels) is not list:
            batch_labels = batch_labels.tolist()
        if type(scores) is not list:
            scores = list(scores.cpu().data.numpy().squeeze())
        for source, source_id, target, target_id, label, score in zip(
            batch_queries,
            batch_query_ids,
            batch_targets,
            batch_target_ids,
            batch_labels,
            scores
            ):
            # temporarily commenting out "source" and "target" because I think they are not needed.
            prediction = {
                # 'source': source,
                'source_id': source_id,
                # 'target': target,
                'target_id': target_id,
                'label': label,
                'score': float(score)
            }
            yield prediction
def generate_scores(config, model, batcher):
    """
    Use the model to make predictions on the data in the batcher
    :param model: Model to use to score reviewer-paper pairs
    :param batcher: Batcher containing data to evaluate (a DevTestBatcher)
    :return: generator of {'source_id', 'target_id', 'score'} dicts — like
        generate_predictions but for unlabeled (source, target) pairs
    """
    for idx, batch in enumerate(batcher.batches(batch_size=config.dev_batch_size)):
        if idx % 100 == 0:
            print('Predicted {} batches'.format(idx))
        batch_queries = []
        batch_query_lengths = []
        batch_query_ids = []
        batch_targets = []
        batch_target_lengths = []
        batch_target_ids = []
        batch_size = len(batch)
        for data in batch:
            # append a positive sample
            batch_queries.append(data['source'])
            batch_query_lengths.append(data['source_length'])
            batch_query_ids.append(data['source_id'])
            batch_targets.append(data['target'])
            batch_target_lengths.append(data['target_length'])
            batch_target_ids.append(data['target_id'])
        # NOTE(review): batch_size is wrapped in np.asarray although
        # score_dev_test_batch compares it to an int — verify intent.
        scores = model.score_dev_test_batch(
            np.asarray(batch_queries),
            np.asarray(batch_query_lengths),
            np.asarray(batch_targets),
            np.asarray(batch_target_lengths),
            np.asarray(batch_size)
        )
        if type(scores) is not list:
            scores = list(scores.cpu().data.numpy().squeeze())
        for source, source_id, target, target_id, score in zip(
            batch_queries,
            batch_query_ids,
            batch_targets,
            batch_target_ids,
            scores
            ):
            # temporarily commenting out "source" and "target" because I think they are not needed.
            prediction = {
                # 'source': source,
                'source_id': source_id,
                # 'target': target,
                'target_id': target_id,
                'score': float(score)
            }
            yield prediction
def load_jsonl(filename):
    """Read prediction lines from *filename* and group labels/scores per forum.

    Each input line must carry 'source_id' (forum), 'target_id' (reviewer),
    'label' and 'score'. Returns (label_lists, score_lists); the inner lists
    for a forum are both sorted by reviewer id so they stay index-aligned.
    """
    # Bug fix: defaultdict was used here without being imported anywhere in
    # this module; import it locally so the function is self-contained.
    from collections import defaultdict

    labels_by_forum = defaultdict(dict)
    scores_by_forum = defaultdict(dict)
    for data in utils.jsonl_reader(filename):
        forum = data['source_id']
        reviewer = data['target_id']
        labels_by_forum[forum][reviewer] = data['label']
        scores_by_forum[forum][reviewer] = data['score']
    result_labels = []
    result_scores = []
    for forum, labels_by_reviewer in labels_by_forum.items():
        scores_by_reviewer = scores_by_forum[forum]
        # Sorting both dicts' items by reviewer id keeps labels and scores
        # aligned at the same indices.
        sorted_labels = [label for _, label in sorted(labels_by_reviewer.items())]
        sorted_scores = [score for _, score in sorted(scores_by_reviewer.items())]
        result_labels.append(sorted_labels)
        result_scores.append(sorted_scores)
    return result_labels, result_scores
def eval_map_file(filename):
    """Mean average precision over the labels/scores stored in *filename*."""
    label_lists, score_lists = utils.load_labels(filename)
    return eval_map(label_lists, score_lists)
def eval_hits_at_k_file(filename, k=2, oracle=False):
    """Hits@k over the labels/scores stored in *filename*."""
    label_lists, score_lists = utils.load_labels(filename)
    return eval_hits_at_k(label_lists, score_lists, k=k, oracle=oracle)
|
mspector/expertise | expertise/models/centroid/train.py | <reponame>mspector/expertise<gh_stars>10-100
import datetime, os, sys, csv
from shutil import copyfile, copytree
import pickle
import torch
import torch.optim as optim
import numpy as np
from expertise.models import centroid
from expertise.utils import save_dict_to_json
from expertise.utils.vocab import Vocab
from expertise.utils.batcher import Batcher
from expertise import utils
# Absolute directory of this module (unused in the code visible below).
current_path = os.path.abspath(os.path.dirname(__file__))
def train(config):
    """Train the centroid model, periodically evaluating on the dev set and
    checkpointing the best-MAP model. Returns the (possibly updated) config.

    Expects config to provide: experiment_dir, kp_setup_dir, random_seed,
    batch_size, dev_batch_size, use_cuda, learning_rate, l2penalty,
    eval_every, num_minibatches, name.
    """
    # Create train/ plus its dev_scores/ and dev_predictions/ subdirectories.
    train_dir = os.path.join(config.experiment_dir, 'train')
    if not os.path.isdir(train_dir):
        os.mkdir(train_dir)
    for train_subdir in ['dev_scores', 'dev_predictions']:
        train_subdir_path = os.path.join(train_dir, train_subdir)
        if not os.path.exists(train_subdir_path):
            os.mkdir(train_subdir_path)
    vocab_file = os.path.join(config.kp_setup_dir, 'textrank_vocab.pkl')
    vocab = utils.load_pkl(vocab_file)
    torch.manual_seed(config.random_seed)
    batcher = Batcher(
        input_file=os.path.join(config.experiment_dir, 'setup', 'train_samples.jsonl'))
    batcher_dev = Batcher(
        input_file=os.path.join(config.experiment_dir, 'setup', 'dev_samples.jsonl'))
    model = centroid.Model(config, vocab)
    if config.use_cuda:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate, weight_decay=config.l2penalty)
    # Stats
    best_map = 0
    sum_loss = 0.0
    print('Begin Training')
    # Training loop
    for counter, batch in enumerate(batcher.batches(batch_size=config.batch_size)):
        # Collect the triplet (source, positive, negative) arrays for the batch.
        batch_source = []
        batch_pos = []
        batch_neg = []
        batch_source_lens = []
        batch_pos_lens = []
        batch_neg_lens = []
        for data in batch:
            batch_source.append(np.asarray(data['source']))
            batch_pos.append(np.asarray(data['positive']))
            batch_neg.append(np.asarray(data['negative']))
            batch_source_lens.append(np.asarray(data['source_length'], dtype=np.float32))
            batch_pos_lens.append(np.asarray(data['positive_length'], dtype=np.float32))
            batch_neg_lens.append(np.asarray(data['negative_length'], dtype=np.float32))
        print('num_batches: {}'.format(counter))
        optimizer.zero_grad()
        loss_parameters = (
            np.asarray(batch_source),
            np.asarray(batch_pos),
            np.asarray(batch_neg),
            np.asarray(batch_source_lens, dtype=np.float32),
            np.asarray(batch_pos_lens, dtype=np.float32),
            np.asarray(batch_neg_lens, dtype=np.float32)
        )
        loss = model.compute_loss(*loss_parameters)
        loss.backward()
        # torch.nn.utils.clip_grad_norm(model.parameters(), config.clip)
        optimizer.step()
        # Question: is this if block just for monitoring?
        # NOTE(review): at counter == 0 the divisor (counter / 100) is 0.0 —
        # with numpy operands this prints inf rather than raising; confirm
        # whether the average should skip the first batch.
        if counter % 100 == 0:
            this_loss = loss.cpu().data.numpy()
            sum_loss += this_loss
            print('Processed {} batches, Loss of batch {}: {}. Average loss: {}'.format(
                counter, counter, this_loss, sum_loss / (counter / 100)))
        # Periodic dev-set evaluation (also fires on the very first batch).
        if counter % config.eval_every == 0:
            # is this reset needed?
            batcher_dev.reset()
            predictions = centroid.generate_predictions(config, model, batcher_dev)
            prediction_filename = os.path.join(
                train_dir,
                'dev_predictions/dev.predictions.{}.jsonl'.format(counter))
            utils.dump_jsonl(prediction_filename, predictions)
            print('prediction filename', prediction_filename)
            map_score = float(centroid.eval_map_file(prediction_filename))
            hits_at_1 = float(centroid.eval_hits_at_k_file(prediction_filename, 1))
            hits_at_3 = float(centroid.eval_hits_at_k_file(prediction_filename, 3))
            hits_at_5 = float(centroid.eval_hits_at_k_file(prediction_filename, 5))
            hits_at_10 = float(centroid.eval_hits_at_k_file(prediction_filename, 10))
            score_lines = [
                [config.name, counter, text, data] for text, data in [
                    ('MAP', map_score),
                    ('Hits@1', hits_at_1),
                    ('Hits@3', hits_at_3),
                    ('Hits@5', hits_at_5),
                    ('Hits@10', hits_at_10)
                ]
            ]
            dev_scores_file = os.path.join(
                train_dir,
                'dev_scores/dev.scores.{}.tsv'.format(counter))
            utils.dump_csv(dev_scores_file, score_lines)
            # Checkpoint whenever dev MAP improves; the best model path is
            # recorded on the config so downstream steps can load it.
            if map_score > best_map:
                best_map = map_score
                best_model_path = os.path.join(
                    train_dir, 'model_{}_{}.torch'.format(config.name, 'best'))
                torch.save(model, best_model_path)
                config.update(best_model_path=best_model_path)
                best_scores_file = os.path.join(
                    train_dir,
                    'dev.scores.best.tsv')
                utils.dump_csv(best_scores_file, score_lines)
        # Stop after the configured number of minibatches.
        if counter == config.num_minibatches:
            return config
    return config
|
mspector/expertise | expertise/models/tpms/tpms.py | import os
import csv, json
from collections import defaultdict
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
from expertise.dataset import Dataset
from expertise import utils
import ipdb
def setup(config):
    """Prepare the setup directory and test labels for the TPMS shell model."""
    assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'

    dataset = Dataset(**config.dataset)

    # Make sure <experiment_dir>/setup exists before anything is written there.
    setup_dir = os.path.join(os.path.abspath(config.experiment_dir), 'setup')
    if not os.path.exists(setup_dir):
        os.mkdir(setup_dir)

    # Only the test split is used: TPMS scores are pre-computed, so there is
    # nothing to set up for training or dev evaluation.
    _, _, test_set_ids = utils.split_ids(
        list(dataset.submission_ids), seed=config.random_seed)

    bids_by_forum = utils.get_bids_by_forum(dataset)
    test_labels = utils.format_bid_labels(test_set_ids, bids_by_forum)

    # NOTE(review): labels are dumped under config.setup_dir while the
    # directory created above is derived from config.experiment_dir —
    # presumably these resolve to the same path; verify in the config class.
    utils.dump_jsonl(os.path.join(config.setup_dir, 'test_labels.jsonl'), test_labels)
def train(config):
    """No-op trainer: TPMS scores are pre-computed, so there is nothing to learn."""
    print('Nothing to train. This model is a shell that reads in pre-computed TPMS scores.')
    # Fail fast if the score file the later stages depend on is missing.
    assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def infer(config):
    """No-op inference: scores come directly from the pre-computed TPMS file."""
    print('Nothing to infer. This model is a shell that reads in pre-computed TPMS scores.')
    # Fail fast if the score file the test stage depends on is missing.
    assert os.path.exists(config.tpms_scores_file), 'This model requires a pre-computed tpms score file.'
def test(config):
    """Join pre-computed TPMS scores with the test labels, write the labeled
    scores to the test directory, and save MAP / Hits@k metrics."""
    score_file_path = os.path.join(config.test_dir, 'test_scores.jsonl')
    labels_file_path = os.path.join(config.setup_dir, 'test_labels.jsonl')

    # Index scores by (source, target); the first score seen for a pair wins.
    scores = {}
    for data in utils.jsonl_reader(config.tpms_scores_file):
        per_source = scores.setdefault(data['source_id'], {})
        per_source.setdefault(data['target_id'], data['score'])

    with open(score_file_path, 'w') as w:
        for data in utils.jsonl_reader(labels_file_path):
            paperid = data['source_id']
            userid = data['target_id']
            label = data['label']
            if paperid not in scores:
                continue
            # Pairs missing from the TPMS file default to a score of 0.0.
            score = scores[paperid].get(userid, 0.0)
            # This comparison filters out -inf and NaN scores.
            if float(score) > -float('inf'):
                result = {
                    'source_id': paperid,
                    'target_id': userid,
                    'score': float(score),
                    'label': int(label)
                }
                w.write(json.dumps(result) + '\n')

    (list_of_list_of_labels,
     list_of_list_of_scores) = utils.load_labels(score_file_path)

    metric_values = [('MAP', float(eval_map(list_of_list_of_labels, list_of_list_of_scores)))]
    for k in (1, 3, 5, 10):
        metric_values.append(
            ('Hits@{}'.format(k),
             float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=k))))

    score_lines = [[config.name, text, data] for text, data in metric_values]
    config.test_save(score_lines, 'test.scores.tsv')
|
mspector/expertise | expertise/utils/shuffle_big_file.py | '''
Pseudocode from:
https://blog.janestreet.com/how-to-shuffle-a-big-dataset/?utm_source=share
'''
import argparse
import random
import json
import os
from tqdm import tqdm
from . import utils
def lazy_reader(filepath):
    """Yield the lines of *filepath* one at a time (newlines preserved),
    without materializing the whole file in memory."""
    with open(filepath) as handle:
        yield from handle
def build_folds(sample_iterable, piles_directory, num_piles):
    '''
    Distribute the lines of *sample_iterable* uniformly at random across
    *num_piles* pile files (fold0.jsonl, fold1.jsonl, ...) in
    *piles_directory*.

    First pass of the big-dataset shuffle:
        create empty piles p[0], ..., p[M - 1]
        for i = 0, ..., n - 1 do
            j := uniform random draw from {0, ..., M - 1}
            append x[i] to pile p[j]
    '''
    fp_by_index = {}
    try:
        for pile_index in range(num_piles):
            # Zero-pad labels so the pile files sort lexicographically.
            pile_label = str(pile_index).zfill(len(str(num_piles)))
            pile_path = os.path.join(piles_directory, 'fold{}.jsonl'.format(pile_label))
            fp_by_index[pile_index] = open(pile_path, 'w')

        print('reading from big file')
        for line in sample_iterable:
            fp_by_index[random.randint(0, num_piles - 1)].write(line)
    finally:
        # BUG FIX: the original only closed the handles on the happy path
        # (and iterated .items() while ignoring the key); close them even
        # if reading the input raises.
        for fp in fp_by_index.values():
            fp.close()
def shuffle_and_write(filepath, outfile_pointer):
    """Shuffle one pile file in memory and append its records to the
    already-open *outfile_pointer*."""
    print('shuffling', filepath)
    records = list(utils.jsonl_reader(filepath))
    random.shuffle(records)
    outfile_pointer.writelines(json.dumps(record) + '\n' for record in records)
    outfile_pointer.flush()
    print('wrote {}'.format(filepath))
def integrate_piles(piles_directory, outfile):
    '''
    Concatenate every pile in *piles_directory* into *outfile*, shuffling
    each pile in RAM first.

    Second pass (perhaps done lazily):
        for j = 0, ..., M - 1 do
            shuffle p[j] in RAM with Fisher-Yates or whatever is convenient
            append p[j] to output file
    '''
    with open(outfile, 'w') as outfile_pointer:
        for filename in os.listdir(piles_directory):
            shuffle_and_write(
                os.path.join(piles_directory, filename), outfile_pointer)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('inputfile')
    parser.add_argument('outputfile')
    parser.add_argument('--num_piles', type=int, default=20)
    parser.add_argument('--build', action='store_true')
    parser.add_argument('--integrate', action='store_true')
    args = parser.parse_args()

    # Piles live in a 'piles' subdirectory next to the input file.
    piles_directory = os.path.join(
        os.path.dirname(os.path.abspath(args.inputfile)), 'piles')
    if not os.path.isdir(piles_directory):
        os.mkdir(piles_directory)

    # The two passes can be run separately or back-to-back.
    if args.build:
        build_folds(tqdm(lazy_reader(args.inputfile)), piles_directory, args.num_piles)
    if args.integrate:
        integrate_piles(piles_directory, args.outputfile)
|
mspector/expertise | expertise/__init__.py | <filename>expertise/__init__.py
from .core import *
from . import config
from . import dataset
from . import models
from . import preprocess
from . import setup
from . import test
from . import train
from . import utils
|
mspector/expertise | expertise/models/randomize/randomize.py | <filename>expertise/models/randomize/randomize.py
import numpy as np
class Model():
    """Baseline model that assigns a uniformly random score to every pair."""

    def __init__(self):
        pass

    def fit(self, **kwargs):
        """No-op: a random scorer has nothing to learn."""
        pass

    def score(self):
        """Return a uniform random score drawn from [0, 1)."""
        return np.random.random()
# def __init__(self):
# self.reviewers = set()
# pass
# def fit(self, train_data, archive_data):
# self.reviewers = set([record['reviewer_id'] for record in archive_data])
# def predict(self, note_record):
# scores = [(signature, self.score(signature, note_record['forum'])) for signature in self.reviewers]
# return [signature for signature, score in sorted(scores, key=lambda x: x[1], reverse=True)]
# def score(self, signature, note_record):
# return np.random.random()
|
mspector/expertise | expertise/utils/vocab.py | <reponame>mspector/expertise
import codecs
import numpy as np
from collections import Counter
import csv
import io
# Note, because I used sets when I constructed the reviewer keyphrase file, I end up with
# count_keyphrases being a count based on number of documents containing the keyphrase.
def count_keyphrases(reviewer_keyphrase_file, submission_keyphrase_file, outputfile, min_count):
    """Count keyphrase frequencies from two pickled {id: [keyphrase, ...]}
    files and write a tab-separated (keyphrase, index) vocab to *outputfile*,
    keeping only keyphrases seen at least *min_count* times.

    Note: because the keyphrase files were built from sets, the counts are
    document frequencies (number of documents containing the keyphrase).
    """
    # Local import: the module header does not import pickle.
    import pickle

    keyphrase_counter = Counter()
    with open(reviewer_keyphrase_file, 'rb') as f:
        reviewer_keyphrases = pickle.load(f)
    for kp_list in reviewer_keyphrases.values():
        keyphrase_counter.update(kp_list)
    with open(submission_keyphrase_file, 'rb') as f:
        submission_keyphrases = pickle.load(f)
    for kp_list in submission_keyphrases.values():
        keyphrase_counter.update(kp_list)

    # Start at 2 because indices 0 and 1 are reserved (padding and OOV).
    kp_index = 2
    with open(outputfile, 'w') as f:
        # BUG FIX: the original called csv.writer(delimiter='\t') without the
        # file handle, which raises a TypeError before writing anything.
        writer = csv.writer(f, delimiter='\t')
        for keyphrase, count in keyphrase_counter.items():
            if count >= min_count:
                writer.writerow([keyphrase, kp_index])
                kp_index += 1
class Vocab(object):
    """Bidirectional mapping between vocabulary items (e.g. keyphrases)
    and integer indices.

    Index 0 is reserved for padding and index 1 for out-of-vocabulary
    items; real items are assigned indices starting at 2.
    """

    def __init__(self, min_count=1):
        # item -> index and index -> item mappings.
        self.index_by_item = {}
        self.item_by_index = {}
        # Number of times each item was loaded; dump_csv filters out items
        # rarer than min_count.
        self.count_by_item = Counter()
        self.OOV = "<OOV>"
        self.PADDING_INDEX = 0
        self.OOV_INDEX = 1
        self.next_index = 2
        self.index_by_item[self.OOV] = self.OOV_INDEX
        self.item_by_index[self.OOV_INDEX] = self.OOV
        self.min_count = int(min_count)

    def __len__(self):
        return len(self.index_by_item)

    def dump_csv(self, outfile=None, delimiter='\t', encoding='utf-8'):
        '''
        Write the vocab as (item, index) rows. If *outfile* is given the rows
        are also written to that path; the encoded CSV bytes are always
        returned.
        '''
        def write_to_buffer(output):
            writer = csv.writer(output, delimiter=delimiter)
            for item, count in self.count_by_item.items():
                if count >= self.min_count:
                    writer.writerow([item, self.index_by_item[item]])

        if outfile:
            with open(outfile, 'w') as f:
                write_to_buffer(f)

        output = io.StringIO()
        write_to_buffer(output)
        return output.getvalue().encode(encoding)

    def load_items(self, vocab_items):
        """Add *vocab_items* (iterable of hashable items) to the vocab,
        assigning fresh indices to unseen items and updating counts."""
        assert self.next_index not in self.item_by_index, \
            'self.next_index not properly incremented (this should not happen)'
        for item in vocab_items:
            if item not in self.index_by_item:
                self.index_by_item[item] = self.next_index
                self.item_by_index[self.next_index] = item
                self.next_index += 1
        self.count_by_item.update(vocab_items)

    def to_ints(self, kp_list, max_num_keyphrases=None, padding=True):
        """Convert a list of items to indices (unknown items map to
        OOV_INDEX).

        If *max_num_keyphrases* is given, the list is truncated to that
        length and, when *padding* is true, right-padded with PADDING_INDEX
        up to that length.
        """
        kp_indices = [self.index_by_item.get(kp, self.OOV_INDEX)
                      for kp in kp_list[:max_num_keyphrases]]
        # BUG FIX: the original evaluated `None > len(...)` when
        # max_num_keyphrases was omitted with padding=True, raising TypeError.
        if padding and max_num_keyphrases is not None \
                and max_num_keyphrases > len(kp_indices):
            padding_length = max_num_keyphrases - len(kp_indices)
            kp_indices.extend([self.PADDING_INDEX] * padding_length)
        return kp_indices

    # deprecated
    def to_ints_no_pad(self, string):
        print('function deprecated')

    def to_string(self, list_ints):
        """Convert indices back to a space-separated string of items
        (with a trailing space), skipping padding; unknown indices render
        as the OOV token."""
        # BUG FIX: the original appended `.encode("utf-8")` bytes to a str,
        # which raises TypeError on Python 3.
        stri = ""
        for c in list_ints:
            if c != self.PADDING_INDEX:
                stri += self.item_by_index.get(c, self.OOV) + " "
        return stri
|
mspector/expertise | expertise/models/centroid/__init__.py | <reponame>mspector/expertise
from __future__ import absolute_import
from .centroid import *
from .setup import setup
from .train import train
from .test import test
|
mspector/expertise | expertise/models/centroid_scibert_kps/__init__.py | <reponame>mspector/expertise<filename>expertise/models/centroid_scibert_kps/__init__.py
from __future__ import absolute_import
from .centroid_scibert_kps import *
from .setup_centroid_scibert import setup
from .train_centroid_scibert import train
from .test_centroid_scibert import test
|
mspector/expertise | expertise/utils/summarize_kfold.py | <reponame>mspector/expertise
'''
This script should summarize the results of an experiment across K folds
'''
import argparse
import os
from expertise.config import ModelConfig
import random
import ipdb
import csv
import numpy as np
def get_scores(config, k):
    """Read the test-scores TSV for fold *k* of this experiment and return
    a {measure_name: value} dict (e.g. {'MAP': 0.5, ...})."""
    fold_dir = os.path.join(config.experiment_dir, f'{config.name}{k}')
    scores_path = os.path.join(fold_dir, 'test', 'test.scores.tsv')
    with open(scores_path) as f:
        # Rows look like: <name> <tab> <measure> <tab> <value>
        return {row[1]: float(row[2])
                for row in csv.reader(f, delimiter='\t')}
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path')
    parser.add_argument('num_folds', type=int)
    args = parser.parse_args()

    config = ModelConfig()
    config.update_from_file(os.path.abspath(args.config_path))

    # Collect per-fold {measure: value} dicts, then average each measure.
    fold_results = [get_scores(config, k) for k in range(args.num_folds)]
    summary = {
        measure: np.mean([result[measure] for result in fold_results])
        for measure in ['MAP', 'Hits@1', 'Hits@3', 'Hits@5', 'Hits@10']
    }

    with open(os.path.join(config.experiment_dir, f'{config.name}_summary.csv'), 'w') as f:
        writer = csv.writer(f, delimiter='\t')
        for measure, value in summary.items():
            writer.writerow([config.name, measure, value])
|
mspector/expertise | expertise/models/tpms/__init__.py | <filename>expertise/models/tpms/__init__.py
from .tpms import *
|
mspector/expertise | expertise/preprocess/bert/setup_bert_lookup.py | <gh_stars>10-100
import csv, importlib, itertools, json, math, os, pickle, random
from collections import defaultdict
import numpy as np
import openreview
from expertise.utils.batcher import Batcher
from expertise.dataset import Dataset
from expertise.config import ModelConfig
import expertise.utils as utils
from expertise.utils.data_to_sample import data_to_sample
import argparse
import torch
def setup_bert_lookup(config):
    """Build a {item_id: torch.Tensor} lookup of per-document CLS vectors
    from the .npy feature files under config.bert_features_dir.

    Each tensor has shape (num_documents, feature_dim); items with no
    documents get a single all-zero 768-dim vector.
    """
    def get_values(doc_feature):
        # First entry is the [CLS] token; take its top-layer vector.
        cls = doc_feature['features'][0]
        values = cls['layers'][0]['values']
        return values

    print('starting setup')
    features_dir = config.bert_features_dir
    archive_features_dir = os.path.join(features_dir, 'archives-features')
    submission_features_dir = os.path.join(features_dir, 'submissions-features')

    bert_lookup = {}
    for target_dir in [archive_features_dir, submission_features_dir]:
        for filename in os.listdir(target_dir):
            print(filename)
            item_id = filename.replace('.npy', '')
            filepath = os.path.join(target_dir, filename)
            # BUG FIX: these .npy files store pickled Python objects (dicts);
            # numpy >= 1.16.3 refuses to load them unless allow_pickle=True.
            archive_features = np.load(filepath, allow_pickle=True)
            archive_values = [get_values(doc_feature) for doc_feature in archive_features]
            if len(archive_values) == 0:
                archive_values = [np.zeros(768)]
            bert_lookup[item_id] = torch.Tensor(np.array(archive_values))
    return bert_lookup
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help="a config file for a model")
    args = parser.parse_args()

    config_path = os.path.abspath(args.config_path)
    config = ModelConfig()
    config.update_from_file(config_path)

    # Ensure a 'setup' directory exists next to the config file.
    setup_path = os.path.join(os.path.dirname(config_path), 'setup')
    if not os.path.isdir(setup_path):
        os.mkdir(setup_path)

    utils.dump_pkl(
        os.path.join(config.setup_dir, 'bert_lookup_cls.pkl'),
        setup_bert_lookup(config))
|
mspector/expertise | expertise/preprocess/bert/__init__.py | <filename>expertise/preprocess/bert/__init__.py
from .core import get_embeddings
|
mspector/expertise | expertise/evaluators/mean_avg_precision.py | <reponame>mspector/expertise
"""
Copyright (C) 2017-2018 University of Massachusetts Amherst.
This file is part of "learned-string-alignments"
http://github.com/iesl/learned-string-alignments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from collections import defaultdict
from sklearn import metrics
import numpy as np
from .. import utils
def eval_map(list_of_list_of_labels, list_of_list_of_scores, randomize=True):
    '''
    Compute Mean Average Precision.

    The i^th element of each list is an array of labels or scores for the
    i^th test example; examples with no positive label are skipped.

    :param list_of_list_of_labels: Binary relevance labels. One list per example.
    :param list_of_list_of_scores: Predicted relevance scores. One list per example.
    :param randomize: unused; kept for interface compatibility.
    :return: the mean average precision
    '''
    assert len(list_of_list_of_labels) == len(list_of_list_of_scores)
    avg_precision_scores = [
        metrics.average_precision_score(labels_list, scores_list)
        for labels_list, scores_list
        in zip(list_of_list_of_labels, list_of_list_of_scores)
        if sum(labels_list) > 0
    ]
    # NOTE(review): raises ZeroDivisionError if no example has a positive
    # label — confirm callers guarantee at least one positive.
    return sum(avg_precision_scores) / len(avg_precision_scores)
|
mspector/expertise | expertise/utils/standard_test.py | <filename>expertise/utils/standard_test.py<gh_stars>0
import os
from collections import defaultdict
from expertise import utils
from expertise.dataset import Dataset
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
from tqdm import tqdm
def test(config):
    """Label the inferred scores with the dataset's bid labels, save the
    labeled scores, and write MAP / Hits@k metrics to test.scores.tsv."""
    dataset = Dataset(**config.dataset)

    # forum -> {reviewer signature -> 0/1 label}, derived from bids.
    labels_by_reviewer_by_forum = defaultdict(dict)
    for bid in dataset.bids():
        label = 1 if bid.tag in dataset.positive_bid_values else 0
        labels_by_reviewer_by_forum[bid.forum][bid.signatures[0]] = label

    inferred_scores_path = os.path.join(config.infer_dir, config.name + '-scores.jsonl')

    labeled_data_list = []
    for data in utils.jsonl_reader(inferred_scores_path):
        forum = data['source_id']
        reviewer = data['target_id']
        score = float(data['score'])
        # `not score >= 0.0` also catches NaN, which plain `< 0.0` would not.
        if not score >= 0.0:
            score = 0.0
        # Only keep (forum, reviewer) pairs that actually have a bid label.
        if reviewer in labels_by_reviewer_by_forum[forum]:
            label = labels_by_reviewer_by_forum[forum][reviewer]
            labeled_data = {k:v for k,v in data.items()}
            labeled_data.update({'label': label, 'score': score})
            labeled_data_list.append(labeled_data)

    config.test_save(labeled_data_list, 'score_labels.jsonl')

    # Re-read what was just saved and compute ranking metrics over it.
    labels_file = config.test_path('score_labels.jsonl')
    list_of_list_of_labels, list_of_list_of_scores = utils.load_labels(labels_file)

    map_score = float(eval_map(list_of_list_of_labels, list_of_list_of_scores))
    hits_at_1 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=1))
    hits_at_3 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=3))
    hits_at_5 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=5))
    hits_at_10 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=10))

    # One (name, measure, value) row per metric.
    score_lines = [
        [config.name, text, data] for text, data in [
            ('MAP', map_score),
            ('Hits@1', hits_at_1),
            ('Hits@3', hits_at_3),
            ('Hits@5', hits_at_5),
            ('Hits@10', hits_at_10)
        ]
    ]
    config.test_save(score_lines, 'test.scores.tsv')
|
mspector/expertise | expertise/preprocess/pos_regex.py | <gh_stars>10-100
from __future__ import print_function
import os
import json
import itertools
import string
import nltk
from .. import utils
def keyphrases(text):
    """Return a list of normalized keyphrases extracted from the string
    *text*.

    Thin wrapper around extract_candidate_chunks with default settings.
    """
    return extract_candidate_chunks(text)
# Deprecated
def read_keyphrases(data_dir):
    '''
    Deprecated. Formerly yielded (file_id, keyphrases) pairs from a directory
    of .jsonl reviewer-archive / submission files; now only announces its
    deprecation and returns None. (The old commented-out implementation was
    removed.)
    '''
    print('function deprecated')
def preprocess(text, mode='chunks', stemmer=None):
    """Dispatch to chunk-level or word-level candidate extraction.

    Returns None for any unrecognized *mode* (matching historical behavior).
    """
    extractors = {
        'chunks': extract_candidate_chunks,
        'words': extract_candidate_words,
    }
    extractor = extractors.get(mode)
    if extractor is not None:
        return extractor(text, stemmer=stemmer)
def extract_candidate_chunks(text, grammar=r'NP: {<JJ>*<NN>}', delimiter='_', stemmer=None):
    """Extract noun-phrase chunk candidates from *text* using the given
    POS *grammar*, joining chunk words with *delimiter* and lower-casing.

    Candidates that are stop words or consist entirely of punctuation are
    discarded. If *stemmer* is given, each word is stemmed before joining.
    """
    # Exclusion sets for the final filter.
    punct = set(string.punctuation)
    stop_words = set(nltk.corpus.stopwords.words('english'))

    # Tokenize, POS-tag, and chunk with the regular-expression grammar.
    chunker = nltk.chunk.regexp.RegexpParser(grammar)
    tagged_sents = nltk.pos_tag_sents(
        nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(text))
    all_chunks = list(itertools.chain.from_iterable(
        nltk.chunk.tree2conlltags(chunker.parse(tagged_sent))
        for tagged_sent in tagged_sents))

    def stem(word):
        # Identity when no stemmer is supplied.
        if stemmer is None:
            return word
        return stemmer.stem(word)

    # Join the words of each in-chunk run into one candidate phrase.
    candidates = []
    for in_chunk, group in itertools.groupby(
            all_chunks, lambda triple: triple[2] != 'O'):
        if not in_chunk:
            continue
        words = []
        for word, pos, chunk in group:
            try:
                word = stem(word)
            except IndexError:
                print("word unstemmable:", word)
            words.append(word)
        candidates.append(delimiter.join(words).lower())

    return [cand for cand in candidates
            if cand not in stop_words and not all(char in punct for char in cand)]
# Adjective and noun POS tags kept as candidates. Immutable so the default
# argument below cannot be mutated across calls.
_GOOD_TAGS = frozenset(['JJ', 'JJR', 'JJS', 'NN', 'NNP', 'NNS', 'NNPS'])


def extract_candidate_words(text, good_tags=_GOOD_TAGS, stemmer=None):
    """Extract lower-cased adjective/noun candidate words from *text*.

    Words that are stop words or entirely punctuation are discarded.
    If *stemmer* is given, each candidate is stemmed after lower-casing.

    (Fix: the original used a mutable set literal as the default for
    *good_tags* — the classic shared-mutable-default pitfall — and compared
    the stemmer with ``!= None``.)
    """
    # Exclusion sets for the filter below.
    punct = set(string.punctuation)
    stop_words = set(nltk.corpus.stopwords.words('english'))

    # Tokenize and POS-tag all sentences lazily.
    tagged_words = itertools.chain.from_iterable(
        nltk.pos_tag_sents(nltk.word_tokenize(sent)
                           for sent in nltk.sent_tokenize(text)))

    # Normalize once instead of duplicating the whole comprehension per branch.
    normalize = stemmer.stem if stemmer is not None else (lambda w: w)

    return [normalize(word.lower()) for word, tag in tagged_words
            if tag in good_tags and word.lower() not in stop_words
            and not all(char in punct for char in word)]
|
mspector/expertise | expertise/models/bert/__init__.py | from __future__ import absolute_import
from .bert import *
|
mspector/expertise | expertise/utils/prepare_kfold.py | <filename>expertise/utils/prepare_kfold.py
'''
This script should "multiply" a directory's config file across K folds
'''
import argparse
import os
from expertise.config import ModelConfig
import random
import ipdb
def prepare_kfold(args, k):
    """Clone the experiment config into a per-fold subdirectory for fold *k*.

    Creates <experiment_dir>/<name><k>/, points the config's experiment_dir
    at it, sets the fold index as the random seed (so each fold gets a
    different data split), and saves the config there.

    (Cleanup: removed the unused config_path/experiment_path locals and the
    dead commented-out path-rewriting code.)
    """
    config = ModelConfig()
    config.update_from_file(args.config_path)

    new_experiment_dir = os.path.join(config.experiment_dir, f'{config.name}{k}')
    if not os.path.exists(new_experiment_dir):
        os.mkdir(new_experiment_dir)
    config.update(experiment_dir=new_experiment_dir)

    # NOTE(review): os.path.join returns args.config_path unchanged when it
    # is absolute — confirm callers pass a relative config path.
    new_config_path = os.path.join(new_experiment_dir, args.config_path)

    config.update(random_seed=k)
    print('new_config_path', new_config_path)
    config.save(new_config_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path')
    parser.add_argument('num_folds', type=int)
    args = parser.parse_args()

    # Generate one fold directory (and config) per fold index.
    for fold_index in range(args.num_folds):
        prepare_kfold(args, fold_index)
|
mspector/expertise | expertise/models/centroid_scibert_kps/test_centroid_scibert.py | <filename>expertise/models/centroid_scibert_kps/test_centroid_scibert.py
import torch
import os
from expertise.utils.batcher import Batcher
from expertise.models import centroid_scibert_kps as centroid_scibert
from expertise import utils
def test(config):
    """Evaluate the best saved centroid-scibert model on the test samples
    and write predictions plus MAP / Hits@k scores to the test directory."""
    print('config.best_model_path', config.best_model_path)
    model = torch.load(config.best_model_path)

    test_samples_path = os.path.join(config.setup_dir, 'test_samples.jsonl')
    batcher = Batcher(input_file=config.setup_path(test_samples_path))

    # Lookup table of torch.Tensor objects, keyed by user/paper ID.
    bert_lookup = utils.load_pkl(os.path.join(config.kp_setup_dir, 'bert_lookup.pkl'))

    predictions = centroid_scibert.generate_predictions(config, model, batcher, bert_lookup)
    prediction_filename = config.test_save(predictions,
                                           'test.predictions.jsonl')
    print('prediction filename', prediction_filename)

    metric_values = [('MAP', float(centroid_scibert.eval_map_file(prediction_filename)))]
    for k in (1, 3, 5, 10):
        metric_values.append(
            ('Hits@{}'.format(k),
             float(centroid_scibert.eval_hits_at_k_file(prediction_filename, k))))

    score_lines = [[config.name, text, data] for text, data in metric_values]
    config.test_save(score_lines, 'test.scores.tsv')
|
mspector/expertise | expertise/utils/batcher.py | """
Copyright (C) 2017-2018 University of Massachusetts Amherst.
This file is part of "learned-string-alignments"
http://github.com/iesl/learned-string-alignments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import numpy as np
import time
import csv
import sys, os
import random
import ast
from . import utils
csv.field_size_limit(sys.maxsize)
class Batcher(object):
    """Loads training examples from a .tsv/.csv or .jsonl file and serves
    them in minibatches.

    Attributes:
        data: list of loaded examples — one list of column values per row
            for csv/tsv input, one dict per line for jsonl input.
        num_examples: number of examples loaded.
        input_file: path the data was loaded from.
        triplet: flag kept for interface compatibility (triplet batching
            itself is deprecated below).
    """

    def __init__(self, input_file, triplet=False):
        self.data = []
        self.num_examples = 0
        self.triplet = triplet
        self.input_file = input_file
        self.load_data(self.input_file)

    def reset(self):
        """Rewind the batch cursor to the beginning of the data."""
        self.start_index = 0

    def shuffle_data(self):
        """Shuffle self.data (via a numpy permutation) and return it."""
        print('shuffling {} lines via the following permutation'.format(self.num_examples))
        self.data = np.random.permutation(self.data).tolist()
        return self.data

    def load_data(self, input_file, delimiter='\t'):
        """Load all examples from *input_file* into self.data.

        .tsv/.csv files yield one list of column values per example;
        .jsonl files yield one dict per example.
        """
        self.input_file = input_file
        self.data = []
        with open(input_file) as f:
            if any(input_file.endswith(ext) for ext in ['.tsv', '.csv']):
                reader = csv.reader(f, delimiter=delimiter)
                for row in reader:
                    # BUG FIX: the original did
                    # self.data[column_index].append(item) on the freshly
                    # emptied list, which raised IndexError on the very
                    # first row. Store each parsed row as one example.
                    self.data.append(row)
                    self.num_examples += 1
            if input_file.endswith('.jsonl'):
                for data_dict in utils.jsonl_reader(input_file):
                    self.data.append(data_dict)
                self.num_examples = len(self.data)

    def batches(self, batch_size, delimiter='\t'):
        """Yield lists of up to *batch_size* examples, re-reading the jsonl
        input file lazily (ignores self.data and any in-memory shuffle)."""
        batch = []
        self.start_index = 0
        for data in utils.jsonl_reader(self.input_file):
            batch.append(data)
            self.start_index += 1
            if self.start_index % batch_size == 0 or self.start_index == self.num_examples:
                yield batch
                batch = []

    # ------------------------------------------------------------------
    # Everything below is deprecated. The stubs are kept so old call sites
    # keep resolving (they print a notice and return None); the dead
    # commented-out implementations were removed.
    # ------------------------------------------------------------------

    # deprecated
    def batches_triplet(self, batch_size, delimiter='\t'):
        print('function deprecated')

    # function deprecated
    def write_data_triplet(self, delimiter='\t'):
        print('function deprecated')

    # function deprecated
    def shuffle_data_triplet(self):
        print('function deprecated')

    # function deprecated
    def load_data_triplet(self, input_file, delimiter='\t'):
        print('function deprecated')

    def get_next_batch_pairwise(self):
        """Deprecated pairwise batch generator (no longer implemented)."""
        print('function deprecated')

    def shuffle_data_pairwise(self):
        """Deprecated pairwise shuffle (no longer implemented)."""
        print('function deprecated')

    def load_data_pairwise(self):
        """Deprecated pairwise loader (no longer implemented)."""
        print('function deprecated')
|
mspector/expertise | expertise/models/centroid_scibert/centroid_scibert.py | <gh_stars>10-100
import numpy as np
# import fastText as ft
import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss
from expertise import utils
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
import ipdb
class Model(torch.nn.Module):
    """Centroid model over precomputed SciBERT vectors.

    A single linear layer projects each BERT vector (``config.bert_dim``)
    into the model embedding space (``config.embedding_dim``); pairs are
    scored by a row-wise dot product, and training uses a BPR-style
    objective via ``BCEWithLogitsLoss``.
    """

    def __init__(self, config, vocab):
        super(Model, self).__init__()
        self.config = config
        self.vocab = vocab

        # Projects a SciBERT vector into the centroid embedding dimension.
        self.linear_layer = nn.Linear(config.bert_dim, config.embedding_dim)

        # Vector of ones: the target for the BPR-style loss (positive pair
        # should out-score the negative pair).
        if self.config.use_cuda:
            self.ones = torch.ones(config.batch_size, 1).cuda()
        else:
            self.ones = torch.ones(config.batch_size, 1)

        self._bce_loss = BCEWithLogitsLoss()

    def compute_loss(self, batch_source, batch_pos, batch_neg):
        """Compute the BPR-style loss for a batch of triplets.

        :param batch_source: batch of source vectors (B x bert_dim)
        :param batch_pos: vectors of true matches for the sources
        :param batch_neg: vectors of false matches for the sources
        :return: scalar loss tensor
        """
        batch_size = len(batch_source)
        source_embed = self.embed(batch_source)  # B x embedding_dim
        pos_embed = self.embed(batch_pos)        # B x embedding_dim
        neg_embed = self.embed(batch_neg)        # B x embedding_dim
        # Push (pos score - neg score) toward the "1" label.
        loss = self._bce_loss(
            utils.row_wise_dot(source_embed, pos_embed)
            - utils.row_wise_dot(source_embed, neg_embed),
            self.ones[:batch_size])
        return loss

    def score_pair(self, source, target, source_len, target_len):
        """Score source/target pairs by embedded dot product.

        :param source: Batchsize by Max_String_Length
        :param target: Batchsize by Max_String_Length
        :return: Batchsize by 1
        """
        # NOTE(review): source_len/target_len are passed positionally into
        # embed_dev's ``print_embed`` slot and are effectively ignored.
        source_embed = self.embed_dev(source, source_len)
        target_embed = self.embed_dev(target, target_len)
        scores = utils.row_wise_dot(source_embed, target_embed)
        return scores

    def embed(self, vector_batch):
        """Project a batch of BERT vectors into the embedding space.

        :param vector_batch: torch.Tensor - Batch_size by bert_dim
        :return: torch.Tensor - Batch_size by embedding_dim
        """
        if self.config.use_cuda:
            vector_batch = vector_batch.cuda()
        # BUGFIX: removed a leftover debug try/except that dropped into the
        # ipdb debugger on AttributeError before re-raising.
        return self.linear_layer(vector_batch)

    def embed_dev(self, vector_batch, print_embed=False, batch_size=None):
        """Dev/test-time embedding; identical to ``embed``.

        ``print_embed`` and ``batch_size`` are accepted for interface
        compatibility but unused.
        """
        return self.embed(vector_batch)

    def score_dev_test_batch(self,
        batch_queries,
        batch_targets,
        batch_size
        ):
        """Score query/target pairs for a dev or test batch."""
        if batch_size == self.config.dev_batch_size:
            source_embed = self.embed_dev(batch_queries)
            target_embed = self.embed_dev(batch_targets)
        else:
            source_embed = self.embed_dev(batch_queries, batch_size=batch_size)
            target_embed = self.embed_dev(batch_targets, batch_size=batch_size)
        scores = utils.row_wise_dot(source_embed, target_embed)
        # Replace NaN scores (x != x only for NaN) with 0.
        scores[scores != scores] = 0
        return scores
def generate_predictions(config, model, batcher, bert_lookup):
    """
    Use the model to make predictions on the data in the batcher.

    For every record in a batch, one positive and one negative
    (source, target) pair is scored.

    :param model: Model to use to score reviewer-paper pairs
    :param batcher: Batcher containing data to evaluate (a DevTestBatcher)
    :param bert_lookup: dict of torch.Tensor keyed by user/paper ID
    :return: generator of prediction dicts with source_id, target_id, label, score
    """
    for idx, batch in enumerate(batcher.batches(batch_size=config.dev_batch_size)):
        if idx % 100 == 0:
            print('Predicted {} batches'.format(idx))

        # CLEANUP: removed the *_lengths accumulators, which were filled but
        # never read.
        batch_queries = []
        batch_query_ids = []
        batch_targets = []
        batch_target_ids = []
        batch_labels = []

        batch_size = len(batch)

        for data in batch:
            # append a positive sample
            batch_queries.append(bert_lookup[data['source_id']])
            batch_query_ids.append(data['source_id'])
            batch_targets.append(bert_lookup[data['positive_id']])
            batch_target_ids.append(data['positive_id'])
            batch_labels.append(1)

            # append a negative sample
            batch_queries.append(bert_lookup[data['source_id']])
            batch_query_ids.append(data['source_id'])
            batch_targets.append(bert_lookup[data['negative_id']])
            batch_target_ids.append(data['negative_id'])
            batch_labels.append(0)

        # CLEANUP: pass batch_size as a plain int (was np.asarray(batch_size));
        # score_dev_test_batch only compares it with ==.
        scores = model.score_dev_test_batch(
            torch.stack(batch_queries),
            torch.stack(batch_targets),
            batch_size
        )

        if type(scores) is not list:
            scores = list(scores.cpu().data.numpy().squeeze())

        for source_id, target_id, label, score in zip(
            batch_query_ids,
            batch_target_ids,
            batch_labels,
            scores
        ):
            prediction = {
                'source_id': source_id,
                'target_id': target_id,
                'label': label,
                'score': float(score)
            }

            yield prediction
def load_jsonl(filename):
    """Group prediction labels and scores by forum from a .jsonl file.

    Each record must carry source_id (forum), target_id (reviewer),
    label, and score. Returns two parallel lists-of-lists, one of labels
    and one of scores, each inner list sorted by reviewer ID.
    """
    # BUGFIX: defaultdict was used but never imported at module level in this
    # file, so this function raised NameError; import it locally.
    from collections import defaultdict

    labels_by_forum = defaultdict(dict)
    scores_by_forum = defaultdict(dict)

    for data in utils.jsonl_reader(filename):
        forum = data['source_id']
        reviewer = data['target_id']
        label = data['label']
        score = data['score']
        labels_by_forum[forum][reviewer] = label
        scores_by_forum[forum][reviewer] = score

    result_labels = []
    result_scores = []

    for forum, labels_by_reviewer in labels_by_forum.items():
        scores_by_reviewer = scores_by_forum[forum]

        reviewer_scores = list(scores_by_reviewer.items())
        reviewer_labels = list(labels_by_reviewer.items())

        # Sort both lists by reviewer ID so labels and scores stay aligned.
        sorted_labels = [label for _, label in sorted(reviewer_labels)]
        sorted_scores = [score for _, score in sorted(reviewer_scores)]

        result_labels.append(sorted_labels)
        result_scores.append(sorted_scores)

    return result_labels, result_scores
def eval_map_file(filename):
    """Compute mean average precision from a labeled prediction file."""
    labels, scores = utils.load_labels(filename)
    return eval_map(labels, scores)
def eval_hits_at_k_file(filename, k=2, oracle=False):
    """Compute Hits@K from a labeled prediction file."""
    labels, scores = utils.load_labels(filename)
    return eval_hits_at_k(labels, scores, k=k, oracle=oracle)
|
mspector/expertise | expertise/setup/__main__.py | <gh_stars>10-100
'''
'''
import argparse
import os
import json
from collections import OrderedDict
import expertise
from expertise.config import ModelConfig
from .core import setup_model
# Parse the config path plus any model-specific trailing parameters.
parser = argparse.ArgumentParser()
parser.add_argument('config_path', help="a config file for a model")
parser.add_argument('additional_params', nargs=argparse.REMAINDER)
args = parser.parse_args()

# Load the config, run the selected model's setup step, and persist the
# (possibly updated) config back to disk.
config = setup_model(args)
model = expertise.load_model(config.model)
config = model.setup(config, *args.additional_params)
# BUGFIX: `config_path` was undefined here (NameError); save to the path
# supplied on the command line.
config.save(args.config_path)
|
mspector/expertise | expertise/evaluators/recall_at_m.py | from __future__ import print_function, absolute_import
import sys, os
from operator import itemgetter
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
class Evaluator():
    """
    Evaluates ranked reviewer lists with recall@M:

        recall_at_m =
            (number of papers the reviewer bid positively on in the top M) /
            (total number of papers the reviewer bid positively on)

    The bids themselves are supplied by the caller at construction time.
    """

    def __init__(self, bids_by_forum, m=50):
        # Bids, keyed by forum ID; each value is a list of bid objects with
        # .signatures and .tag attributes.
        self.bids_by_forum = bids_by_forum
        # Evaluate recall at every M in [0, m).
        self.m_values = range(m)

    def recall_at_m(self, ranked_signatures, forum, m):
        '''
        Among the top M users in the model's rank list,
        what percentage of all high bidders are included?
        '''
        positive_signatures = [
            b['signature'] for b in self.get_pos_bids_for_forum(forum)]

        if not positive_signatures:
            # Convention: with no positive bids, recall is perfect.
            return 1.0

        # Preserved from the original: the full bid list is fetched here even
        # though only the raw ranking is used below.
        bid_signatures = [
            b['signature'] for b in self.get_all_bids_for_forum(forum)]

        top_m = ranked_signatures[:m]
        hits = [sig for sig in positive_signatures if sig in top_m]
        return float(len(hits)) / float(len(positive_signatures))

    def evaluate(self, ranklists):
        """
        Evaluate the model against one ranked list per forum.

        Arguments
        @ranklists: list of (forum_id, rank_list) tuples, where rank_list is
        a list of 'signature;score' strings ordered by expertise score.

        Returns a generator yielding (forum_id, scores) where scores holds
        recall@M for every M in self.m_values.
        """
        for forum, rank_list in ranklists:
            # Strip the trailing ';score' portion of each entry.
            signatures_in_order = [entry.split(";")[0] for entry in rank_list]
            recall_curve = [
                self.recall_at_m(signatures_in_order, forum, m)
                for m in self.m_values]
            yield forum, recall_curve

    def get_all_bids_for_forum(self, forum_id):
        """ Returns all bids for the forum_id """
        return [
            {"signature": bid.signatures[0], "bid": bid.tag}
            for bid in self.bids_by_forum[forum_id]]

    def get_pos_bids_for_forum(self, forum_id):
        """ Get all of the positive bids for a forum """
        positive_labels = ["I want to review", "I can review"]
        return [
            bid for bid in self.get_all_bids_for_forum(forum_id)
            if bid["bid"] in positive_labels]
|
mspector/expertise | expertise/preprocess/textrank/textrank_words.py | from .textrank import TextRank
def keyphrases(text, include_scores=False, include_tokenlist=False):
    """Extract top TextRank keyphrases from *text*.

    Returns the keyphrases (as words, or (word, score) tuples when
    include_scores is True); when include_tokenlist is True, also returns
    the flat list of all analyzed tokens.
    """
    ranker = TextRank()
    ranker.analyze(text, chunks=False)

    # Flatten the analyzed sentences into one token list.
    tokenlist = [token for sentence in ranker.sentences for token in sentence]

    top_tokens = []
    for word, score in ranker.keyphrases():
        top_tokens.append((word, score) if include_scores else word)

    if include_tokenlist:
        return top_tokens, tokenlist
    return top_tokens
|
mspector/expertise | expertise/core.py | <filename>expertise/core.py
import pkgutil
from . import models
def model_importers():
    """Map each model module name to its pkgutil importer."""
    importers = {}
    for importer, module_name, _ in pkgutil.iter_modules(models.__path__):
        importers[module_name] = importer
    return importers
def available_models():
    """Return the names of the model modules that can be loaded."""
    return list(model_importers())
def load_model(module_name):
    """Import and return the model module named *module_name*.

    BUGFIX: the previous implementation used ``importer.find_module()``,
    which has been deprecated since Python 3.4 and was removed in 3.12.
    Use ``find_spec()`` with ``importlib.util`` instead.
    """
    import importlib.util

    spec = model_importers()[module_name].find_spec(module_name)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
mspector/expertise | expertise/preprocess/words.py | from expertise.preprocess.textrank import TextRank
def keyphrases(text):
    """Segment *text* into sentences and return a flat list of word tokens."""
    segmented = TextRank().sentence_segment(text)
    tokens = []
    for sentence in segmented:
        tokens.extend(sentence)
    return tokens
|
mspector/expertise | expertise/models/centroid_scibert/train_centroid_scibert.py | <reponame>mspector/expertise
import datetime, os, sys, csv
from shutil import copyfile, copytree
import pickle
import torch
import torch.optim as optim
import numpy as np
from expertise.models import centroid_scibert
from expertise import utils
from expertise.utils.vocab import Vocab
from expertise.utils.batcher import Batcher
from expertise.config import Config
current_path = os.path.abspath(os.path.dirname(__file__))
#def train(setup_path, train_path, config, dataset):
def train(config):
    """Train the centroid-scibert model described by *config*.

    Reads train/dev samples from config.setup_dir, looks up precomputed
    SciBERT vectors by ID, optimizes the model with Adam on a BPR-style
    loss, periodically evaluates on dev, and checkpoints the best model
    (by MAP) into config.train_dir.
    """
    # Create output subdirectories for dev-time scores and predictions.
    for train_subdir in ['dev_scores', 'dev_predictions']:
        train_subdir_path = os.path.join(config.train_dir, train_subdir)
        if not os.path.exists(train_subdir_path):
            os.mkdir(train_subdir_path)

    vocab_path = os.path.join(
        config.kp_setup_dir, 'textrank_vocab.pkl')

    vocab = utils.load_pkl(vocab_path)

    torch.manual_seed(config.random_seed)

    train_samples_path = os.path.join(
        config.setup_dir, 'train_samples.jsonl')

    dev_samples_path = os.path.join(
        config.setup_dir, 'dev_samples.jsonl')

    print('reading train samples from ', train_samples_path)
    batcher = Batcher(input_file=train_samples_path)
    batcher_dev = Batcher(input_file=dev_samples_path)

    model = centroid_scibert.Model(config, vocab)
    if config.use_cuda:
        model = model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate, weight_decay=config.l2penalty)

    # Stats
    best_map = 0
    sum_loss = 0.0
    num_loss_samples = 0  # how many batch losses have been added to sum_loss

    # a lookup table of torch.Tensor objects, keyed by user/paper ID.
    bert_lookup = utils.load_pkl(os.path.join(config.kp_setup_dir, 'bert_lookup.pkl'))

    print('Begin Training')

    # Training loop
    for counter, batch in enumerate(batcher.batches(batch_size=config.batch_size)):
        batch_source = []
        batch_pos = []
        batch_neg = []

        for data in batch:
            batch_source.append(bert_lookup[data['source_id']])
            batch_pos.append(bert_lookup[data['positive_id']])
            batch_neg.append(bert_lookup[data['negative_id']])

        print('num_batches: {}'.format(counter))
        optimizer.zero_grad()

        loss_parameters = (
            torch.stack(batch_source),
            torch.stack(batch_pos),
            torch.stack(batch_neg)
        )

        loss = model.compute_loss(*loss_parameters)
        loss.backward()

        # torch.nn.utils.clip_grad_norm(model.parameters(), config.clip)
        optimizer.step()

        # Loss monitoring: sample the loss every 100 batches.
        if counter % 100 == 0:
            this_loss = loss.cpu().data.numpy()
            sum_loss += this_loss
            num_loss_samples += 1
            # BUGFIX: the running average was sum_loss / (counter / 100),
            # which raises ZeroDivisionError on the very first batch
            # (counter == 0). Average over the number of sampled losses.
            print('Processed {} batches, Loss of batch {}: {}. Average loss: {}'.format(
                counter, counter, this_loss, sum_loss / num_loss_samples))

        # Periodic dev evaluation (also fires at counter == 0).
        if counter % config.eval_every == 0:

            # is this reset needed?
            batcher_dev.reset()

            predictions = centroid_scibert.generate_predictions(config, model, batcher_dev, bert_lookup)
            prediction_filename = config.train_save(predictions,
                'dev_predictions/dev.predictions.{}.jsonl'.format(counter))

            print('prediction filename', prediction_filename)
            map_score = float(centroid_scibert.eval_map_file(prediction_filename))
            hits_at_1 = float(centroid_scibert.eval_hits_at_k_file(prediction_filename, 1))
            hits_at_3 = float(centroid_scibert.eval_hits_at_k_file(prediction_filename, 3))
            hits_at_5 = float(centroid_scibert.eval_hits_at_k_file(prediction_filename, 5))
            hits_at_10 = float(centroid_scibert.eval_hits_at_k_file(prediction_filename, 10))

            score_lines = [
                [config.name, counter, text, data] for text, data in [
                    ('MAP', map_score),
                    ('Hits@1', hits_at_1),
                    ('Hits@3', hits_at_3),
                    ('Hits@5', hits_at_5),
                    ('Hits@10', hits_at_10)
                ]
            ]
            config.train_save(score_lines, 'dev_scores/dev.scores.{}.tsv'.format(counter))

            # Checkpoint whenever dev MAP improves.
            if map_score > best_map:
                best_map = map_score

                best_model_path = os.path.join(
                    config.train_dir, 'model_{}_{}.torch'.format(config.name, 'best'))

                torch.save(model, best_model_path)
                config.best_model_path = best_model_path
                config.best_map_score = best_map
                config.hits_at_1 = hits_at_1
                config.hits_at_3 = hits_at_3
                config.hits_at_5 = hits_at_5
                config.hits_at_10 = hits_at_10
                config.save_config()

                config.train_save(score_lines, 'dev.scores.best.tsv')

        if counter == config.num_minibatches:
            break
if __name__ == '__main__':
    # BUGFIX: argparse was referenced but never imported at module level.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help='a config file for a model')
    args = parser.parse_args()

    # BUGFIX: the original called `train_model(args.config_path)`, but no
    # `train_model` exists in this module. Build the Config from the given
    # path and run train() — TODO confirm Config's constructor signature.
    train(Config(args.config_path))
|
mspector/expertise | expertise/create_dataset.py | <reponame>mspector/expertise<gh_stars>0
'''
A script for generating a dataset.
Assumes that the necessary evidence-collecting steps have been done,
and that papers have been submitted.
'''
import argparse
import json
import os
from collections import defaultdict, OrderedDict
from datetime import datetime

import openreview
import requests
from tqdm import tqdm
def search_with_retries(client, term, max_retries=5):
    """Search OpenReview notes by author, retrying on network errors.

    :param client: an openreview.Client (or compatible) instance
    :param term: the search term (an author/member ID)
    :param max_retries: number of attempts before giving up
    :return: the list of matching notes, or [] if every attempt failed

    BUGFIX: the original ignored the `client` parameter and used the
    module-level `openreview_client` global; it also fell through to an
    implicit None on total failure, which breaks callers that .extend()
    the result — return the (empty) result list instead.
    """
    results = []
    for _ in range(max_retries):
        try:
            results = client.search_notes(
                term=term, content='authors', group='all', source='forum')
            return results
        except requests.exceptions.RequestException:
            # Transient network failure; try again.
            pass
    print('search failed: ', term)
    return results
if __name__ == '__main__':
    # Command-line interface: a JSON config plus OpenReview credentials.
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='a JSON file containing all other arguments')
    parser.add_argument('--overwrite', action='store_true')
    parser.add_argument('--username')
    parser.add_argument('--password')
    parser.add_argument('--baseurl')
    args = parser.parse_args()

    with open(args.config) as file_handle:
        config = json.load(file_handle)
    print(config)

    # Create the dataset directory layout: archives/, submissions/, bids/.
    dataset_dir = config['dataset']['directory'] if 'dataset' in config else './'
    if not os.path.isdir(dataset_dir):
        os.mkdir(dataset_dir)

    archive_dir = os.path.join(dataset_dir, 'archives')
    if not os.path.isdir(archive_dir):
        os.mkdir(archive_dir)

    submission_dir = os.path.join(dataset_dir, 'submissions')
    if not os.path.isdir(submission_dir):
        os.mkdir(submission_dir)

    bids_dir = os.path.join(dataset_dir, 'bids')
    if not os.path.isdir(bids_dir):
        os.mkdir(bids_dir)

    # NOTE(review): `<PASSWORD>` is a redaction placeholder left by the dump
    # and is not valid Python; the intended value is presumably args.password.
    openreview_client = openreview.Client(
        username=args.username,
        password=<PASSWORD>,
        baseurl=args.baseurl
    )

    # Counters written to metadata.json at the end of the run.
    metadata = {
        "reviewer_count": 0,
        "submission_count": 0,
        "archive_counts": defaultdict(lambda: {'arx': 0, 'bid': 0}),
        "bid_counts": {},
    }

    # Optional cutoff: ignore papers older than config['oldest_year']
    # (converted to epoch milliseconds).
    minimum_timestamp = 0
    if 'oldest_year' in config:
        epoch = datetime.fromtimestamp(0)
        date = datetime.strptime(config['oldest_year'], '%Y')
        minimum_timestamp = (date - epoch).total_seconds() * 1000.0
    print('minimum_timestamp', minimum_timestamp)

    # Optional per-user exclusion edges: papers each user opted out of.
    excluded_ids_by_user = defaultdict(list)
    if 'exclusion_inv' in config:
        user_grouped_edges = openreview.tools.iterget_grouped_edges(
            openreview_client,
            invitation=config['exclusion_inv'],
            groupby='tail',
            select='id,head,label,weight'
        )
        for edges in user_grouped_edges:
            for edge in edges:
                excluded_ids_by_user[edge.tail].append(edge.head)

    # if group ID is supplied, collect archives for every member
    # (except those whose archives already exist)
    if 'match_group' in config:
        group_id = config['match_group']

        group = openreview_client.get_group(group_id)
        # Members are either profile IDs (contain '~') or raw emails ('@').
        profile_members = [member for member in group.members if '~' in member]
        email_members = [member for member in group.members if '@' in member]
        profile_search_results = openreview_client.search_profiles(
            emails=email_members, ids=None, term=None)

        valid_members = []
        if profile_members:
            valid_members.extend(profile_members)
        if profile_search_results and type(profile_search_results) == dict:
            valid_members.extend([p.id for p in profile_search_results.values()])

        print('finding archives for {} valid members'.format(len(valid_members)))

        # Direct uploads live under a dedicated invitation; group them by
        # the uploading signature so they can be merged per member.
        archive_direct_uploads = openreview.tools.iterget_notes(
            openreview_client, invitation='OpenReview.net/Archive/-/Direct_Upload')

        direct_uploads_by_signature = defaultdict(list)
        for direct_upload in archive_direct_uploads:
            direct_uploads_by_signature[direct_upload.signatures[0]].append(direct_upload)

        for member in tqdm(valid_members, total=len(valid_members)):
            file_path = os.path.join(archive_dir, member + '.jsonl')
            if args.overwrite or not os.path.exists(file_path):
                member_papers = search_with_retries(openreview_client, member)
                member_papers.extend(direct_uploads_by_signature[member])

                # NOTE(review): this first filtered_papers is dead code — it
                # is overwritten unconditionally by the loop below.
                filtered_papers = [
                    n for n in member_papers \
                    if n.id not in excluded_ids_by_user[member] \
                ]

                # Deduplicate by paperhash and apply exclusion + timestamp
                # filters.
                seen_keys = []
                filtered_papers = []
                for n in member_papers:
                    paperhash = openreview.tools.get_paperhash('', n.content['title'])
                    timestamp = n.cdate if n.cdate else n.tcdate
                    if n.id not in excluded_ids_by_user[member] \
                    and timestamp > minimum_timestamp \
                    and paperhash not in seen_keys:
                        filtered_papers.append(n)
                        seen_keys.append(paperhash)

                metadata['archive_counts'][member]['arx'] = len(filtered_papers)

                with open(file_path, 'w') as f:
                    for paper in filtered_papers:
                        f.write(json.dumps(paper.to_json()) + '\n')

    # if invitation ID is supplied, collect records for each submission
    if 'paper_invitation' in config:
        invitation_id = config['paper_invitation']

        # (1) get submissions from OpenReview
        submissions = list(openreview.tools.iterget_notes(
            openreview_client, invitation=invitation_id))

        print('finding records of {} submissions'.format(len(submissions)))
        for paper in tqdm(submissions, total=len(submissions)):
            file_path = os.path.join(submission_dir, paper.id + '.jsonl')
            if args.overwrite or not os.path.exists(file_path):
                with open(file_path, 'w') as f:
                    f.write(json.dumps(paper.to_json()) + '\n')
            # Initialize the bid counter for every submission's forum.
            metadata['bid_counts'][paper.forum] = 0

    # Append each bid to the per-forum .jsonl file and bump the counters.
    if 'bid_inv' in config:
        invitation_id = config['bid_inv']
        bids = openreview.tools.iterget_tags(
            openreview_client, invitation=invitation_id)
        for bid in tqdm(bids, desc='writing bids'):
            file_path = os.path.join(bids_dir, bid.forum + '.jsonl')
            # Only count/record bids on known submission forums — TODO
            # confirm this nesting; indentation was lost in the dump.
            if bid.forum in metadata['bid_counts']:
                metadata['bid_counts'][bid.forum] += 1
                metadata['archive_counts'][bid.signatures[0]]['bid'] += 1
                with open(file_path, 'a') as f:
                    f.write(json.dumps(bid.to_json()) + '\n')

    # Sort the counters by key for stable, readable metadata output.
    metadata['bid_counts'] = OrderedDict(
        sorted(metadata['bid_counts'].items(), key=lambda t: t[0]))

    metadata['archive_counts'] = OrderedDict(
        sorted(metadata['archive_counts'].items(), key=lambda t: t[0]))

    metadata['reviewer_count'] = len(metadata['archive_counts'])
    # NOTE(review): `submissions` is only defined when 'paper_invitation' is
    # in the config; this line raises NameError otherwise.
    metadata['submission_count'] = len(submissions)

    metadata_file = os.path.join(dataset_dir, 'metadata.json')
    with open(metadata_file, 'w') as f:
        json.dump(metadata, f, indent=4, ensure_ascii=False)
|
mspector/expertise | expertise/preprocess/keyphrases_pke.py | import json
import os
import pickle
import pke
import string
import sys
from nltk.corpus import stopwords
from .. import utils
def keyphrases(data_dir):
    '''
    Given a directory containing reviewer archives or submissions,
    yield (file_id, keyphrases) pairs, one per .jsonl file in the
    directory. Keyphrases are extracted per record with pke's SingleRank
    and accumulated across all records of the file.
    '''
    for filename in os.listdir(data_dir):
        filepath = os.path.join(data_dir, filename)
        record_id = filename.replace('.jsonl', '')
        print(record_id)

        extracted = []
        with open(filepath) as f:
            for line in f.readlines():
                # Strip a single trailing newline, if present.
                if line.endswith('\n'):
                    line = line[:-1]
                record = json.loads(line)
                content = record['content']
                raw_text = utils.content_to_text(content, fields=['title', 'abstract', 'fulltext'])
                clean_text = utils.strip_nonalpha(raw_text)

                # Only nouns, proper nouns and adjectives may form keyphrases.
                pos = {'NOUN', 'PROPN', 'ADJ'}

                # SingleRank: candidates are the longest noun/adjective
                # sequences, weighted by a random walk over a 10-word
                # co-occurrence window.
                extractor = pke.unsupervised.SingleRank()
                extractor.load_document(input=clean_text,
                                        language='en',
                                        normalization=None)
                extractor.candidate_selection(pos=pos)
                extractor.candidate_weighting(window=10,
                                              pos=pos)

                # Keep the 3 best candidates, joining multi-word phrases
                # with underscores.
                extracted.extend(
                    best[0].replace(' ', '_') for best in extractor.get_n_best(n=3))

        yield record_id, extracted
|
mspector/expertise | expertise/config/__main__.py | <gh_stars>10-100
'''
'''
from __future__ import absolute_import
import argparse
import os
import pkgutil
import expertise
# CLI: pick a model and (optionally) where to write its config file.
parser = argparse.ArgumentParser()
parser.add_argument('model', help=f'select one of {expertise.available_models()}')
parser.add_argument('--outfile', '-o', help='file to write config')
args = parser.parse_args()

# Build a fresh config for the chosen model.
config = expertise.config.ModelConfig(model=args.model)

# Default the output path to ./<model>.json when not given explicitly.
if args.outfile:
    outfile = args.outfile
else:
    outfile = f'./{args.model}.json'

# The experiment directory is wherever the config file lives.
experiment_dir = os.path.dirname(os.path.abspath(outfile))
config.update(experiment_dir=experiment_dir)

config.save(outfile)
|
mspector/expertise | expertise/preprocess/textrank/__main__.py | import argparse
import json
import os
from collections import OrderedDict
from expertise.config import ModelConfig
from .core import run_textrank
if __name__ == '__main__':
    # Load the model config, run the TextRank preprocessing step, and
    # save the (possibly updated) config back to the same path.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help="a config file for a model")
    args = parser.parse_args()

    config_path = os.path.abspath(args.config_path)
    with open(config_path) as f:
        # Preserve key order from the JSON file.
        data = json.load(f, object_pairs_hook=OrderedDict)
    config = ModelConfig(**data)

    run_textrank(config)

    print('saving', config_path, config)
    config.save(config_path)
|
mspector/expertise | expertise/dataset/helpers.py | import os
import json
from openreview import Tag
from expertise import utils
from tqdm import tqdm
def filter_by_fields(content, fields):
    """Return a copy of *content* restricted to the keys listed in *fields*."""
    wanted = set(fields)
    return {key: value for key, value in content.items() if key in wanted}
def read_json_records(data_dir, return_batches):
    """Stream the records of every .jsonl file under *data_dir*.

    When return_batches is False, yields (file_id, record) once per
    record; when True, yields (file_id, [records]) once per file. The
    file_id is the filename with its '.jsonl' suffix removed.
    """
    for filename in os.listdir(data_dir):
        filepath = os.path.join(data_dir, filename)
        file_id = filename.replace('.jsonl', '')

        if return_batches:
            batch = [content for content in utils.jsonl_reader(filepath)]
            yield file_id, batch
        else:
            for content in utils.jsonl_reader(filepath):
                yield file_id, content
def get_items_generator(path, num_items, return_batches, progressbar='', partition_id=0, num_partitions=1):
    """Build a (possibly partitioned, possibly tqdm-wrapped) record generator.

    :param path: directory of .jsonl files to read
    :param num_items: expected total item count (used for the progress bar)
    :param return_batches: per-file batches vs. per-record items
    :param progressbar: tqdm description; falsy disables the progress bar
    :param partition_id: which partition of the stream to yield
    :param num_partitions: total number of partitions
    """
    items_generator = read_json_records(
        path, return_batches=return_batches)

    desc = progressbar
    if num_partitions > 1:
        items_generator = utils.partition(
            items_generator,
            partition_id=partition_id, num_partitions=num_partitions)
        num_items = num_items / num_partitions
        # BUGFIX: this partition-aware description was computed but never
        # used; pass it through to tqdm below.
        desc = '{} (partition {})'.format(progressbar, partition_id)

    if progressbar:
        items_generator = tqdm(
            items_generator,
            total=num_items,
            desc=desc)

    return items_generator
def read_bid_records(data_dir, return_batches):
    """Stream bid records from the .jsonl files under *data_dir*.

    CONSISTENCY: this was a byte-for-byte duplicate of read_json_records;
    delegate to it instead of maintaining two copies.
    """
    yield from read_json_records(data_dir, return_batches)
def get_bids_generator(path, num_items, return_batches, progressbar='', partition_id=0, num_partitions=1):
    """Build a (possibly partitioned, possibly tqdm-wrapped) bid generator.

    CONSISTENCY: this duplicated get_items_generator line-for-line (the
    underlying record readers are identical); delegate instead.
    """
    return get_items_generator(
        path, num_items, return_batches,
        progressbar=progressbar,
        partition_id=partition_id,
        num_partitions=num_partitions)
|
mspector/expertise | expertise/utils/data_to_sample.py | <filename>expertise/utils/data_to_sample.py<gh_stars>10-100
import argparse
import os
from . import utils
def data_to_sample(data, vocab, max_num_keyphrases):
    '''
    Convert one line of the training data into a training sample.

    For each of the three roles — 'source', 'positive', 'negative' — the
    sample contains:

    <role>:
        the role's text converted to vocabulary token IDs (truncated to
        at most max_num_keyphrases entries by vocab.to_ints).
    <role>_length:
        a one-element list holding the number of token IDs.
    <role>_id:
        the role's identifier, copied straight from the input record.
    '''
    sample = {}
    for role in ('source', 'positive', 'negative'):
        token_ids = vocab.to_ints(data[role], max_num_keyphrases=max_num_keyphrases)
        sample[role] = token_ids
        sample[role + '_length'] = [len(token_ids)]
        sample[role + '_id'] = data[role + '_id']
    return sample
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('config_path')
# parser.add_argument('datafile')
# parser.add_argument('--samples_filename', default='train_samples_permuted.jsonl')
# args = parser.parse_args()
# config_path = os.path.abspath(args.config_path)
# config = Config(args.config_path)
# vocab = config.setup_load('vocab.pkl')
# data_reader = utils.jsonl_reader(args.datafile)
# train_samples = (data_to_sample(data, vocab) for data in data_reader)
# config.setup_save(train_samples, args.samples_filename)
|
mspector/expertise | expertise/setup/core.py | import os
from collections import OrderedDict
import json
import expertise
def setup_model(args):
    """Load the JSON file at args.config_path into a ModelConfig.

    Key order from the JSON file is preserved via OrderedDict.
    """
    config_path = os.path.abspath(args.config_path)
    with open(config_path) as f:
        loaded = json.load(f, object_pairs_hook=OrderedDict)
    return expertise.config.ModelConfig(**loaded)
|
mspector/expertise | expertise/models/centroid_scibert/__init__.py | <filename>expertise/models/centroid_scibert/__init__.py
from __future__ import absolute_import
from .centroid_scibert import *
from .setup_centroid_scibert import setup
from .train_centroid_scibert import train
from .test_centroid_scibert import test
|
mspector/expertise | expertise/preprocess/__init__.py | from . import bert
from . import textrank
|
mspector/expertise | setup.py | <filename>setup.py
from setuptools import setup
# Package metadata and dependencies for the openreview-expertise project.
setup(
    name='openreview-expertise',
    version='0.1',
    description='OpenReview paper-reviewer affinity modeling',
    url='https://github.com/iesl/openreview-evidence',
    # NOTE(review): author/email are redaction placeholders from the dump.
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    # Only the top-level package is listed; subpackages are not included here.
    packages=[
        'expertise'
    ],
    install_requires=[
        'openreview-py>=1.0.1',
        'numpy',
        'pandas',
        'nltk',
        'gensim',
        'torch',
        'cloudpickle',
        'scikit-learn',
        'tqdm',
        'pytorch_pretrained_bert'
    ],
    zip_safe=False
)
|
mspector/expertise | expertise/preprocess/chunks.py | <gh_stars>10-100
from expertise.preprocess.textrank import TextRank
def keyphrases(text):
    """Segment *text* into chunked sentences and return a flat token list."""
    chunked = TextRank().sentence_segment_chunk(text)
    tokens = []
    for sentence in chunked:
        tokens.extend(sentence)
    return tokens
|
mspector/expertise | expertise/models/tfidf/test_tfidf.py | <filename>expertise/models/tfidf/test_tfidf.py<gh_stars>0
import argparse
import os
import csv, json
import numpy as np
from collections import defaultdict
import expertise
from expertise.dataset import Dataset
from expertise.evaluators.mean_avg_precision import eval_map
from expertise.evaluators.hits_at_k import eval_hits_at_k
from tqdm import tqdm
import ipdb
def test(config):
    # Evaluate a trained tf-idf model: score every (paper, reviewer) pair in
    # the test labels file, write the scores to test_scores.jsonl, and dump
    # MAP / Hits@K summary metrics to test.scores.tsv.
    dataset = Dataset(**config.dataset)

    # The trained model was pickled by the training step.
    model = expertise.utils.load_pkl(os.path.join(config.train_dir, 'model.pkl'))

    # Map each paper ID to its column index in the model's similarity index.
    paperidx_by_id = {
        paperid: index
        for index, paperid
        in enumerate(model.bow_archives_by_paperid.keys())
    }

    test_dir = os.path.join(config.experiment_dir, 'test')
    if not os.path.isdir(test_dir):
        os.mkdir(test_dir)
    config.update(test_dir=test_dir)

    score_file_path = os.path.join(config.test_dir, 'test_scores.jsonl')
    labels_file_path = os.path.join(config.setup_dir, 'test_labels.jsonl')

    # Cache of per-reviewer score vectors (one entry per paper), computed
    # lazily the first time each reviewer appears in the labels file.
    scores = {}
    with open(score_file_path, 'w') as w:
        for data in expertise.utils.jsonl_reader(labels_file_path):
            paperid = data['source_id']
            userid = data['target_id']
            label = data['label']

            if userid not in scores:
                # bow_archive is a list of BOWs.
                if userid in model.bow_archives_by_userid and len(model.bow_archives_by_userid[userid]) > 0:
                    bow_archive = model.bow_archives_by_userid[userid]
                else:
                    # Reviewer has no archive: use a single empty BOW.
                    bow_archive = [[]]

                # For each paper, take the best similarity over the
                # reviewer's archived documents.
                best_scores = np.amax(model.index[bow_archive], axis=0)
                scores[userid] = best_scores

            # Papers absent from the model index are skipped silently.
            if paperid in paperidx_by_id:
                paper_index = paperidx_by_id[paperid]
                score = scores[userid][paper_index]

                result = {
                    'source_id': paperid,
                    'target_id': userid,
                    'score': float(score),
                    'label': int(label)
                }

                w.write(json.dumps(result) + '\n')

    # Summarize ranking quality over the written score file.
    (list_of_list_of_labels,
     list_of_list_of_scores) = expertise.utils.load_labels(score_file_path)

    map_score = float(eval_map(list_of_list_of_labels, list_of_list_of_scores))
    hits_at_1 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=1))
    hits_at_3 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=3))
    hits_at_5 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=5))
    hits_at_10 = float(eval_hits_at_k(list_of_list_of_labels, list_of_list_of_scores, k=10))

    score_lines = [
        [config.name, text, data] for text, data in [
            ('MAP', map_score),
            ('Hits@1', hits_at_1),
            ('Hits@3', hits_at_3),
            ('Hits@5', hits_at_5),
            ('Hits@10', hits_at_10)
        ]
    ]
    expertise.utils.dump_csv(
        os.path.join(config.test_dir, 'test.scores.tsv'), score_lines)
if __name__ == '__main__':
    # Load the model config from the given path and run the evaluation.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help="a config file for a model")
    cli_args = parser.parse_args()

    config = expertise.config.ModelConfig()
    config.update_from_file(cli_args.config_path)

    test(config)
|
mspector/expertise | expertise/preprocess/bert/setup_bert_kps_lookup.py | import csv, importlib, itertools, json, math, os, pickle, random
from collections import defaultdict
import numpy as np
import openreview
from expertise.utils.batcher import Batcher
from expertise.dataset import Dataset
from expertise.config import ModelConfig
import expertise.utils as utils
from expertise.utils.data_to_sample import data_to_sample
import argparse
import torch
def setup_bert_kps_lookup(config):
    """Build a {item_id: Tensor} lookup of BERT vectors for textrank keyphrases.

    For every saved feature file (archives and submissions), collects the
    last-layer BERT vector of each token that is one of the item's textrank
    keyphrases (first occurrence only), then truncates/zero-pads the list to
    exactly config.max_num_keyphrases rows of size config.bert_dim.

    Returns:
        dict mapping item id -> torch.Tensor of shape
        (max_num_keyphrases, bert_dim).
    """
    print('starting setup')
    # features_dir = config.bert_features_dir
    archive_features_dir = os.path.join(config.experiment_dir, 'setup', 'archives-features')
    submission_features_dir = os.path.join(config.experiment_dir, 'setup', 'submissions-features')
    # Precomputed textrank keyphrases, keyed by item id.
    textrank_kps = utils.load_pkl(os.path.join(config.setup_dir, 'textrank_kps_by_id.pkl'))
    bert_lookup = {}
    for target_dir in [archive_features_dir, submission_features_dir]:
        for filename in os.listdir(target_dir):
            print(filename)
            # Feature files are named '<item_id>.npy'.
            item_id = filename.replace('.npy','')
            filepath = os.path.join(target_dir, filename)
            # NOTE(review): these .npy files appear to hold dicts (object
            # arrays); on NumPy >= 1.16.3 that requires allow_pickle=True —
            # confirm against the writer in the bert extraction step.
            archives = np.load(filepath)
            document_kps = textrank_kps[item_id]
            kps_seen = []
            kp_features = []
            for document in archives:
                features = document['features']
                for feature in features:
                    # Keep only the first occurrence of each keyphrase token.
                    if feature['token'] in document_kps and feature['token'] not in kps_seen:
                        kps_seen.append(feature['token'])
                        # Last layer's activation vector for the token.
                        kp_features.append(feature['layers'][-1]['values'])
            # Truncate, then zero-pad to a fixed row count.
            kp_features = kp_features[:config.max_num_keyphrases]
            while len(kp_features) < config.max_num_keyphrases:
                kp_features.append(np.zeros(config.bert_dim))
            result = np.array(kp_features)
            bert_lookup[item_id] = torch.Tensor(result)
    return bert_lookup
if __name__ == '__main__':
    # CLI entry point: ensure the experiment's setup directory exists, build
    # the BERT keyphrase lookup, and pickle it for later pipeline stages.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('config_path', help="a config file for a model")
    cli_args = arg_parser.parse_args()
    resolved_config_path = os.path.abspath(cli_args.config_path)
    experiment_path = os.path.dirname(resolved_config_path)
    model_config = ModelConfig()
    model_config.update_from_file(resolved_config_path)
    setup_path = os.path.join(experiment_path, 'setup')
    if not os.path.isdir(setup_path):
        os.mkdir(setup_path)
    lookup = setup_bert_kps_lookup(model_config)
    utils.dump_pkl(os.path.join(model_config.setup_dir, 'bert_lookup.pkl'), lookup)
|
mspector/expertise | expertise/utils/batcher_devtest.py | """
Copyright (C) 2017-2018 University of Massachusetts Amherst.
This file is part of "learned-string-alignments"
http://github.com/iesl/learned-string-alignments
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import numpy as np
import pickle
class DevTestBatcher(object):
    """
    Class for Dev/Test batching.

    Streams tab-separated (query, target, label) triples from `input_file`,
    converts each side's precomputed keyphrases into integer-id vectors via
    `vocab`, and yields fixed-size evaluation batches.

    NOTE(review): indentation in this block was reconstructed from a
    whitespace-mangled source — confirm nesting (especially the position of
    `counter += 1`) against the upstream repository.
    """
    def __init__(self, config, vocab, input_file, submission_kps_file, reviewer_kps_file):
        """Construct a DevTestBatcher
        Construct a batcher that works on the Dev / test set
        """
        self.config = config
        self.vocab = vocab
        self.filename = input_file
        self.batch_size = self.config.dev_batch_size
        # Keyphrases are precomputed and pickled, keyed by submission /
        # reviewer identifier strings.
        with open(submission_kps_file, 'rb') as f:
            self.submission_keyphrases = pickle.load(f)
        with open(reviewer_kps_file, 'rb') as f:
            self.reviewer_keyphrases = pickle.load(f)
    def batches(self):
        """Provide all batches in the dev/test set
        Generator over batches in the dataset. Note that the last batch may be
        of a different size than the other batches
        :return: Generator over bathes of size self.config.dev_batch_size.
        Each element of the generator contains the following tuple:
        batch_queries,
        batch_query_lengths,
        batch_query_strings,
        batch_targets,
        batch_target_lengths,
        batch_target_strings,
        batch_labels,
        batch_size
        """
        batch_queries = []
        batch_query_lengths = []
        batch_query_strings = []
        batch_targets = []
        batch_target_lengths = []
        batch_target_strings = []
        batch_labels = []
        counter = 0
        # Running count of out-of-vocabulary tokens (vocab id 1); accumulated
        # but never reported from this method.
        num_oov = 0
        with codecs.open(self.filename,'r','UTF-8') as fin:
            for line in fin:
                # Flush a full batch before processing the next line.
                if counter % self.batch_size == 0 and counter > 0:
                    # print(len(batch_queries))
                    if len(batch_queries) > 0:
                        yield np.asarray(batch_queries),\
                            np.asarray(batch_query_lengths),\
                            batch_query_strings,\
                            np.asarray(batch_targets),\
                            np.asarray(batch_target_lengths),\
                            batch_target_strings,\
                            np.asarray(batch_labels),\
                            self.batch_size
                    batch_queries = []
                    batch_query_lengths = []
                    batch_query_strings = []
                    batch_targets = []
                    batch_target_lengths = []
                    batch_target_strings = []
                    batch_labels = []
                split = line.rstrip().split("\t")
                query_string = split[0]
                target_string = split[1]
                # Skip pairs for which either side has no keyphrase entry.
                if query_string in self.submission_keyphrases and target_string in self.reviewer_keyphrases:
                    if len(self.submission_keyphrases[query_string]) > 0 and len(self.reviewer_keyphrases[target_string]) > 0:
                        # Learned models consume the keyphrase list (multi-word
                        # phrases joined with '_'); the listed baselines score
                        # the raw id strings instead.
                        if self.config.model_name not in ['TPMS', 'TFIDF', 'Random']:
                            query_string = " ".join([kp.replace(" ", "_") for kp in self.submission_keyphrases[query_string]])
                        query_len = [min(self.config.max_num_keyphrases,len(query_string.split(" ")))]
                        query_vec = np.asarray(self.vocab.to_ints(query_string))
                        # Vocab id 1 marks an out-of-vocabulary token; count them.
                        unique, counts = np.unique(query_vec, return_counts=True)
                        count_dict = dict(zip(unique, counts))
                        if 1 in count_dict:
                            num_oov_current = count_dict[1]
                        else:
                            num_oov_current = 0
                        num_oov += num_oov_current
                        batch_queries.append(query_vec)
                        batch_query_lengths.append(query_len)
                        batch_query_strings.append(query_string)
                        if self.config.model_name not in ['TPMS', 'TFIDF', 'Random']:
                            target_string = " ".join([kp.replace(" ", "_") for kp in self.reviewer_keyphrases[target_string]])
                        target_len = [min(self.config.max_num_keyphrases,len(target_string.split(" ")))]
                        target_vec = np.asarray(self.vocab.to_ints(target_string))
                        unique, counts = np.unique(target_vec, return_counts=True)
                        count_dict = dict(zip(unique, counts))
                        if 1 in count_dict:
                            num_oov_current = count_dict[1]
                        else:
                            num_oov_current = 0
                        num_oov += num_oov_current
                        label = int(split[2])
                        batch_targets.append(target_vec)
                        batch_target_lengths.append(target_len)
                        batch_target_strings.append(target_string)
                        batch_labels.append(label)
                # counter advances for every input line, including skipped
                # pairs — presumably intentional; TODO confirm upstream.
                counter += 1
        # Final partial batch (may be smaller than batch_size).
        if len(batch_queries) >= 1:
            yield np.asarray(batch_queries), \
                np.asarray(batch_query_lengths), \
                batch_query_strings, \
                np.asarray(batch_targets), \
                np.asarray(batch_target_lengths), \
                batch_target_strings, \
                np.asarray(batch_labels), \
                len(batch_queries)
class DevBatcher(DevTestBatcher):
    # NOTE(review): DevTestBatcher.__init__ takes (config, vocab, input_file,
    # submission_kps_file, reviewer_kps_file) and has no `use_dev` parameter,
    # so this constructor raises TypeError as written — it appears to target
    # an older base-class signature; confirm the intended file arguments.
    # Also, `super(self.__class__, self)` recurses infinitely if this class
    # is subclassed further; plain `super()` is the safe form.
    def __init__(self,config,vocab):
        super(self.__class__, self).__init__(config,vocab,use_dev=True)
class TestBatcher(DevTestBatcher):
    # NOTE(review): same problem as DevBatcher — the base __init__ has no
    # `use_dev` parameter, so this call raises TypeError as written; confirm
    # the intended arguments before use, and prefer plain `super()`.
    def __init__(self,config,vocab):
        super(self.__class__, self).__init__(config,vocab,use_dev=False)
|
mspector/expertise | expertise/models/randomize/__init__.py | from __future__ import absolute_import
from .randomize import *
|
mspector/expertise | expertise/preprocess/textrank/__init__.py | from .core import *
from .textrank import TextRank
from .textrank_words import keyphrases
|
mspector/expertise | expertise/models/centroid/test.py | import torch
import os
from expertise import utils
from expertise.utils.batcher import Batcher
from expertise.models import centroid
def test(config):
    """Evaluate the best saved centroid model on the held-out test split.

    Loads the checkpoint at config.best_model_path, scores the test samples,
    writes raw predictions (jsonl) and a metrics summary (tsv) under the
    experiment's test directory, and records the scores on the config.

    Returns:
        The config, updated with a `test_scores` report dict.
    """
    print('config.best_model_path', config.best_model_path)
    test_dir = os.path.join(config.experiment_dir, 'test')
    if not os.path.isdir(test_dir):
        os.mkdir(test_dir)
    model = torch.load(config.best_model_path)
    batcher = Batcher(
        input_file=os.path.join(config.experiment_dir, 'setup', 'test_samples.jsonl'))
    predictions = centroid.generate_predictions(config, model, batcher)
    prediction_filename = os.path.join(config.experiment_dir, 'test', 'test.predictions.jsonl')
    utils.dump_jsonl(prediction_filename, predictions)
    print('prediction filename', prediction_filename)
    # Ranking metrics, evaluated from the prediction file: MAP plus Hits@k.
    metrics = [('MAP', float(centroid.eval_map_file(prediction_filename)))]
    for k in (1, 3, 5, 10):
        metrics.append(
            ('Hits@%d' % k, float(centroid.eval_hits_at_k_file(prediction_filename, k))))
    score_lines = [[config.name, metric_name, metric_value]
                   for metric_name, metric_value in metrics]
    utils.dump_csv(
        os.path.join(config.experiment_dir, 'test', 'test.scores.tsv'), score_lines)
    report = dict(metrics)
    config.update(test_scores=report)
    return config
|
mspector/expertise | expertise/preprocess/bert/__main__.py | <reponame>mspector/expertise<filename>expertise/preprocess/bert/__main__.py
import argparse
import os
import json
from collections import OrderedDict
from expertise import utils
from expertise.config import ModelConfig
from .setup_bert_kps_lookup import setup_bert_kps_lookup
from .setup_bert_lookup import setup_bert_lookup
from .core import get_embeddings
def main(config, partition_id=0, num_partitions=1, local_rank=-1):
    """Convert submissions and archives to BERT feature vectors on disk.

    For each text item in the requested dataset partition, computes
    embeddings with `get_embeddings` and saves them as '<id>.npy' under the
    experiment's bert/{submissions,archives}-features directories, skipping
    files that already exist so interrupted partitions can be resumed.

    Args:
        config: ModelConfig providing experiment_dir, dataset and BERT settings.
        partition_id / num_partitions: which slice of the dataset to process.
        local_rank: accepted for interface compatibility; unused here.

    Returns:
        The (unmodified) config.
    """
    # Bug fix: `Dataset` and `np` were referenced below but never imported in
    # this module, so the function raised NameError at runtime. Import them
    # locally to keep the module's import block untouched.
    import numpy as np
    from expertise.dataset import Dataset

    experiment_dir = os.path.abspath(config.experiment_dir)
    bert_dir = os.path.join(experiment_dir, 'bert')
    for subdir in ('submissions-features', 'archives-features'):
        os.makedirs(os.path.join(bert_dir, subdir), exist_ok=True)
    dataset = Dataset(**config.dataset)
    # NOTE(review): extraction_args is built but never used below — it was
    # presumably meant to be forwarded to the embedding step; confirm against
    # get_embeddings' signature.
    extraction_args = {
        'max_seq_length': config.max_seq_length,
        'batch_size': config.batch_size,
        'no_cuda': not config.use_cuda
    }
    dataset_args = {
        'partition_id': partition_id,
        'num_partitions': num_partitions,
        'progressbar': True,
        'sequential': False
    }
    # Convert submissions and archives to BERT feature vectors; the two
    # passes are identical except for the source generator and output dir.
    sources = (
        (dataset.submissions, 'submissions-features'),
        (dataset.archives, 'archives-features'),
    )
    for source, subdir in sources:
        feature_dir = os.path.join(bert_dir, subdir)
        for text_id, text_list in source(**dataset_args):
            outfile = os.path.join(feature_dir, '{}.npy'.format(text_id))
            if not os.path.exists(outfile):
                embeddings = get_embeddings(
                    text_list[:config.max_lines], config.bert_model)
                np.save(outfile, embeddings)
            else:
                print('skipping {}'.format(outfile))
    return config
if __name__ == '__main__':
    # CLI entry point for one partition of the BERT feature extraction.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('config_path', help="a config file for a model")
    arg_parser.add_argument('--partition', type=int, default=0)
    arg_parser.add_argument('--num_partitions', type=int, default=1)
    # NOTE(review): store_true combined with default=True means this flag can
    # never be disabled from the command line; it is also currently unused
    # (the kps-lookup step below was disabled in the original source).
    arg_parser.add_argument('--use_kps', action='store_true', default=True)
    cli_args = arg_parser.parse_args()
    resolved_config_path = os.path.abspath(cli_args.config_path)
    experiment_path = os.path.dirname(resolved_config_path)
    model_config = ModelConfig()
    model_config.update_from_file(resolved_config_path)
    model_config = main(
        model_config,
        partition_id=cli_args.partition,
        num_partitions=cli_args.num_partitions)
    # The bert_lookup pickling step (setup_bert_kps_lookup / setup_bert_lookup
    # followed by utils.dump_pkl) was commented out in the original source.
|
mspector/expertise | expertise/dataset/__init__.py | <reponame>mspector/expertise
from .core import Dataset
from .helpers import filter_by_fields, read_json_records, get_items_generator
|
mspector/expertise | expertise/models/centroid_scibert_cls/setup_centroid_scibert.py | <filename>expertise/models/centroid_scibert_cls/setup_centroid_scibert.py<gh_stars>10-100
import csv, importlib, itertools, json, math, os, pickle, random
from collections import defaultdict
import numpy as np
import openreview
from expertise.utils.vocab import Vocab
from expertise.utils.batcher import Batcher
from expertise.dataset import Dataset
import expertise.utils as utils
from expertise.utils.data_to_sample import data_to_sample
import torch
def get_values(doc_feature):
    """Return the first layer's embedding vector of a document's first token.

    `doc_feature` is one document's BERT feature dict; the first entry of
    its 'features' list is the leading ([CLS]) token, and this returns the
    'values' list of that token's first layer.
    """
    first_token = doc_feature['features'][0]
    return first_token['layers'][0]['values']
def setup(config):
    """Build train/dev/test sample files for the centroid-scibert model.

    Splits submission ids into train/dev/test, streams each submission's
    fold records, converts them to model samples via `data_to_sample`, and
    writes one jsonl file per split under config.setup_dir.
    """
    print('starting setup')
    dataset = Dataset(**config.dataset)
    # NOTE(review): bids_by_forum is computed but unused in the visible body.
    bids_by_forum = utils.get_bids_by_forum(dataset)
    vocab = utils.load_pkl(os.path.join(config.kp_setup_dir, 'textrank_vocab.pkl'))
    # Deterministic split driven by the config's random seed.
    (train_set_ids,
     dev_set_ids,
     test_set_ids) = utils.split_ids(list(dataset.submission_ids), seed=config.random_seed)
    def fold_reader(id):
        # Stream one submission's records from kp_setup_dir/folds/<id>.jsonl.
        fold_file = f'{id}.jsonl'
        fold_path = os.path.join(config.kp_setup_dir, 'folds', fold_file)
        return utils.jsonl_reader(fold_path)
    train_folds = [fold_reader(i) for i in train_set_ids]
    dev_folds = [fold_reader(i) for i in dev_set_ids]
    test_folds = [fold_reader(i) for i in test_set_ids]
    # Samples are generator expressions, so each split is converted lazily
    # while being dumped.
    train_samples = (data_to_sample(
        data, vocab, config.max_num_keyphrases) for data in itertools.chain(*train_folds))
    train_samples_path = os.path.join(
        config.setup_dir, 'train_samples.jsonl')
    utils.dump_jsonl(train_samples_path, train_samples)
    dev_samples = (data_to_sample(
        data, vocab, config.max_num_keyphrases) for data in itertools.chain(*dev_folds))
    dev_samples_path = os.path.join(
        config.setup_dir, 'dev_samples.jsonl')
    utils.dump_jsonl(dev_samples_path, dev_samples)
    test_samples = (data_to_sample(
        data, vocab, config.max_num_keyphrases) for data in itertools.chain(*test_folds))
    test_samples_path = os.path.join(
        config.setup_dir, 'test_samples.jsonl')
    utils.dump_jsonl(test_samples_path, test_samples)
    # features_dir = './scibert_features/akbc19/setup/archives-features/'
    # NOTE(review): the function ends after computing these paths without
    # using them — the source appears truncated here; confirm upstream.
    features_dir = config.bert_features_dir
    archive_features_dir = os.path.join(features_dir, 'archives-features')
    submission_features_dir = os.path.join(features_dir, 'submissions-features')
|
mspector/expertise | expertise/infer_model.py | '''
Generate scores from the model defined by the config.
'''
import argparse
import importlib
import os
from expertise.config import Config
def infer_model(config_path):
    """Import the model module named in the config and run its `infer` step.

    Args:
        config_path: path to a model config file; resolved to an absolute
            path before loading.
    """
    resolved_path = os.path.abspath(config_path)
    experiment_path = os.path.dirname(resolved_path)
    config = Config(resolved_path)
    model = importlib.import_module(config.model)
    # Creating an 'infer' output directory is left to the model's own
    # `infer` implementation (an earlier version created it here).
    model.infer(config)
if __name__ == '__main__':
    # CLI entry point: run inference for the model described by the config.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('config_path', help="a config file for a model")
    cli_args = arg_parser.parse_args()
    infer_model(cli_args.config_path)
|
mspector/expertise | expertise/setup/__init__.py | from .core import setup_model
|
czechflek/leaguedirector_evolved | leaguedirector/enable.py | <gh_stars>100-1000
import os
import psutil
import platform
import logging
import subprocess
from PySide2.QtCore import *
def findWindowsInstalled(paths):
    """Append install locations recorded by Riot installers in the registry.

    Reads the 'Riot Games, Inc' group under the WOW6432Node software hive
    and collects every '.../Location' value into `paths` (mutated in place).
    """
    registry = QSettings('HKEY_LOCAL_MACHINE\\SOFTWARE\\WOW6432Node', QSettings.NativeFormat)
    registry.beginGroup('Riot Games, Inc')
    location_keys = [key for key in registry.allKeys() if key.endswith('/Location')]
    for key in location_keys:
        paths.append(registry.value(key))
    registry.endGroup()
def findWindowsRunning(paths):
    """Append the install roots of any currently running clients on Windows.

    Walks the process table looking for the League client (live or dev
    builds) and derives the install root from the executable path,
    appending it to `paths` (mutated in place).
    """
    for process in psutil.process_iter(attrs=['name', 'exe']):
        # Bug fix: psutil reports None for 'name'/'exe' on processes we
        # cannot access (permissions) or that exited mid-scan; the original
        # .lower()/`in` calls then raised and aborted the whole scan.
        name = (process.info['name'] or '').lower()
        path = process.info['exe'] or ''
        if name == 'leagueclient.exe' and '\\RADS' in path:
            paths.append(path.split('\\RADS')[0])
        if name == 'leagueclient.exe' and '\\LeagueClient\\' in path:
            paths.append(os.path.join(path.split('\\LeagueClient')[0]))
        elif name in ('launcher.exe', 'singleplayertool.exe') and 'DevRoot' in path:
            paths.append(os.path.join(path.split('\\DevRoot')[0], 'DevRoot'))
def findWindowsCached(paths):
    """Append install directories found in the Windows MuiCache.

    The MuiCache registry key records executables that have been launched on
    this machine; any entry containing 'league of legends.exe' yields the
    directory prefix before the executable name.
    """
    cache = QSettings('HKEY_CURRENT_USER\\Software\\Classes\\Local Settings\\Software\\Microsoft\\Windows\\Shell\\MuiCache', QSettings.NativeFormat)
    for entry in cache.allKeys():
        position = entry.lower().find('league of legends.exe')
        if position > 0:
            paths.append(entry[:position])
def findMacInstalled(paths):
    """Append installed client bundles located via Spotlight (mdfind).

    Queries the system metadata index for the League of Legends bundle
    identifier and appends each resulting path to `paths` (mutated in place).
    """
    query = 'kMDItemCFBundleIdentifier==com.riotgames.leagueoflegends'
    output = subprocess.check_output(['mdfind', query])
    paths.extend(line.decode() for line in output.splitlines())
def findMacRunning(paths):
    """Append the bundle roots of running League client processes on macOS.

    For each running 'leagueclient' process, splits the executable path on
    the bundle-internal RADS directory to recover the .app root.
    """
    for process in psutil.process_iter(attrs=['name']):
        if process.info['name'].lower() != 'leagueclient':
            continue
        pieces = process.exe().split('/Contents/LoL/RADS/')
        if len(pieces) == 2:
            paths.append(pieces[0])
def findInstalledGames():
    """Return a sorted, de-duplicated list of game.cfg paths for every
    install discoverable on this machine (installed, running, or cached)."""
    candidates = []
    system = platform.system()
    if system == 'Windows':
        findWindowsInstalled(candidates)
        findWindowsRunning(candidates)
        findWindowsCached(candidates)
    elif system == 'Darwin':
        findMacInstalled(candidates)
        findMacRunning(candidates)
    # Resolve each candidate to its game.cfg (None when no config exists),
    # then normalize case, drop Nones/duplicates, and sort.
    configs = [configFilePath(os.path.abspath(candidate)) for candidate in candidates]
    normalized = {os.path.normcase(config) for config in configs if config is not None}
    return sorted(normalized)
def configFilePath(path):
    """Return the game.cfg path inside an install directory, or None.

    Checks the known config locations (classic, modern, and dev layouts) in
    order; on macOS the search starts inside the app bundle's LoL folder.
    """
    path = os.path.abspath(path)
    if platform.system() == 'Darwin':
        path = os.path.join(path, 'Contents', 'LoL')
    layouts = (
        ('Config', 'game.cfg'),
        ('Game', 'Config', 'game.cfg'),
        ('DATA', 'CFG', 'game.cfg'),
    )
    for parts in layouts:
        candidate = os.path.join(path, *parts)
        if os.path.isfile(candidate):
            return candidate
def isGameEnabled(path):
    """Return True when EnableReplayApi is switched on in the given game.cfg.

    Missing files and unset keys both count as disabled; values are compared
    as strings so both boolean and numeric INI representations are accepted.
    """
    if not os.path.isfile(path):
        return False
    settings = QSettings(path, QSettings.IniFormat)
    flag = settings.value('EnableReplayApi', False)
    return str(flag).lower() in ['true', '1']
def setGameEnabled(path, enabled):
    """Write the EnableReplayApi flag into game.cfg (stored as 0/1).

    Silently does nothing when the config file does not exist.
    """
    if not os.path.isfile(path):
        return
    logging.info('Setting EnableReplayApi %s=%d', path, enabled)
    settings = QSettings(path, QSettings.IniFormat)
    settings.setValue('EnableReplayApi', int(enabled))
|
czechflek/leaguedirector_evolved | leaguedirector/sequencer.py | <gh_stars>100-1000
import copy
import threading
import webbrowser
import statistics
from operator import attrgetter, methodcaller
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from leaguedirector.widgets import *
PRECISION = 10000.0
SNAPPING = 4
OVERLAP = 4
ADJACENT = 0.05
class SequenceKeyframe(QGraphicsPixmapItem):
    """A draggable keyframe marker on a sequencer track.

    Wraps one keyframe dict (`item`, with 'time', 'value' and optional
    'blend' keys) owned by the sequence model; moving or editing this
    graphics item writes straight back into that dict and notifies the
    model. Scene x coordinates are time * PRECISION.
    """
    def __init__(self, api, item, track):
        self.pixmapNormal = QPixmap(respath('kfnormal.png'))
        self.pixmapOverlap = QPixmap(respath('kfoverlap.png'))
        QGraphicsPixmapItem.__init__(self, self.pixmapNormal, track)
        self.api = api
        self.track = track
        # The underlying keyframe dict in the sequence model.
        self.item = item
        # Set while alt-dragging so only one copy is made per drag.
        self.duplicate = None
        self.setCursor(Qt.ArrowCursor)
        self.setShapeMode(QGraphicsPixmapItem.BoundingRectShape)
        flags = QGraphicsItem.ItemIgnoresTransformations
        flags |= QGraphicsItem.ItemIsMovable
        flags |= QGraphicsItem.ItemIsSelectable
        flags |= QGraphicsItem.ItemSendsGeometryChanges
        self.setFlags(flags)
        self.setOffset(-10, 3)
        self.update()
    def viewport(self):
        # The first (only) view displaying this scene.
        return self.scene().views()[0]
    @property
    def time(self):
        return self.item['time']
    @time.setter
    def time(self, value):
        # Write through to the model and refresh overlap markers.
        if self.item['time'] != value:
            self.item['time'] = value
            self.api.sequence.update()
            self.track.updateOverlap()
            self.update()
    @property
    def valueType(self):
        """Classify the payload: 'float', 'bool', 'vector', 'color' or ''."""
        value = self.item['value']
        if isinstance(value, float):
            return 'float'
        elif isinstance(value, bool):
            return 'bool'
        elif isinstance(value, dict):
            if 'x' in value and 'y' in value and 'z' in value:
                return 'vector'
            if 'r' in value and 'g' in value and 'b' in value and 'a' in value:
                return 'color'
        return ''
    @property
    def value(self):
        return self.item['value']
    @value.setter
    def value(self, value):
        if self.item['value'] != value:
            self.item['value'] = value
            self.api.sequence.update()
            self.update()
    @property
    def blend(self):
        # Optional easing name; None when the keyframe has no 'blend' key.
        return self.item.get('blend')
    @blend.setter
    def blend(self, value):
        if self.item.get('blend') != value:
            self.item['blend'] = value
            self.api.sequence.update()
            self.update()
    def update(self):
        # Position in the scene reflects the keyframe time.
        self.setPos(int(self.time * PRECISION), 0)
        self.setToolTip(self.tooltip())
    def tooltip(self):
        # Dict payloads (vector/color) are shown as a tuple of components.
        value = self.value
        if isinstance(value, dict):
            value = tuple(value.values())
        return 'Time: {}\nBlend: {}\nValue: {}'.format(self.time, self.blend, value)
    def delete(self):
        # Remove from both the sequence model and the scene.
        self.api.sequence.removeKeyframe(self.track.name, self.item)
        self.scene().removeItem(self)
    def setOverlapping(self, overlapping):
        # Swap the marker image to flag keyframes stacked on the same track.
        self.setPixmap(self.pixmapOverlap if overlapping else self.pixmapNormal)
    def mouseDoubleClickEvent(self, event):
        # Plain double-click on a lone keyframe jumps playback to its time.
        if event.button() == Qt.LeftButton and event.modifiers() == Qt.NoModifier:
            if len(self.scene().selectedItems()) < 2:
                self.api.playback.pause(self.time)
            event.accept()
        QGraphicsPixmapItem.mouseDoubleClickEvent(self, event)
    def mouseReleaseEvent(self, event):
        # End any alt-drag duplication for the whole selection.
        for key in self.scene().selectedItems():
            if isinstance(key, SequenceKeyframe):
                key.duplicate = None
        QGraphicsPixmapItem.mouseReleaseEvent(self, event)
    def itemChange(self, change, value):
        if change == QGraphicsItem.ItemPositionChange:
            # Constrain dragging: snap to neighbours/playhead, clamp to
            # t >= 0, and lock to this track's row (y = 0).
            value.setX(self.performSnapping(value.x()))
            value.setX(max(0, value.x()))
            value.setY(0)
            self.performDuplication()
            return value
        elif change == QGraphicsItem.ItemPositionHasChanged:
            if value:
                # Propagate the final position back into the model as time.
                self.time = value.x() / PRECISION
        return QGraphicsPixmapItem.itemChange(self, change, value)
    def performDuplication(self):
        # Alt + left-drag copies the keyframe once, leaving the copy behind.
        if self.isSelected() and self.duplicate is None:
            if QApplication.mouseButtons() == Qt.LeftButton:
                if QApplication.keyboardModifiers() == Qt.AltModifier:
                    self.duplicate = self.track.duplicateKeyframe(self)
    def performSnapping(self, time):
        # Plain left-drags of a single keyframe snap to nearby keyframes on
        # other tracks (within SNAPPING screen pixels) or to the playhead.
        if QApplication.mouseButtons() == Qt.LeftButton:
            if QApplication.keyboardModifiers() == Qt.NoModifier:
                if len(self.scene().selectedItems()) < 2:
                    scene = self.scene()
                    viewport = self.viewport()
                    # Convert the pixel tolerance into scene units at the
                    # current zoom level.
                    screenPosition = viewport.mapFromScene(time, 0).x()
                    left = viewport.mapToScene(screenPosition - SNAPPING, 0).x()
                    right = viewport.mapToScene(screenPosition + SNAPPING, 0).x()
                    items = scene.items(left, float(0), right - left, scene.height(), Qt.IntersectsItemBoundingRect, Qt.AscendingOrder)
                    for item in items:
                        if isinstance(item, SequenceKeyframe):
                            if item != self and not item.isSelected() and item.track != self.track:
                                return item.x()
                        elif isinstance(item, SequenceTime):
                            return self.api.playback.time * PRECISION
        return time
class SequenceTrack(QGraphicsRectItem):
    """One row of the sequencer: a striped background plus the keyframes
    (SequenceKeyframe children) for a single sequence property."""
    # Row height in pixels; matches SequenceHeader.height so rows line up.
    height = 22
    def __init__(self, api, name, index):
        QGraphicsRectItem.__init__(self)
        self.api = api
        # Sequence property rendered on this row.
        self.name = name
        # Row number; fixes the vertical position.
        self.index = index
        self.setPos(0, self.height * self.index)
        self.setToolTip(self.api.sequence.getLabel(self.name))
        self.setPen(QPen(QColor(70, 70, 70, 255)))
        # Debounced overlap recalculation (restarted by updateOverlap()).
        self.updateOverlapTimer = QTimer()
        self.updateOverlapTimer.timeout.connect(self.updateOverlapNow)
        self.updateOverlapTimer.setSingleShot(True)
        # Alternating background stripes; scene x is time * PRECISION, so the
        # gradient repeats every 120 time units (presumably seconds — confirm).
        self.gradient = QLinearGradient(QPointF(0, 0), QPointF(120 * PRECISION, 0))
        self.gradient.setColorAt(0, QColor(30, 30, 30, 255))
        self.gradient.setColorAt(0.49999999999999, QColor(30, 30, 30, 255))
        self.gradient.setColorAt(0.5, QColor(40, 40, 40, 255))
        self.gradient.setColorAt(1, QColor(40, 40, 40, 255))
        self.gradient.setSpread(QGradient.RepeatSpread)
        self.setBrush(QBrush(self.gradient))
        self.reload()
        self.update()
    def viewport(self):
        # The first (only) view attached to the scene.
        return self.scene().views()[0]
    def paint(self, *args):
        # Overlap markers depend on the current zoom, so re-check on every
        # repaint (debounced inside updateOverlap).
        self.updateOverlap()
        return QGraphicsRectItem.paint(self, *args)
    def reload(self):
        # Rebuild keyframe items from the sequence model.
        for item in self.childItems():
            if isinstance(item, SequenceKeyframe):
                self.scene().removeItem(item)
        for item in self.api.sequence.getKeyframes(self.name):
            SequenceKeyframe(self.api, item, self)
    def addKeyframe(self):
        # Create a new keyframe in the model and show it on this track.
        item = self.api.sequence.createKeyframe(self.name)
        return SequenceKeyframe(self.api, item, self)
    def duplicateKeyframe(self, keyframe):
        # Deep-copy the data so edits to the copy are independent.
        item = copy.deepcopy(keyframe.item)
        self.api.sequence.appendKeyframe(self.name, item)
        return SequenceKeyframe(self.api, item, self)
    def clearKeyframes(self):
        for item in self.childItems():
            if isinstance(item, SequenceKeyframe):
                item.delete()
    def updateOverlapNow(self):
        # Mark neighbouring keyframes sitting within OVERLAP screen pixels of
        # each other so they render with the 'overlap' marker.
        viewport = self.viewport()
        distance = viewport.mapToScene(OVERLAP, 0).x() - viewport.mapToScene(0, 0).x()
        previous = None
        for child in sorted(self.childItems(), key=methodcaller('x')):
            if isinstance(child, SequenceKeyframe):
                if previous and abs(child.x() - previous.x()) < distance:
                    child.setOverlapping(True)
                    previous.setOverlapping(True)
                else:
                    child.setOverlapping(False)
                previous = child
    def updateOverlap(self):
        # Debounce: many requests within 100ms collapse into one pass.
        self.updateOverlapTimer.start(100)
    def update(self):
        # Stretch the row to cover the full playback length.
        self.setRect(0, 0, int(self.api.playback.length * PRECISION), self.height)
class SequenceHeader(QGraphicsRectItem):
    """Fixed left-hand label for one track, with a '+' button that asks the
    owner (via `callback`) to add a keyframe for this track."""
    # Must match SequenceTrack.height so rows line up.
    height = 22
    def __init__(self, api, name, index, callback):
        QGraphicsRectItem.__init__(self)
        self.api = api
        # Sequence property this header labels.
        self.name = name
        # Row number; fixes the vertical position.
        self.index = index
        # Invoked with `name` when the '+' button is clicked.
        self.callback = callback
        self.setPos(0, self.height * self.index)
        self.setRect(0, 0, 160, self.height)
        self.setToolTip(self.label())
        self.setPen(QPen(Qt.NoPen))
        self.setBrush(QColor(20, 20, 50, 255))
        # Keep the header at constant size regardless of view transforms.
        self.setFlags(QGraphicsItem.ItemIgnoresTransformations)
        self.text = QGraphicsSimpleTextItem(self.label(), self)
        self.text.setBrush(QApplication.palette().brightText())
        # Right-align the text just left of the '+' button.
        self.text.setPos(145 - self.text.boundingRect().width() - 20, 4)
        self.button = QGraphicsPixmapItem(QPixmap(respath('plus.png')), self)
        self.button.setPos(140, 4)
        self.button.setCursor(Qt.ArrowCursor)
        self.button.mousePressEvent = lambda event: self.callback(self.name)
    def label(self):
        # Human-readable label for this track's property.
        return self.api.sequence.getLabel(self.name)
class SequenceHeaderView(QGraphicsView):
    """Narrow fixed-width column listing one SequenceHeader per track."""
    # Re-emitted with the track name when a header's '+' button is clicked.
    addKeyframe = Signal(str)
    def __init__(self, api):
        self.api = api
        self.scene = QGraphicsScene()
        QGraphicsView.__init__(self, self.scene)
        self.setFixedWidth(162)
        self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        self.setDragMode(QGraphicsView.ScrollHandDrag)
        # Scrollbars hidden: vertical scrolling is synced externally with the
        # track view's scrollbar.
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        for index, name in enumerate(self.api.sequence.keys()):
            self.scene.addItem(SequenceHeader(self.api, name, index, self.addKeyframe.emit))
class SequenceTime(QGraphicsLineItem):
    """Playhead line in the track scene.

    Subclassed only so other code (e.g. keyframe snapping) can recognize it
    with isinstance(); behaves exactly like QGraphicsLineItem.
    """
    pass
class SequenceTrackView(QGraphicsView):
    """Scrollable, horizontally-zoomable canvas showing all tracks, their
    keyframes and the moving playhead. Scene x is time * PRECISION."""
    selectionChanged = Signal()
    def __init__(self, api, headers):
        self.api = api
        self.scene = QGraphicsScene()
        QGraphicsView.__init__(self, self.scene)
        self.tracks = {}
        # Drives the playhead animation every 10ms.
        self.timer = schedule(10, self.animate)
        # Undo the PRECISION scaling so one time unit maps to one pixel.
        self.scale(1.0 / PRECISION, 1.0)
        self.setDragMode(QGraphicsView.NoDrag)
        self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
        for index, name in enumerate(self.api.sequence.keys()):
            track = SequenceTrack(self.api, name, index)
            self.scene.addItem(track)
            self.tracks[name] = track
        # The playhead line spanning all tracks.
        self.time = SequenceTime(0, 1, 0, self.scene.height() - 2)
        self.time.setPen(QPen(QApplication.palette().highlight(), 1))
        self.time.setFlags(QGraphicsItem.ItemIgnoresTransformations)
        self.scene.addItem(self.time)
        self.api.playback.updated.connect(self.update)
        self.api.sequence.updated.connect(self.update)
        self.api.sequence.dataLoaded.connect(self.reload)
        headers.addKeyframe.connect(self.addKeyframe)
        # Keep the header column and the track area vertically in sync.
        headers.verticalScrollBar().valueChanged.connect(lambda value: self.verticalScrollBar().setValue(value))
        self.verticalScrollBar().valueChanged.connect(lambda value: headers.verticalScrollBar().setValue(value))
        self.scene.selectionChanged.connect(self.selectionChanged.emit)
    def reload(self):
        # Sequence data was (re)loaded: rebuild every track's keyframes.
        for track in self.tracks.values():
            track.reload()
    def selectedKeyframes(self):
        return [key for key in self.scene.selectedItems() if isinstance(key, SequenceKeyframe)]
    def allKeyframes(self):
        return [key for key in self.scene.items() if isinstance(key, SequenceKeyframe)]
    def addKeyframe(self, name):
        self.tracks[name].addKeyframe()
    def clearKeyframes(self):
        for track in self.tracks.values():
            track.clearKeyframes()
    def deleteSelectedKeyframes(self):
        for selected in self.selectedKeyframes():
            selected.delete()
    def selectAllKeyframes(self):
        for child in self.allKeyframes():
            child.setSelected(True)
    def selectAdjacentKeyframes(self):
        # Extend the selection to keyframes within ADJACENT time units of any
        # currently selected keyframe, across all tracks.
        for selected in self.selectedKeyframes():
            for child in self.allKeyframes():
                if abs(child.time - selected.time) < ADJACENT:
                    child.setSelected(True)
    def selectNextKeyframe(self):
        # Per track, move the selection to the next keyframe in time.
        selectionSorted = sorted(self.selectedKeyframes(), key=attrgetter('time'))
        trackSelection = {key.track : key for key in selectionSorted}
        for track, selected in trackSelection.items():
            for child in sorted(track.childItems(), key=attrgetter('time')):
                if child.time > selected.time:
                    trackSelection[track] = child
                    break
        self.scene.clearSelection()
        for item in trackSelection.values():
            item.setSelected(True)
    def selectPrevKeyframe(self):
        # Per track, move the selection to the previous keyframe in time.
        selectionSorted = sorted(self.selectedKeyframes(), key=attrgetter('time'), reverse=True)
        trackSelection = {key.track : key for key in selectionSorted}
        for track, selected in trackSelection.items():
            for child in sorted(track.childItems(), key=attrgetter('time'), reverse=True):
                if child.time < selected.time:
                    trackSelection[track] = child
                    break
        self.scene.clearSelection()
        for item in trackSelection.values():
            item.setSelected(True)
    def seekSelectedKeyframe(self):
        # Jump playback to the average time of the selected keyframes.
        selected = [key.time for key in self.selectedKeyframes()]
        if selected:
            self.api.playback.pause(statistics.mean(selected))
    def update(self):
        for track in self.tracks.values():
            track.update()
    def mousePressEvent(self, event):
        # Right-drag pans (a left-button event is synthesized because
        # ScrollHandDrag only reacts to the left button); shift+left starts a
        # rubber-band selection.
        if event.button() == Qt.RightButton:
            self.setDragMode(QGraphicsView.ScrollHandDrag)
            QGraphicsView.mousePressEvent(self, QMouseEvent(
                QEvent.GraphicsSceneMousePress,
                event.pos(),
                Qt.MouseButton.LeftButton,
                Qt.MouseButton.LeftButton,
                Qt.KeyboardModifier.NoModifier
            ))
        elif event.button() == Qt.LeftButton:
            if event.modifiers() == Qt.ShiftModifier:
                self.setDragMode(QGraphicsView.RubberBandDrag)
            # NOTE(review): the base handler appears to run twice for left
            # presses as written (upstream LeagueDirector has a single call);
            # indentation was reconstructed — confirm whether the second call
            # is intentional.
            QGraphicsView.mousePressEvent(self, event)
            QGraphicsView.mousePressEvent(self, event)
    def mouseDoubleClickEvent(self, event):
        # Double-click on empty space seeks playback to that time.
        QGraphicsView.mouseDoubleClickEvent(self, event)
        if not self.scene.selectedItems() and not event.isAccepted():
            self.api.playback.pause(self.mapToScene(event.pos()).x() / PRECISION)
    def mouseReleaseEvent(self, event):
        QGraphicsView.mouseReleaseEvent(self, event)
        self.setDragMode(QGraphicsView.NoDrag)
    def wheelEvent(self, event):
        # Mouse wheel zooms the time axis only.
        if event.angleDelta().y() > 0:
            self.scale(1.1, 1.0)
        else:
            self.scale(0.9, 1.0)
    def animate(self):
        # Timer callback: keep the playhead at the current playback time.
        self.time.setPos(self.api.playback.currentTime * PRECISION, 0)
class SequenceCombo(QComboBox):
    """Drop-down for choosing which saved sequence is loaded."""
    def __init__(self, api):
        QComboBox.__init__(self)
        self.api = api
        self.update()
        self.api.sequence.namesLoaded.connect(self.update)
        self.activated.connect(self.onActivated)
    def onActivated(self, index):
        # Load the sequence the user picked.
        self.api.sequence.load(self.itemText(index))
    def showPopup(self):
        # Refresh the name list just before showing it so new files appear.
        self.api.sequence.reloadNames()
        QComboBox.showPopup(self)
    def update(self):
        # Rebuild the item list from the model and restore the current index.
        self.clear()
        for name in self.api.sequence.names:
            self.addItem(name)
        self.setCurrentIndex(self.api.sequence.index)
class SequenceSelectedView(QWidget):
    def __init__(self, api, tracks):
        """Form for editing the currently selected keyframes' time/blend/value."""
        QWidget.__init__(self)
        self.api = api
        self.api.playback.updated.connect(self.update)
        self.api.sequence.updated.connect(self.update)
        self.tracks = tracks
        self.tracks.selectionChanged.connect(self.update)
        self.form = QFormLayout(self)
        self.setLayout(self.form)
        # NOTE: this calls the widget-building method defined on this class
        # (it shadows QWidget.layout()), not the Qt layout accessor.
        self.layout()
        self.update()
def layout(self):
self.label = QLabel()
self.time = FloatInput()
self.blend = QComboBox()
self.value = HBoxWidget()
self.valueLabel = QLabel('Multiple Selected')
self.valueFloat = FloatInput()
self.valueBool = BooleanInput()
self.valueVector = VectorInput()
self.valueColor = ColorInput()
self.value.addWidget(self.valueLabel)
self.value.addWidget(self.valueFloat)
self.value.addWidget(self.valueBool)
self.value.addWidget(self.valueVector)
self.value.addWidget(self.valueColor)
self.blend.activated.connect(self.updateBlend)
for option in self.api.sequence.blendOptions:
self.blend.addItem(option)
self.blendHelp = QPushButton()
self.blendHelp.setFixedWidth(20)
self.blendHelp.setIcon(self.style().standardIcon(QStyle.SP_TitleBarContextHelpButton))
self.blendHelp.clicked.connect(self.openBlendHelp)
self.form.addRow('', self.label)
self.form.addRow('Time', self.time)
self.form.addRow('Blend', HBoxWidget(self.blend, self.blendHelp))
self.form.addRow('Value', self.value)
self.time.valueChanged.connect(self.updateTime)
self.valueFloat.valueChanged.connect(self.updateValue)
self.valueBool.valueChanged.connect(self.updateValue)
self.valueVector.valueChanged.connect(self.updateValue)
self.valueColor.valueChanged.connect(self.updateValue)
self.blend.activated.connect(self.updateBlend)
def openBlendHelp(self):
threading.Thread(target=lambda: webbrowser.open_new('https://easings.net')).start()
def update(self):
selected = self.tracks.selectedKeyframes()
self.setVisible(len(selected))
self.time.setRange(0, self.api.playback.length)
blending = list(set([key.blend for key in selected]))
self.label.setText("{} keyframes selected".format(len(selected)))
if len(blending) == 1:
self.blend.setCurrentText(blending[0])
else:
self.blend.setCurrentIndex(-1)
times = list(set([key.time for key in selected]))
if len(times):
self.time.update(times[0])
if len(set([key.valueType for key in selected])) == 1:
valueType = selected[0].valueType
if valueType == 'float':
self.valueFloat.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(True)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
elif valueType == 'bool':
self.valueBool.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(True)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
elif valueType == 'vector':
self.valueVector.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(True)
self.valueColor.setVisible(False)
elif valueType == 'color':
self.valueColor.update(selected[0].value)
self.valueLabel.setVisible(False)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(True)
else:
self.valueLabel.setVisible(True)
self.valueFloat.setVisible(False)
self.valueBool.setVisible(False)
self.valueVector.setVisible(False)
self.valueColor.setVisible(False)
def updateTime(self):
for item in self.tracks.selectedKeyframes():
item.time = self.time.value()
def updateValue(self, value):
for item in self.tracks.selectedKeyframes():
item.value = value
def updateBlend(self, index):
for item in self.tracks.selectedKeyframes():
item.blend = self.blend.itemText(index)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.