| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
setup.py
|
jowanpittevils/Databasemanager_Signalplotter
| 0
|
12781951
|
<gh_stars>0
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="signalplotterV2", # Replace with your own username
version="0.0.1",
author="<NAME>; <NAME>",
author_email="<EMAIL>",
description="A package for exploring databases and plotting biomedical data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jowanpittevils/Databasemanager_Signalplotter",
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD 3-Clause",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
install_requires=["matplotlib","PyQt5","numpy","datetime","pyqtgraph","PyQtChart"],
python_requires=">=3.6",
)
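# --- Hedged usage note (illustrative, not part of the original setup.py) ---
# With this src/ layout, the package would typically be built and installed
# locally along these lines (commands shown as comments):
#   python -m pip install build
#   python -m build              # writes an sdist and a wheel into dist/
#   python -m pip install dist/*.whl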
| 1.507813
| 2
|
SecureWitness/accounts/admin.py
|
vivianbuan/cs3240-s15-team20
| 0
|
12781952
|
from django.contrib import admin
from accounts.models import UserGroup, UserProfile
# Register your models here.
class UserAdmin(admin.ModelAdmin):
pass
class GroupAdmin(admin.ModelAdmin):
pass
admin.site.register(UserProfile, UserAdmin)
admin.site.register(UserGroup, GroupAdmin)
| 1.695313
| 2
|
A1/python_scripts/draw_scripts.py
|
ankurshaswat/COL819
| 0
|
12781953
|
<reponame>ankurshaswat/COL819
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
CHORD_LOGS_PATH = '../chord/logs/'
PASTRY_LOGS_PATH = '../pastry/logs/'
def read(path):
lis = []
with open(path, 'r') as file:
lines = file.readlines()
lis = [int(i.strip()) for i in lines]
return lis
def mean(lst):
return sum(lst)/len(lst)
def draw_all_graphs(initial_path, naming):
num_keys_100 = read(initial_path + 'sim1_keys_before.txt')
num_keys_500 = read(initial_path + 'sim2_keys_before.txt')
num_keys_1000 = read(initial_path + 'sim3_keys_before.txt')
path_lengths_100 = read(initial_path + 'sim1_before.txt')
path_lengths_500 = read(initial_path + 'sim2_before.txt')
path_lengths_1000 = read(initial_path + 'sim3_before.txt')
avg_len_100 = mean(path_lengths_100)
avg_len_500 = mean(path_lengths_500)
avg_len_1000 = mean(path_lengths_1000)
num_keys_100_af = read(initial_path + 'sim1_keys_after.txt')
num_keys_500_af = read(initial_path + 'sim2_keys_after.txt')
num_keys_1000_af = read(initial_path + 'sim3_keys_after.txt')
path_lengths_100_af = read(initial_path + 'sim1_after.txt')
path_lengths_500_af = read(initial_path + 'sim2_after.txt')
path_lengths_1000_af = read(initial_path + 'sim3_after.txt')
avg_len_100_af = mean(path_lengths_100_af)
avg_len_500_af = mean(path_lengths_500_af)
avg_len_1000_af = mean(path_lengths_1000_af)
# * Number of nodes vs. Number of keys per node
print(naming, 'Number of nodes vs. Number of keys per node')
data = [[] for i in range(10)]
labels = ['' for i in range(10)]
data[0], labels[0] = num_keys_100, 100
data[4], labels[4] = num_keys_500, 500
data[9], labels[9] = num_keys_1000, 1000
plt.boxplot(data, labels=labels, showfliers=False,
showmeans=True, meanline=True)
plt.title('Number of keys per node vs. number of nodes (Before deletion of half nodes)')
plt.xlabel('Number of nodes')
plt.ylabel('Num of keys per node')
plt.savefig(
'graphs/'+naming+'_num_keys_per_node_vs_num_nodes_before.svg', format='svg')
plt.clf()
data = [[] for i in range(10)]
labels = ['' for i in range(10)]
data[0], labels[0] = num_keys_100_af, 100
data[4], labels[4] = num_keys_500_af, 500
data[9], labels[9] = num_keys_1000_af, 1000
plt.boxplot(data, labels=labels, showfliers=False,
showmeans=True, meanline=True)
plt.title('Number of keys per node vs. number of nodes (After deletion of half nodes)')
plt.xlabel('Number of nodes')
plt.ylabel('Num of keys per node')
plt.savefig(
'graphs/'+naming+'_num_keys_per_node_vs_num_nodes_after.svg', format='svg')
plt.clf()
# * PDF for num keys per node
print(naming, 'PDF for num keys per node')
plt.title('PDF of number of keys per node (Before deletion)')
plt.xlabel('Number of keys per node')
plt.ylabel('Probability')
bins = np.arange(0, max(max(num_keys_100),
max(num_keys_500), max(num_keys_1000)) + 1.5) - 0.5
sns.distplot(num_keys_100, hist=False, bins=bins,
kde_kws={'bw': 1}, label='100 Nodes').set(xlim=(0))
sns.distplot(num_keys_500, hist=False, bins=bins,
kde_kws={'bw': 1}, label='500 Nodes').set(xlim=(0))
sns.distplot(num_keys_1000, hist=False, bins=bins,
kde_kws={'bw': 1}, label='1000 Nodes').set(xlim=(0))
plt.legend(title='Legend')
plt.savefig('graphs/'+naming +
'_pdf_num_keys_per_node_before.svg', format='svg')
plt.clf()
plt.title('PDF of number of keys per node (After deletion)')
plt.xlabel('Number of keys per node')
plt.ylabel('Probability')
bins = np.arange(0, max(max(num_keys_100_af),
max(num_keys_500_af), max(num_keys_1000_af)) + 1.5) - 0.5
sns.distplot(num_keys_100_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='100 Nodes').set(xlim=(0))
sns.distplot(num_keys_500_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='500 Nodes').set(xlim=(0))
sns.distplot(num_keys_1000_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='1000 Nodes').set(xlim=(0))
plt.legend(title='Legend')
plt.savefig('graphs/'+naming +
'_pdf_num_keys_per_node_after.svg', format='svg')
plt.clf()
# * num keys per node vs. num keys
print(naming, 'num keys per node vs. num keys')
data = []
labels = []
for i in range(1, 11):
labels.append(i)
data.append(read(initial_path + 'keys_var'+str(i)+"_keys_before.txt"))
plt.boxplot(data, labels=labels, showfliers=False,
showmeans=True, meanline=True)
plt.title('Variation of number of keys per node with increasing keys (1000 Nodes)')
plt.xlabel('Number of keys (x10000)')
plt.ylabel('Number of keys per node')
plt.savefig('graphs/'+naming+'_num_keys_per_node_dist.svg', format='svg')
plt.clf()
# * Path length vs. number of nodes
print(naming, 'Path length vs. number of nodes')
data = [[] for i in range(10)]
labels = ['' for i in range(10)]
data[0], labels[0] = path_lengths_100, 100
data[4], labels[4] = path_lengths_500, 500
data[9], labels[9] = path_lengths_1000, 1000
plt.boxplot(data, labels=labels, showfliers=False,
showmeans=True, meanline=True)
plt.title('Path length vs. num nodes (Before deletion)')
plt.xlabel('Number of Nodes')
plt.ylabel('Path length')
plt.savefig('graphs/'+naming +
'_path_length_vs_num_nodes_before.svg', format='svg')
plt.clf()
data = [[] for i in range(10)]
labels = ['' for i in range(10)]
data[0], labels[0] = path_lengths_100_af, 100
data[4], labels[4] = path_lengths_500_af, 500
data[9], labels[9] = path_lengths_1000_af, 1000
plt.boxplot(data, labels=labels, showfliers=False,
showmeans=True, meanline=True)
plt.title('Path length vs. number of nodes (After deletion)')
plt.xlabel('Number of nodes')
plt.ylabel('Path length')
plt.savefig('graphs/'+naming +
'_path_length_vs_num_nodes_after.svg', format='svg')
plt.clf()
# * PDF for path length
print(naming, 'PDF for path length')
plt.title('PDF of path length (Before deletion)')
plt.xlabel('Path length')
plt.ylabel('Probability')
bins = np.arange(0, max(max(path_lengths_100),
max(path_lengths_500), max(path_lengths_1000)) + 1.5) - 0.5
sns.distplot(path_lengths_100, hist=False, bins=bins,
kde_kws={'bw': 1}, label='100 Nodes').set(xlim=(0))
sns.distplot(path_lengths_500, hist=False, bins=bins,
kde_kws={'bw': 1}, label='500 Nodes').set(xlim=(0))
sns.distplot(path_lengths_1000, hist=False, bins=bins,
kde_kws={'bw': 1}, label='1000 Nodes').set(xlim=(0))
plt.legend(title='Legend')
plt.savefig('graphs/'+naming+'_pdf_path_len_before.svg', format='svg')
plt.clf()
plt.title('PDF of path length (After deletion)')
plt.xlabel('Path length')
plt.ylabel('Probability')
bins = np.arange(0, max(max(path_lengths_100_af),
max(path_lengths_500_af), max(path_lengths_1000_af)) + 1.5) - 0.5
sns.distplot(path_lengths_100_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='100 Nodes').set(xlim=(0))
sns.distplot(path_lengths_500_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='500 Nodes').set(xlim=(0))
sns.distplot(path_lengths_1000_af, hist=False, bins=bins,
kde_kws={'bw': 1}, label='1000 Nodes').set(xlim=(0))
plt.legend(title='Legend')
plt.savefig('graphs/'+naming+'_pdf_path_len_after.svg', format='svg')
plt.clf()
# * Avg number of hops vs. nodes
print(naming, 'Avg number of hops vs. nodes')
x, y = [100, 500, 1000, 2000, 5000, 10000], []
# logs = np.log(x)
for i in range(1, 7):
lst = read(initial_path + 'nodes_var'+str(i)+"_before.txt")
y.append(sum(lst) / len(lst))
popt, _ = curve_fit(func, x, y)
# base = np.log(100)/y[0]
# const = y[1]*base/np.log(500)
plt.plot(x, y, marker='o', label=naming.title())
plt.plot(x, popt[0]*np.log(x), marker='o', label='O(log(N))')
plt.title('Avg. number of hops vs number of nodes')
plt.xlabel('Number of nodes')
plt.xscale('log')
plt.ylabel('Avg. number of hops')
plt.legend()
plt.savefig('graphs/'+naming+'_avg_hops_vs_node.svg', format='svg')
plt.clf()
# * Avg. number of hops comparison with deletion
print(naming, 'Avg. number of hops comparison with deletion')
x, y = [100, 500, 1000], [avg_len_100, avg_len_500, avg_len_1000]
y2 = [avg_len_100_af, avg_len_500_af, avg_len_1000_af]
plt.plot(x, y, 'bo-', label='Before deletion')
plt.plot(x, y2, 'ro-', label='After deletion')
plt.title('Avg. number of hops vs number of nodes')
plt.xlabel('Number of nodes')
plt.xscale('log')
plt.ylabel('Avg. number of hops')
plt.legend()
plt.savefig('graphs/'+naming +
'_avg_hops_vs_node_deletions.svg', format='svg')
plt.clf()
# * Path length histogram
print(naming, 'Path length histograms')
plt.title('Path length Distribution Histogram (100 Nodes)')
plt.xlabel('Path length')
plt.ylabel('Number of searches')
bins = np.arange(0, max(max(path_lengths_100),
max(path_lengths_100_af)) + 1.5) - 0.5
plt.hist([path_lengths_100, path_lengths_100_af], bins,
label=['Before Deletion', 'After Deletion'])
plt.legend(title='Legend')
plt.savefig('graphs/'+naming+'_histogram_path_len_100.svg', format='svg')
plt.clf()
bins = np.arange(0, max(max(path_lengths_500),
max(path_lengths_500_af)) + 1.5) - 0.5
plt.title('Path length Distribution Histogram (500 Nodes)')
plt.xlabel('Path length')
plt.ylabel('Number of searches')
plt.hist([path_lengths_500, path_lengths_500_af], bins,
label=['Before Deletion', 'After Deletion'])
plt.legend(title='Legend')
plt.savefig('graphs/'+naming+'_histogram_path_len_500.svg', format='svg')
plt.clf()
bins = np.arange(0, max(max(path_lengths_1000),
max(path_lengths_1000_af)) + 1.5) - 0.5
plt.title('Path length Distribution Histogram (1000 Nodes)')
plt.xlabel('Path length')
plt.ylabel('Number of searches')
plt.hist([path_lengths_1000, path_lengths_1000_af], bins,
label=['Before Deletion', 'After Deletion'])
plt.legend(title='Legend')
plt.savefig('graphs/'+naming+'_histogram_path_len_1000.svg', format='svg')
plt.clf()
def func(x, a):
return a*np.log(x)
if __name__ == "__main__":
draw_all_graphs(CHORD_LOGS_PATH, 'chord')
draw_all_graphs(PASTRY_LOGS_PATH, 'pastry')
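# --- Hedged note on the assumed input format (not part of the original) ---
# read() expects each log file under ../chord/logs/ and ../pastry/logs/ to
# contain one integer per line (a key count or a per-lookup hop count), e.g.
#   4
#   7
#   5
# curve_fit(func, x, y) then fits the single parameter a of a*log(N) to the
# measured average hop counts, giving the O(log N) reference curve that is
# plotted next to the simulation results.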
| 2.4375
| 2
|
hydrachain/examples/native/fungible/test_fungible_contract.py
|
bts/hydrachain
| 406
|
12781954
|
from ethereum import tester
import hydrachain.native_contracts as nc
from fungible_contract import Fungible, Transfer, Approval
import ethereum.slogging as slogging
log = slogging.get_logger('test.fungible')
def test_fungible_instance():
state = tester.state()
creator_address = tester.a0
creator_key = tester.k0
nc.registry.register(Fungible)
# Create proxy
EUR_address = nc.tester_create_native_contract_instance(state, creator_key, Fungible)
fungible_as_creator = nc.tester_nac(state, creator_key, EUR_address)
# Initalize fungible with a fixed quantity of fungibles.
fungible_total = 1000000
fungible_as_creator.init(fungible_total)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total
nc.registry.unregister(Fungible)
def test_fungible_template():
"""
Tests:
Fungible initialization as Creator,
Creator sends Fungibles to Alice,
Alice sends Fungibles to Bob,
Bob approves Creator to spend Fungibles on his behalf,
Creator allocates these Fungibles from Bob to Alice,
Testing of non-standardized functions of the Fungible contract.
Events:
Checking logs from Transfer and Approval Events
"""
# Register Contract Fungible
nc.registry.register(Fungible)
# Initialize Participants and Fungible contract
state = tester.state()
logs = []
creator_address = tester.a0
creator_key = tester.k0
alice_address = tester.a1
alice_key = tester.k1
bob_address = tester.a2
bob_key = tester.k2
# Create proxy
nc.listen_logs(state, Transfer, callback=lambda e: logs.append(e))
nc.listen_logs(state, Approval, callback=lambda e: logs.append(e))
fungible_as_creator = nc.tester_nac(state, creator_key, Fungible.address)
# Initalize fungible with a fixed quantity of fungibles.
fungible_total = 1000000
fungible_as_creator.init(fungible_total)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total
# Creator transfers Fungibles to Alice
send_amount_alice = 700000
fungible_as_creator.transfer(alice_address, send_amount_alice)
assert fungible_as_creator.balanceOf(creator_address) == fungible_total - send_amount_alice
assert fungible_as_creator.balanceOf(alice_address) == send_amount_alice
# Check logs data of Transfer Event
assert len(logs) == 1
l = logs[0]
assert l['event_type'] == 'Transfer'
assert l['from'] == creator_address
assert l['to'] == alice_address
# Build transaction Log arguments and check sent amount
assert l['value'] == send_amount_alice
# Alice transfers Fungibles to Bob
send_amount_bob = 400000
# Create proxy for Alice
fungible_as_alice = nc.tester_nac(state, alice_key, Fungible.address)
fungible_as_alice.transfer(bob_address, send_amount_bob)
# Test balances of Creator, Alice and Bob
creator_balance = fungible_total - send_amount_alice
alice_balance = send_amount_alice - send_amount_bob
bob_balance = send_amount_bob
assert fungible_as_alice.balanceOf(creator_address) == creator_balance
assert fungible_as_alice.balanceOf(alice_address) == alice_balance
assert fungible_as_alice.balanceOf(bob_address) == bob_balance
# Create proxy for Bob
fungible_as_bob = nc.tester_nac(state, bob_key, Fungible.address)
approved_amount_bob = 100000
assert fungible_as_bob.allowance(creator_address) == 0
# Bob approves Creator to spend Fungibles
assert fungible_as_bob.allowance(creator_address) == 0
fungible_as_bob.approve(creator_address, approved_amount_bob)
assert fungible_as_bob.allowance(creator_address) == approved_amount_bob
# Test transferFrom function, i.e. direct debit.
fungible_as_creator.transferFrom(bob_address, alice_address, approved_amount_bob)
# Test balances
alice_balance += approved_amount_bob
bob_balance -= approved_amount_bob
assert fungible_as_alice.balanceOf(creator_address) == creator_balance
assert fungible_as_alice.balanceOf(alice_address) == alice_balance
assert fungible_as_alice.balanceOf(bob_address) == bob_balance
# Check logs data of Transfer Event
assert len(logs) == 4
l = logs[-1]
assert l['event_type'] == 'Transfer'
assert l['from'] == bob_address
assert l['to'] == alice_address
# Build transaction Log arguments and check sent amount
assert l['value'] == approved_amount_bob
# Testing account information
# Now we should have three Fungible accounts
assert 3 == fungible_as_alice.num_accounts()
r = fungible_as_creator.get_creator()
assert r == creator_address
r = fungible_as_creator.get_accounts()
assert set(r) == set([creator_address, alice_address, bob_address])
print(logs)
while logs and logs.pop():
pass
nc.registry.unregister(Fungible)
| 2.1875
| 2
|
CrittersProto/generator/fileout_test.py
|
nickjbenson/Kami
| 1
|
12781955
|
# fileout_test.py
fi =
| 0.929688
| 1
|
videoTOvideo_language.py
|
MuskanM1/Ingenious_hackathon_Enigma
| 2
|
12781956
|
<gh_stars>1-10
# Video to Audio
"""
import moviepy.editor as mp
clip = mp.VideoFileClip("/content/drive/My Drive/Hackathon_2020_Enigma/Video.mp4")
clip.audio.write_audiofile("/content/drive/My Drive/Hackathon_2020_Enigma/Video.wav")
'''
# convert mp4 to mp3
audio = AudioSegment.from_file("/content/drive/My Drive/Hackathon_2020_Enigma/Video.mp4", format="mp4")
audio.export("/content/drive/My Drive/Hackathon_2020_Enigma/audio.mp3", format="mp3")
-------------------
import subprocess
command = "ffmpeg -i /content/drive/My Drive/Hackathon_2020_Enigma/Video.mp4 -ab 160k -ac 2 -ar 44100 -vn /content/drive/My Drive/Hackathon_2020_Enigma/audio.wav"
subprocess.call(command, shell=True)
'''
# convert mp3 to wav
sound = AudioSegment.from_mp3("/content/drive/My Drive/Hackathon_2020_Enigma/audio.mp3")
sound.export("/content/drive/My Drive/Hackathon_2020_Enigma/audio.wav", format="wav")
"""# Audio to Text"""
!pip install SpeechRecognition
!pip install pydub
import speech_recognition as sr
from pydub import AudioSegment
from pydub.silence import split_on_silence
r = sr.Recognizer()
with sr.AudioFile('/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio.wav') as source:
audio = r.listen(source)
try:
#text = r.recognize_google(audio, language = 'en-UK')
text = r.recognize_google(audio)
print("Working...")
print(text)
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results. check your internet connection")
except:
print("Sorry...")
from pydub.silence import split_on_silence
import speech_recognition as sr
from pydub import AudioSegment
from pydub.utils import make_chunks
'''
song = AudioSegment.from_wav('/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio.wav')
fh = open("/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio_text.txt", "w+")
chunks = split_on_silence(song,
min_silence_len = 5000,
silence_thresh = -16
)
i=0
# process each chunk
for chunk in chunks:
# Create 0.5 seconds silence chunk
#chunk_silent = AudioSegment.silent(duration = 10)
# add 0.5 sec silence to beginning and
# end of audio chunk. This is done so that
# it doesn't seem abruptly sliced.
#audio_chunk = chunk_silent + chunk + chunk_silent
audio_chunk = chunk
# export audio chunk and save it in
# the current directory.
print("saving chunk{0}.wav".format(i))
# specify the bitrate to be 192 k
audio_chunk.export("/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio_chunks/chunk{0}.wav".format(i), bitrate ='192k', format ="wav")
# the name of the newly created chunk
filename = '/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio_chunks/chunk'+str(i)+'.wav'
print("Processing chunk "+str(i))
# get the name of the newly created chunk
# in the AUDIO_FILE variable for later use.
file = filename
# create a speech recognition object
r = sr.Recognizer()
# recognize the chunk
with sr.AudioFile(file) as source:
# remove this if it is not working
# correctly.
#r.adjust_for_ambient_noise(source)
audio_listened = r.listen(source)
try:
# try converting it to text
rec = r.recognize_google(audio_listened)
print(rec)
# write the output to the file.
fh.write(rec+". ")
# catch any errors.
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results. check your internet connection")
i += 1
'''
#-- Audio -> small chunks -> text -> srt -> files
import speech_recognition as sr
from pydub import AudioSegment
from pydub.utils import make_chunks
myaudio = AudioSegment.from_file("/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio.wav" , "wav")
chunk_length_ms = 5000 # pydub calculates in millisec
chunks = make_chunks(myaudio, chunk_length_ms) # Make chunks of five sec
#Export all of the individual chunks as wav files
full_text = ""
srt_text = ""
t_sec = 0
t_min = 0
t_hour = 0
for i, chunk in enumerate(chunks):
chunk_name = "/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio_chunks/chunk{0}.wav".format(i)
#print ("exporting", chunk_name)
chunk.export(chunk_name, format="wav")
r = sr.Recognizer()
with sr.AudioFile(chunk_name) as source:
audio = r.listen(source)
try:
#text = r.recognize_google(audio, language = 'en-UK')
text = r.recognize_google(audio)
# print("---------Working...-------------\n")
# print(text)
full_text = full_text + text + ' '
#print(srt_text)
except sr.UnknownValueError:
print("----------Could not understand audio----------\n")
except sr.RequestError as e:
print("----------Could not request results. check your internet connection----------\n")
except:
print("----------Sorry...---------\n")
t_sec = t_sec + 5
print(full_text)
full_text_file = open("/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/audio_chunks/recognized.txt", "w")
full_text_file.write(full_text)
full_text_file.close()
"""# Punctuation"""
!pip install punctuator
!gdown https://drive.google.com/uc?id=0B7BsN5f2F1fZd1Q0aXlrUDhDbnM
from punctuator import Punctuator
p = Punctuator('/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/Model/Demo-Europarl-EN.pcl')
punc_full_text = p.punctuate(full_text)
print(punc_full_text)
"""# Translation"""
!pip install googletrans
from googletrans import Translator
translator = Translator()
translated_text = translator.translate(punc_full_text, dest='hi').text
print(translated_text)
"""# Text to speech"""
!pip install pyttsx3==2.7
!pip install talkey
!pip install py-espeak-ng
!pip install gTTS
'''
import pyttsx3
engine = pyttsx3.init() # object creation
""" RATE"""
rate = engine.getProperty('rate') # getting details of current speaking rate
print (rate) #printing current voice rate
engine.setProperty('rate', 125) # setting up new voice rate
"""VOLUME"""
volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
print (volume) #printing current volume level
engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
"""VOICE"""
voices = engine.getProperty('voices') #getting details of current voice
#engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
engine.say("Hello World!")
engine.say('My current speaking rate is ' + str(rate))
engine.runAndWait()
engine.stop()
'''
from gtts import gTTS
mytext = full_text
# Language in which you want to convert
language = 'en'
# Passing the text and language to the engine;
# slow=True tells the module that the converted
# audio should be read at a slower speed
myobj = gTTS(text=mytext, lang=language, slow=True)
myobj.save("/content/drive/My Drive/Hackathon_2020_Enigma/Video-Text/welcome.wav")
"""# Audio Remove"""
import subprocess
command = 'for file in *.mp4; do ffmpeg -i "$file" -c copy -an "noaudio_$file"; done'
subprocess.call(command, shell=True)
from google.colab import drive
drive.mount('/content/drive')
| 3.015625
| 3
|
UpdateAllRepositories.py
|
alencodes/SVN_Repo_Update
| 0
|
12781957
|
import os
import subprocess
import time
import logging
from re import sub
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-p", "--path", dest="root_path",
help="set root path to start search", metavar="PATH")
(options, args) = parser.parse_args()
root_path = options.root_path if options.root_path else '.'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='SVNUpdate.log',
filemode='a')
startupinfo = None
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
def main():
if is_svn_installed():
update_all_repo()
else:
print('Please install SVN command line tools to use this application')
def is_svn_installed():
cmd = 'svn --version'
try:
subprocess.Popen(cmd, startupinfo=startupinfo,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return True
except Exception as e:
return False
def update_all_repo():
logging.info('Update started @ : {}'.format(time.asctime(time.localtime(time.time()))))
count = 0
print('Collecting SVN repositories')
for root, dirs, files in os.walk(root_path, topdown=False):
for name in dirs:
if name == '.svn':
count += 1
svn_dir = os.path.join(root, name)[2:-5]
print('Updating ' + svn_dir)
cmd = 'svn up "' + svn_dir + '"'
try:
p = subprocess.Popen(cmd, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pout, _ = p.communicate()
pout = sub('[\n\r]', '', pout.decode('utf-8'))
pout = sub('[:]', ' is ', pout)
logging.info('{}'.format(pout))
p.wait()
except Exception as e:
print('Whoops !! Something went wrong, check log for more info')
logging.error('{}'.format(e))
print('Total svn repositories updated : {}'.format(str(count)))
logging.info('Total svn repositories updated : {}'.format(str(count)))
logging.info('Update done @ : {}'.format(time.asctime(time.localtime(time.time()))))
logging.shutdown()
if __name__ == '__main__':
main()
| 2.453125
| 2
|
tests/plugins/test_kubejobs.py
|
ufcg-lsd/asperathos-integration-tests
| 1
|
12781958
|
import requests
import pytest
import subprocess
from datetime import datetime
from helpers import wait_for_grafana_url_generation, create_job
from helpers import stop_job, MANAGER_URL, VISUALIZER_URL, get_jobs, delete_job
from helpers import restart_container, wait_for_job_complete
from helpers.fixtures import job_payload, manager_container_id
def test_create_job(job_payload):
""" Tests if a Job is being created successfully
Arguments:
job_payload {dict} -- A pytest fixture providing the Job
payload to be sent to Asperathos
Returns:
None
"""
response = requests.post(MANAGER_URL + '/submissions', json=job_payload)
response_payload = response.json()
assert response.ok
assert response_payload
stop_job(manager_url=MANAGER_URL, job_id=response_payload.get('job_id'))
def test_visualize_grafana():
""" Tests if the Grafana URL is being generated successfully
Arguments:
None
Returns:
None
"""
job_id = create_job(MANAGER_URL, 1)
grafana_url = wait_for_grafana_url_generation(VISUALIZER_URL, job_id)
assert requests.get(grafana_url).ok
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_controller_scales_up():
""" Tests if the Controlling is able to scale
Arguments:
None
Returns:
None
"""
INITIAL_REPLICAS = 1
job_id = create_job(MANAGER_URL, 2)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
detailed_report = requests.get(MANAGER_URL + '/submissions/{}/report'\
.format(job_id))
data = detailed_report.json()
assertion = any(data[time]['replicas'] > INITIAL_REPLICAS for time in data)
assert assertion
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_controller_scales_down():
""" Tests if the Controlling is able to scale
Arguments:
None
Returns:
None
"""
INITIAL_REPLICAS = 10
job_id = create_job(MANAGER_URL, 3)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
detailed_report = requests.get(MANAGER_URL + '/submissions/{}/report'\
.format(job_id))
data = detailed_report.json()
assertion = any(data[time]['replicas'] < INITIAL_REPLICAS for time in data)
assert assertion
stop_job(manager_url=MANAGER_URL, job_id=job_id)
def test_monitor_report_matches_detailed():
""" Tests if the metrics in the
simple report matches with the detailed one.
Arguments:
None
Returns:
None
"""
job_id = create_job(MANAGER_URL, 3)
wait_for_job_complete(MANAGER_URL, job_id, max_wait_time=180)
submission_url = MANAGER_URL + '/submissions/{}'.format(job_id)
report_url = submission_url + "/report"
monitor = requests.get(submission_url).json()
detailed = requests.get(report_url).json()
monitor_max_error,monitor_max_error_time = monitor['max_error']
monitor_min_error,monitor_min_error_time = monitor['min_error']
monitor_last_error,monitor_last_error_time = monitor['final_error']
detailed_report_max_error = detailed[monitor_max_error_time]['error']
assert detailed_report_max_error == monitor_max_error
detailed_report_max_error = detailed[monitor_min_error_time]['error']
assert detailed_report_max_error == monitor_min_error
date_format = "%Y-%m-%dT%H:%M:%SZ"
last_date = datetime.strptime(monitor_last_error_time,date_format)
dates = detailed.keys()
assertion = all(datetime.strptime(date,date_format) <= last_date\
for date in dates)
assert assertion
@pytest.mark.last
def test_persistence_works(manager_container_id):
""" Tests if Job persistence is working properly
when manager is restarted
Arguments:
None
Returns:
None
"""
# This test is here to ensure there will be more than 0 jobs registered
jobs = get_jobs(MANAGER_URL)
n_jobs = len(jobs)
assert n_jobs > 0
restart_container(manager_container_id)
assert n_jobs == len(get_jobs(MANAGER_URL))
delete_job(MANAGER_URL, list(jobs.keys())[0])
assert len(get_jobs(MANAGER_URL)) < n_jobs
| 2.1875
| 2
|
repos/system_upgrade/el7toel8/actors/addupgradebootentry/libraries/library.py
|
brammittendorff/leapp-repository
| 0
|
12781959
|
<reponame>brammittendorff/leapp-repository
import os
import re
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.stdlib import api, run
from leapp.models import BootContent
def add_boot_entry():
debug = 'debug' if os.getenv('LEAPP_DEBUG', '0') == '1' else ''
kernel_dst_path, initram_dst_path = get_boot_file_paths()
run([
'/usr/sbin/grubby',
'--add-kernel', '{0}'.format(kernel_dst_path),
'--initrd', '{0}'.format(initram_dst_path),
'--title', 'RHEL-Upgrade-Initramfs',
'--copy-default',
'--make-default',
'--args', '{DEBUG} enforcing=0 rd.plymouth=0 plymouth.enable=0'.format(DEBUG=debug)
])
def get_boot_file_paths():
boot_content_msgs = api.consume(BootContent)
boot_content = next(boot_content_msgs, None)
if list(boot_content_msgs):
api.current_logger().warning('Unexpectedly received more than one BootContent message.')
if not boot_content:
raise StopActorExecutionError('Could not create a GRUB boot entry for the upgrade initramfs',
details={'details': 'Did not receive a message about the leapp-provided '
'kernel and initramfs'})
return boot_content.kernel_path, boot_content.initram_path
def write_to_file(filename, content):
with open(filename, 'w') as f:
f.write(content)
def fix_grub_config_error(conf_file):
with open(conf_file, 'r') as f:
config = f.read()
# move misplaced '"' to the end
pattern = r'GRUB_CMDLINE_LINUX=.+?(?=GRUB|\Z)'
original_value = re.search(pattern, config, re.DOTALL).group()
parsed_value = original_value.split('"')
new_value = '{KEY}"{VALUE}"{END}'.format(KEY=parsed_value[0], VALUE=''.join(parsed_value[1:]).rstrip(),
END=original_value[-1])
config = config.replace(original_value, new_value)
write_to_file(conf_file, config)
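# Hedged illustration (not part of the upstream actor): fix_grub_config_error
# is meant to repair a GRUB defaults file whose closing quote for
# GRUB_CMDLINE_LINUX was misplaced, e.g. turning
#   GRUB_CMDLINE_LINUX="rhgb quiet
#   GRUB_TIMEOUT=5
# into
#   GRUB_CMDLINE_LINUX="rhgb quiet"
#   GRUB_TIMEOUT=5
# by collecting everything up to the next GRUB_* key, dropping stray quotes
# and re-terminating the value.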
| 2.140625
| 2
|
porcupine/plugins/rstrip.py
|
rscales02/porcupine
| 0
|
12781960
|
<filename>porcupine/plugins/rstrip.py
"""Remove trailing whitespace when enter is pressed."""
from porcupine import get_tab_manager, tabs, utils
def after_enter(textwidget):
"""Strip trailing whitespace at the end of a line."""
lineno = int(textwidget.index('insert').split('.')[0]) - 1
line = textwidget.get('%d.0' % lineno, '%d.0 lineend' % lineno)
if len(line) != len(line.rstrip()):
textwidget.delete('%d.%d' % (lineno, len(line.rstrip())),
'%d.0 lineend' % lineno)
def on_new_tab(event):
if isinstance(event.data_widget, tabs.FileTab):
textwidget = event.data_widget.textwidget
def bind_callback(event):
textwidget.after_idle(after_enter, textwidget)
textwidget.bind('<Return>', bind_callback, add=True)
def setup():
utils.bind_with_data(get_tab_manager(), '<<NewTab>>', on_new_tab, add=True)
| 3.125
| 3
|
scripts/06_plot_orthogroups_venn.py
|
cmdoret/Acastellanii_genome_analysis
| 5
|
12781961
|
<gh_stars>1-10
import pandas as pd
from matplotlib_venn import venn2, venn3
import matplotlib as mpl
mpl.use("Agg")  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
vdf = pd.read_csv(snakemake.input["pres_compact"], sep="\t").drop(
"Orthogroup", axis=1
)
vdf = vdf.astype(bool)
# vdf = vdf.rename(columns={'NEFF_v1_hgt_cds': 'v1'})
# Make Venn diagram. sets are A, B, AB, C, AC, BC, ABC
fig, ax = plt.subplots(1, 2, figsize=(15, 12))
venn3(
subsets=(
len(vdf.query(" c3 and not neff and not amoeba")), # A
len(vdf.query("not c3 and neff and not amoeba")), # B
len(vdf.query(" c3 and neff and not amoeba")), # AB
len(vdf.query("not c3 and not neff and amoeba")), # C
len(vdf.query(" c3 and not neff and amoeba")), # AC
len(vdf.query("not c3 and neff and amoeba")), # BC
len(vdf.query(" c3 and neff and amoeba")), # ABC
),
set_labels=("C3", "Neff", "Amoeba"),
ax=ax[0],
)
venn2(
subsets=(
len(vdf.query(" c3 and not neff")),
len(vdf.query("not c3 and neff")),
len(vdf.query(" c3 and neff")),
),
set_labels=("C3", "Neff"),
ax=ax[1],
)
fig.savefig(snakemake.output["venn"])
| 2.40625
| 2
|
var_conv_dq.py
|
TeodorPoncu/variational-conv-dequantization
| 0
|
12781962
|
<reponame>TeodorPoncu/variational-conv-dequantization
import torch
from torch import nn
from torch.nn import functional as F
from typing import Tuple
from torch.distributions import Normal
class TestConvDequantize(nn.Module):
"""
Please refer to the VariationalConvDequantize class below for implementation details
This is a dummy class with extended return types for testing purposes and sanity checks
"""
def __init__(self, in_channels: int, z_channels: int):
super(TestConvDequantize, self).__init__()
self.in_channels = in_channels
self.z_channels = z_channels
self.latent = nn.Sequential(
nn.Conv2d(in_channels, z_channels, kernel_size=(3, 3), padding=(1, 1)),
nn.LeakyReLU(),
)
self.mlp = nn.Sequential(
nn.Linear(z_channels, z_channels),
nn.LeakyReLU(),
nn.Linear(z_channels, 2)
)
def dequantize(self, x: torch.Tensor):
latents = self.latent(x)
b, c, h, w = latents.shape
latents = latents.permute(0, 2, 3, 1)
latents = latents.reshape(b * h * w, self.z_channels)
latents = self.mlp(latents)
mu, var = torch.chunk(latents, chunks=2, dim=1)
noise_original = torch.randn(size=(b * h * w, self.in_channels))
noise = (noise_original + mu) * var
noise = noise.view(b, h, w, self.in_channels)
noise = noise.permute(0, 3, 1, 2)
return noise, noise_original, mu, var
def forward(self, x: torch.Tensor):
variational, noise, mu, var = self.dequantize(x)
dequantize = x + variational
return dequantize, variational, noise, mu, var
class VariationalConvDequantize(nn.Module):
"""
Module that performs variational dequantization similarly to Flow++ (https://arxiv.org/abs/1902.00275)
Used when dealing with spatially dependent quantized embeddings, i.e. mu and var are obtained from a feature
vector that is the result of a convolution operation with kernel_size > 1
The feature vector for z_{B x H x W} is obtained by performing a convolution around z_{B x H x W}, then a MLP
extracts mu_{B x H x W}, respectively var_{B x H x W}
"""
def __init__(self, in_channels: int, z_channels: int):
super(VariationalConvDequantize, self).__init__()
self.in_channels = in_channels
self.z_channels = z_channels
# use a convolution as a non-linear for spatial awareness and cheap dimensionality reduction
# can change kernel size to (1, 1) in order to obtain classic variational dequantization
self.latent = nn.Sequential(
nn.Conv2d(in_channels, z_channels, kernel_size=(3, 3), padding=(1, 1)),
nn.LeakyReLU(),
)
# use a mlp to get mean and variance
self.mlp = nn.Sequential(
nn.Linear(z_channels, z_channels),
nn.LeakyReLU(),
nn.Linear(z_channels, 2)
)
self.gaussian = Normal(loc=0., scale=1., )
def dequantize(self, x: torch.Tensor) -> torch.Tensor:
# reduce dimensionality
latents = self.latent(x)
# get latent sizes, only C dimension is different from input
b, c, h, w = latents.shape
# a linear takes input of form B x D
# swap axes to perform computation for each spatial position
# B, C, H, W -> B * H * W, C
latents = latents.permute(0, 2, 3, 1)
latents = latents.reshape(b * h * w, self.z_channels)
# get mu and var
latents = self.mlp(latents)
mu, var = torch.chunk(latents, chunks=2, dim=1)
# sample gaussian noise and add variational parameters
noise_original = torch.randn(size=(b * h * w, self.in_channels))
noise = (noise_original + mu) * var
# reshape to the original shape
noise = noise.view(b, h, w, self.in_channels)
# swap axis to preserve spatial ordering
noise = noise.permute(0, 3, 1, 2)
return noise
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# return variational noise
variational = self.dequantize(x)
# add gaussian noise
dequantize = x + variational
return dequantize, variational
def run_tests():
module = TestConvDequantize(in_channels=512, z_channels=32)
y = torch.randn(16, 512, 32, 32)
dequantized, noise, noise_original, mu, var = module.forward(y)
x = dequantized[0, :, 0, 0]
n = noise[0, :, 0, 0]
assert y.shape == noise.shape, "Failed noise generation, shape mismatch"
assert torch.allclose((x - n), y[0, :, 0, 0]), "Failed operation check, original input is not equal to self + noise"
assert torch.allclose(noise_original[0, :], n / var[0] - mu[0]) \
, "Failed operation order check, variational features do not match " \
"their original statistics at spatial positions "
if __name__ == '__main__':
run_tests()
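# Hedged usage sketch (illustrative only, not part of the module): dequantizing
# a batch of quantized embeddings z of shape (B, C, H, W):
#   dq = VariationalConvDequantize(in_channels=512, z_channels=32)
#   z = torch.randn(4, 512, 16, 16)
#   z_dequantized, noise = dq(z)   # both keep the (4, 512, 16, 16) shape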
| 2.515625
| 3
|
gauss.py
|
pituca292/calculo-numerico-algoritmos
| 0
|
12781963
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Gauss module:
# Methods to compute the solution of a linear system by Gaussian elimination
# Method to compute the error of the Gauss solution relative to the exact solution
import numpy as np
import construtor
import solve
# Computes the solution vector by Gaussian elimination.
# Inputs: matrix, vector of independent terms, number of points
# Returns: solution vector
def v_sol(m, v, n):
# Check pivots and put the matrix in row-echelon form
for j in range(n):
if m[j][j] == 0:
k = j
while True:
if 0 == m[k][j]:
k += 1
if k == n:
print("Matriz inválida")
break
else:
# Swap rows j and k of the matrix and the matching entries of v
temp = m[k].copy()
m[k] = m[j].copy()
m[j] = temp.copy()
v[j], v[k] = v[k], v[j]
break
for i in range(j + 1, n):
mult = - m[i][j] / m[j][j]
for k in range(j, n):
m[i][k] += mult * m[j][k]
v[i] += mult * v[j]
# Back-substitution on the triangular matrix
x = [None] * n
for i in range(n-1, -1, -1):
x[i] = v[i]
for j in range(i + 1, n):
x[i] -= m[i][j] * x[j]
x[i] = x[i] / m[i][i]
return x
# Computes the solution vector for the matrix of an equation by Gaussian elimination.
# Inputs: q(x), r(x), mesh of points, step size, number of points, y(a), y(b)
# Returns: solution vector
def v_sol_mh(q, r, x, h, n, a_, b_):
# Build the matrix and the vector of independent terms
m_h = construtor.matriz(q, x, h, n)
v_h = construtor.vetor(r, x, h, n, a_, b_)
# Compute and return the solution vector
return v_sol(m_h, v_h, n - 1)
# Computes the solution vector, for the matrix of an equation and several values of n, by Gaussian elimination.
# Compares the Gauss solution values with the exact solution.
# Plots the maximum error for each value of n.
# Inputs: y(x), q(x), r(x), left endpoint (a), right endpoint (b), y(a), y(b)
# Returns: vector with the maximum error for each value of n.
def erro_n(y, q, r, a, b, a_, b_, n, n_step):
# Error between the values obtained by Gaussian elimination and the known solution
e = []
# Maximum error of each iteration
e_max = []
for ni in range(5, n, n_step):
# Compute the step size for the interval
h = (b - a) / ni
# Build the mesh of points
x = []
for i in range(1, ni):
x.append(a + i * h)
# Compute the exact solution vector
v_sol = solve.v_sol(y, x)
# Compute the solution vector by Gaussian elimination
v_gauss = v_sol_mh(q, r, x, h, ni, a_, b_)
# Compare the solutions
dif = [abs(i) for i in (np.array(v_sol) - np.array(v_gauss)).tolist()]
e.append(dif)
e_max.append(np.max(dif))
return e_max
# ---------------- test ----------------
if __name__ == "__main__":
b = [[1, 2, 3], [4, 5, 8], [7, 8, 5]]
c = [10, 11, 12]
print(v_sol(b, c, 3))
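# Hedged check (worked by hand, not part of the original script): the exact
# solution of this 3x3 system is [-28/3, 29/3, 0], so the printed vector
# should be approximately [-9.3333, 9.6667, 0.0].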
| 3.5625
| 4
|
backend/course/views.py
|
ducluongtran9121/Web-Application-Project
| 1
|
12781964
|
<reponame>ducluongtran9121/Web-Application-Project
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.parsers import FormParser, MultiPartParser, JSONParser
from rest_framework.views import APIView
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.decorators import action
from django.shortcuts import get_object_or_404
from .serializers import *
from .models import *
from resource.models import File
from resource.serializers import FileSerializer
from account.serializers import MemberSerializer
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
import json
# Create your views here.
class CourseApiStructure(APIView):
def get(self, request):
return Response({
"courses/":
{
"get": "return course list of member",
"post": "create course for member"
},
"courses/pk/":
{
"get": "returns the details of course",
"put": "update course",
"delete": "delete course",
},
"courses/pk/listMember/": "list course member",
"courses/pk/addMemberWithEmail/": "add(PUT) a member to course using email",
"courses/pk/removeMemberWithEmail/": "remove(PUT) a member to course using email",
"courses/course_pk/lesson/":
{
"get": "return lesson list",
"post": "create lesson"
},
"courses/course_pk/lesson/pk/":
{
"get": "returns the details of lesson",
"put": "update lesson",
"delete": "delete lesson",
},
"courses/course_pk/lesson/lesson_pk/files/":
{
"get": "return file list",
"post": "create file"
},
"courses/course_pk/lesson/lesson_pk/files/pk/":
{
"get": "returns the details of file",
"put": "update file",
"delete": "delete file",
}}
)
class CourseViewSet(viewsets.ViewSet, viewsets.GenericViewSet):
serializer_class = CourseSerializer
queryset = Course.objects.all()
permission_classes = (IsAuthenticated,)
def list(self, request):
member_pk = request.user.id
queryset = Course.objects.filter(course_member=member_pk)
serializer = CourseSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
member_pk = request.user.id
queryset = Course.objects.filter(pk=pk, course_member=member_pk)
if queryset.exists():
serializer = CourseSerializer(queryset[0])
return Response(serializer.data)
return Response({'errors': 'Objects not found'}, status=404)
def create(self, request, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Member.objects.filter(pk=member_pk)
if queryset.exists():
serializer = CourseSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
instance.created_by = queryset[0]
instance.course_lecturer.add(queryset[0])
instance.save()
return Response(serializer.data, status=201)
return Response({'errors': 'Bad request'}, status=400)
def update(self, request, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Course.objects.filter(pk=pk, course_member=member_pk)
if queryset.exists():
instance = queryset[0]
serializer = CourseSerializer(instance=instance, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
return Response({'errors': 'Bad request'}, status=400)
def destroy(self, request, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Course.objects.filter(pk=pk, course_member=member_pk)
if queryset.exists():
instance = queryset[0]
instance.delete()
return Response(status=204)
return Response({'errors': 'Bad request'}, status=400)
@action(detail=True, methods=['get'])
def listMember(self, request, pk=None):
member_pk = request.user.id
queryset = Course.objects.filter(pk=pk, course_member=member_pk)
if queryset.exists():
serializer = MemberSerializer(queryset[0].course_member, many=True)
return Response(serializer.data)
class AddMemberWithEmail(APIView):
serializer_class = EmailSerializer
#queryset = Member.objects.filter(is_superuser=False)
permission_classes = (IsAuthenticated,)
def put(self, request, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
courseQueryset = Course.objects.filter(pk=pk, course_member=member_pk)
if courseQueryset.exists():
serializer = EmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
memberQuery = Member.objects.filter(
email=serializer.data['email'])
if not memberQuery.exists():
return Response({'errors': 'Bad request'}, status=400)
if memberQuery[0].is_superuser == True:
return Response({'errors': 'You don\'t have permission to add this user'}, status=403)
instance = courseQueryset[0]
instance.course_member.add(memberQuery[0])
if memberQuery[0].is_lecturer == True:
instance.course_lecturer.add(memberQuery[0])
return Response(serializer.data, status=200)
return Response({'errors': 'Bad request'}, status=400)
class RemoveMemberWithEmail(APIView):
serializer_class = EmailSerializer
#queryset = Member.objects.filter(is_superuser=False)
permission_classes = (IsAuthenticated,)
def put(self, request, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
courseQueryset = Course.objects.filter(pk=pk, course_member=member_pk)
if courseQueryset.exists():
serializer = EmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
memberQuery = courseQueryset.filter(
course_member__email=serializer.data['email'])
if not memberQuery.exists():
return Response({'errors': 'Member not found'}, status=400)
instance = courseQueryset[0]
member = Member.objects.get(email=serializer.data['email'])
instance.course_member.remove(member)
if member.is_lecturer == True:
instance.course_lecturer.remove(member)
return Response(serializer.data, status=200)
return Response({'errors': 'Bad request'}, status=400)
class LessonViewSet(viewsets.ViewSet, viewsets.GenericViewSet):
serializer_class = LessonSerializer
queryset = Lesson.objects.all()
permission_classes = (IsAuthenticated,)
def list(self, request, course_pk=None):
member_pk = request.user.id
queryset = Lesson.objects.filter(
course__course_member=member_pk, course=course_pk)
serializer = LessonSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, course_pk=None, pk=None):
member_pk = request.user.id
queryset = Lesson.objects.filter(
pk=pk, course=course_pk, course__course_member=member_pk)
if queryset.exists():
serializer = LessonSerializer(queryset[0])
return Response(serializer.data)
return Response({'errors': 'Objects not found'}, status=404)
def create(self, request, course_pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Course.objects.filter(pk=course_pk, course_member=member_pk)
if queryset.exists():
serializer = LessonSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
instance.course = queryset[0]
instance.save()
return Response(serializer.data, status=201)
return Response({'errors': 'Bad request'}, status=400)
def update(self, request, course_pk=None, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Lesson.objects.filter(
pk=pk, course=course_pk, course__course_member=member_pk)
if queryset.exists():
instance = queryset[0]
serializer = LessonSerializer(instance=instance, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
return Response({'errors': 'Bad request'}, status=400)
def destroy(self, request, course_pk=None, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Lesson.objects.filter(
pk=pk, course=course_pk, course__course_member=member_pk)
if queryset.exists():
instance = queryset[0]
instance.delete()
return Response(status=204)
return Response({'errors': 'Bad request'}, status=400)
class LessonFileViewSet(viewsets.ViewSet, viewsets.GenericViewSet):
serializer_class = FileSerializer
queryset = File.objects.all()
parser_classes = (FormParser, MultiPartParser, JSONParser,)
permission_classes = (IsAuthenticated,)
def list(self, request, course_pk=None, lesson_pk=None):
member_pk = request.user.id
queryset = File.objects.filter(
lesson=lesson_pk, lesson__course=course_pk, lesson__course__course_member=member_pk)
serializer = FileSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None, course_pk=None, lesson_pk=None):
member_pk = request.user.id
queryset = File.objects.filter(
pk=pk, lesson=lesson_pk, lesson__course=course_pk, lesson__course__course_member=member_pk)
if queryset.exists():
serializer = FileSerializer(queryset[0])
return Response(serializer.data)
return Response({'errors': 'Objects not found'}, status=404)
def create(self, request, course_pk=None, lesson_pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = Lesson.objects.filter(
pk=lesson_pk, course=course_pk, course__course_member=member_pk)
if queryset.exists():
serializer = FileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
instance = serializer.save()
instance.lesson = queryset[0]
instance.save()
return Response(serializer.data, status=201)
return Response({'errors': 'Bad request'}, status=400)
def update(self, request, lesson_pk=None, course_pk=None, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = File.objects.filter(
pk=pk, lesson=lesson_pk, lesson__course=course_pk, lesson__course__course_member=member_pk)
if queryset.exists():
instance = queryset[0]
serializer = FileSerializer(instance=instance, data=request.data)
serializer.is_valid(raise_exception=True)
instance.file_upload.delete()
serializer.save()
return Response(serializer.data)
return Response({'errors': 'Bad request'}, status=400)
def destroy(self, request, lesson_pk=None, course_pk=None, pk=None):
if not request.user.is_lecturer:
return Response({'error': 'You are not a lecturer'}, status=403)
member_pk = request.user.id
queryset = File.objects.filter(
pk=pk, lesson=lesson_pk, lesson__course=course_pk, lesson__course__course_member=member_pk)
if queryset.exists():
instance = queryset[0]
instance.delete()
return Response(status=204)
return Response({'errors': 'Bad request'}, status=400)
| 2.09375
| 2
|
pydefect/input_maker/defect.py
|
KazMorita/pydefect
| 20
|
12781965
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
import re
from dataclasses import dataclass
from typing import List, Optional
from monty.json import MSONable
@dataclass(frozen=True)
class Defect(MSONable):
name: str
charges: tuple
@property
def str_list(self):
return ["_".join([self.name, str(charge)]) for charge in self.charges]
@property
def charge_list(self):
return [charge for charge in self.charges]
class SimpleDefect(Defect):
def __init__(self, in_atom, out_atom, charge_list):
if in_atom is None:
in_atom = "Va"
super().__init__("_".join([in_atom, out_atom]), tuple(charge_list))
@property
def in_atom(self):
result = self.name.split("_")[0]
if result == "Va":
return
return result
@property
def out_atom(self):
return self.name.split("_")[1]
def screen_simple_defect(defect: SimpleDefect, keywords: List[str]
) -> Optional[SimpleDefect]:
charges = []
for charge in defect.charges:
full_name = "_".join([defect.name, str(charge)])
if any([re.search(keyword, full_name) for keyword in keywords]):
charges.append(charge)
if charges:
return SimpleDefect(defect.in_atom, defect.out_atom, tuple(charges))
else:
return
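# Hedged usage sketch (not part of the module):
#   d = SimpleDefect("Va", "O", [0, 1, 2])  # oxygen vacancy -> name "Va_O"
#   d.str_list                              # ["Va_O_0", "Va_O_1", "Va_O_2"]
#   screen_simple_defect(d, ["Va_O_1"])     # keeps only the charge-1 entry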
| 2.890625
| 3
|
pages/managers.py
|
GrAndSE/django-pages
| 0
|
12781966
|
'''Managers for pages classes; they can be used for easier access to models
'''
from django.db import models
class ActiveQuerySet(models.query.QuerySet):
'''QuerySet with additional methods to simplify access to active items
'''
def active(self):
'''Get only active items
'''
return self.filter(is_active=True)
def inactive(self):
'''Get only inactive items
'''
return self.filter(is_active=False)
def make_active(self):
'''Mark items as active
'''
return self.update(is_active=True)
def mark_inactive(self):
'''Mark items as inactive
'''
return self.update(is_active=False)
class ActiveManager(models.Manager):
'''Manager that creates ActiveQuerySet
'''
def get_query_set(self):
'''Create an ActiveQuerySet
'''
return ActiveQuerySet(self.model, using=self._db)
def active(self):
'''Get only active items
'''
return self.get_query_set().active()
def inactive(self):
'''Get only inactive items
'''
return self.get_query_set().inactive()
class LayoutManager(ActiveManager):
'''Manager for the Layout model. Contains a method to simplify access to the default
record
'''
def get_default(self):
'''Get default
'''
return self.get(is_default=True)
| 2.59375
| 3
|
Products/PloneGetPaid/browser/upgrade.py
|
collective/Products.PloneGetPaid
| 2
|
12781967
|
<filename>Products/PloneGetPaid/browser/upgrade.py<gh_stars>1-10
from Products.Five.browser import BrowserView
from Products.PloneGetPaid import generations
class AdminUpgrade( BrowserView ):
log = None
def __call__( self ):
if self.request.form.has_key('upgrade'):
self.log = self._upgrade()
return super( AdminUpgrade, self ).__call__()
def _upgrade( self ):
""" upgrade the application
"""
return generations.upgrade(self.context)
def softwareVersion( self ):
return generations.getAppSoftwareVersion()
def databaseVersion( self ):
return generations.getAppVersion( self.context )
def listUpgrades( self ):
return generations.getUpgrades( self.context )
| 2.3125
| 2
|
dictionary.py
|
rgoshen/Python-Dictionary
| 1
|
12781968
|
"""
This program allows the user to enter a word, proper
noun, or acronym to look up in the data file and returns its definition(s).
Created by: <NAME>
Date: 10-25-2020
I followed a tutorial from a course named "The Python Mega Course:
Build 10 Real World Applications" on stackskills.
"""
import json
from difflib import get_close_matches
data = json.load(open("data.json"))
def look_up(word):
"""
This function looks up a word, proper noun, or acronym in the data file
and returns its definition(s).
"""
word = word.lower()
if word in data:
return data[word]
elif word.title() in data: # ensures proper nouns can be found in data.
return data[word.title()]
elif word.upper() in data: # ensures acronyms can be found in data.
return data[word.upper()]
elif len(get_close_matches(word, data.keys())) > 0:
user_choice = input(f"Did you mean {get_close_matches(word, data.keys())[0]} instead? Enter 'Y' for yes of 'N' for no: ")
user_choice = user_choice.upper()
if user_choice == "Y":
return data[get_close_matches(word, data.keys())[0]]
elif user_choice == "N":
return f"{word} cannot be found!"
else:
return "I am sorry. I did not understand your entry."
else:
return f"{word} cannot be found!"
word = input('Enter in word: ')
output = look_up(word)
if type(output) == list:
for item in output:
print(item)
else:
print(output)
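# Hedged note (not part of the original script): data.json is assumed to map
# lowercase headwords to lists of definitions, e.g.
#   {"rain": ["Precipitation in the form of liquid water drops."]}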
| 4.1875
| 4
|
bpy_utilities/material_loader/shaders/source1_shaders/refract.py
|
tltneon/SourceIO
| 199
|
12781969
|
<reponame>tltneon/SourceIO<filename>bpy_utilities/material_loader/shaders/source1_shaders/refract.py
import numpy as np
from typing import Iterable
from ...shader_base import Nodes
from ..source1_shader_base import Source1ShaderBase
class Refract(Source1ShaderBase):
SHADER: str = 'refract'
@property
def bumpmap(self):
texture_path = self._vavle_material.get_param('$normalmap', None)
if texture_path is not None:
image = self.load_texture_or_default(texture_path, (0.5, 0.5, 1.0, 1.0))
image = self.convert_normalmap(image)
image.colorspace_settings.is_data = True
image.colorspace_settings.name = 'Non-Color'
return image
return None
@property
def basetexture(self):
texture_path = self._vavle_material.get_param('$basetexture', None)
if texture_path is not None:
return self.load_texture_or_default(texture_path, (0.3, 0, 0.3, 1.0))
return None
@property
def color2(self):
color_value, value_type = self._vavle_material.get_vector('$color2', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def bluramount(self):
value = self._vavle_material.get_float('$bluramount', 0)
return value
@property
def color(self):
color_value, value_type = self._vavle_material.get_vector('$color', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
elif len(color_value) > 3:
color_value = color_value[:3]
return color_value
@property
def refracttint(self):
color_value, value_type = self._vavle_material.get_vector('$refracttint', [1, 1, 1])
divider = 255 if value_type is int else 1
color_value = list(map(lambda a: a / divider, color_value))
if len(color_value) == 1:
color_value = [color_value[0], color_value[0], color_value[0]]
return color_value
def create_nodes(self, material_name):
if super().create_nodes(material_name) in ['UNKNOWN', 'LOADED']:
return
self.bpy_material.blend_method = 'OPAQUE'
self.bpy_material.shadow_method = 'NONE'
self.bpy_material.use_screen_refraction = True
self.bpy_material.use_backface_culling = True
material_output = self.create_node(Nodes.ShaderNodeOutputMaterial)
shader = self.create_node(Nodes.ShaderNodeBsdfPrincipled, self.SHADER)
self.connect_nodes(shader.outputs['BSDF'], material_output.inputs['Surface'])
basetexture = self.basetexture
if basetexture:
self.create_and_connect_texture_node(basetexture, shader.inputs['Base Color'], name='$basetexture')
bumpmap = self.bumpmap
if bumpmap:
normalmap_node = self.create_node(Nodes.ShaderNodeNormalMap)
self.create_and_connect_texture_node(bumpmap, normalmap_node.inputs['Color'], name='$bumpmap')
self.connect_nodes(normalmap_node.outputs['Normal'], shader.inputs['Normal'])
shader.inputs['Transmission'].default_value = 1.0
shader.inputs['Roughness'].default_value = self.bluramount
| 2.34375
| 2
|
tests/features/steps/simple_class_test.py
|
Lreus/python-behave
| 0
|
12781970
|
<gh_stars>0
from sample.classes.simple_class import SimpleClass
from sample.classes.subclasses.simple_subclass import SimpleSubClass
from urllib.error import URLError, HTTPError
from http.client import HTTPResponse
from requests import Response
from behave import *
def class_mapping():
"""
:return:
:rtype: dict
"""
return {
'SimpleClass': SimpleClass,
'SimpleSubClass': SimpleSubClass,
}
@Given('I have a simple class')
def i_have_a_simple_class(context):
context.simpleClass = SimpleClass()
@When('I say hello')
def i_say_hello(context):
context.response = context.simpleClass.hello()
@Then('the class should wave')
def class_should_wave(context):
assert context.response == 'wave'
@Given('I have a simple subclass')
def i_have_a_simple_subclass(context):
context.Class = {'subclass': SimpleSubClass()}
@Then('the subclass shall inherit from "{class_name}"')
def the_subclass_shall_inherit_from(context, class_name):
try:
mapped_class = class_mapping()[class_name]
except KeyError:
        print('available classes to test are', [k for k in class_mapping()])
raise AssertionError
assert issubclass(
type(context.Class['subclass']),
mapped_class
)
@Given('I call the url "{url}"')
def i_call_the_url(context, url):
context.response = SimpleClass.go_to_url(url, "GET")
@Then('the response status code should be {code:d}')
def status_code_is(context, code):
assert isinstance(context.response, (HTTPResponse, HTTPError)), \
"Failed asserting %r is a HTTPResponse or an HTTPError" % context.response.__class__
assert context.response.code == code, \
"Failed asserting {} equals {} ".format(code, context.response.code)
@Given('i call the {url} with method {method}')
def i_call_the_url_with_method(context, url, method):
context.response = SimpleClass.go_to_request(url, method)
@Then('the request response status code should be {code:d}')
def response_status_code_is(context, code):
assert isinstance(context.response, Response), \
"Failed asserting {repr} is a {response_class}".format(
response_class='requests.models.Response',
repr=repr(context.response),
)
assert context.response.status_code == code, \
"Failed asserting {} equals {} ".format(code, context.response.status_code)
| 3
| 3
|
elements-of-programming-interviews/5-arrays/5.7-buy-and-sell-stock-twice/buy_and_sell_stock_twice.py
|
washimimizuku/python-data-structures-and-algorithms
| 0
|
12781971
|
'''
Write a program that computes the maximum profit that
can be made by buying and selling a share at most twice.
The second buy must be made on another date after the
first sale.
'''
def buy_and_sell_stock_twice(prices): # Time: O(n) | Space: O(n)
min_price_so_far = float('inf') # Infinite value
max_total_profit = 0.0
first_buy_sell_profits = [0] * len(prices)
# Forward phase. For each day, we record maximum
# profit if we sell on that day.
for i, price in enumerate(prices):
min_price_so_far = min(min_price_so_far, price)
max_total_profit = max(max_total_profit, price - min_price_so_far)
first_buy_sell_profits[i] = max_total_profit
# Backward phase. For each day, find the maximum
    # profit if we make the second buy on that day.
max_price_so_far = float('-inf')
for i, price in reversed(list(enumerate(prices[1:], 1))):
max_price_so_far = max(max_price_so_far, price)
max_total_profit = max(
max_total_profit,
max_price_so_far - price + first_buy_sell_profits[i - 1])
return max_total_profit
assert(buy_and_sell_stock_twice([12, 11, 13, 9, 12, 8, 14, 13, 15]) == 10)
assert(buy_and_sell_stock_twice(
[310, 315, 275, 295, 260, 270, 290, 230, 255, 250]) == 55)
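# Illustrative hand-computed trace (added for clarity) for prices = [1, 4, 2, 8]:
#   forward pass  -> first_buy_sell_profits = [0, 3, 3, 7]
#   backward pass -> best second buy is day 2 (price 2, later max 8):
#                    8 - 2 + first_buy_sell_profits[1] = 6 + 3 = 9
# i.e. buy@1/sell@4, then buy@2/sell@8.
assert(buy_and_sell_stock_twice([1, 4, 2, 8]) == 9)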
| 3.890625
| 4
|
MalwareViz.py
|
MalwareViz/MalwareViz
| 8
|
12781972
|
#!/usr/bin/env python
# coding: utf8
"""
Download a webpage -> parse Name, IP, URL, Dropped Files.
Create Graphviz gv file -> convert to svg -> post to web.
------------ ------------
| URL | ----> | IP |
/ ------------ ------------
/
------------ -----------
|malware.exe |-| VirusTotal |
------------ -----------
\
\ ----------------
|droppedFiles.exe |
----------------
Samples:
https://malwr.com/analysis/OGNjMDBhMDg0YjAzNGQzYTkyZGZlZDhlNDc2NmMyNzY/
https://malwr.com/analysis/YzZiNmMzZGQ0NGNlNDM1ZWJlMmMxZGEyOTY1NWRkODU/
"""
'''
MIT License.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
__author__ = '<NAME>'
__license__ = 'MIT '
__version__ = '0.1'
__email__ = '<EMAIL>'
import re, subprocess, tempfile, sys, os, urllib, urllib2, webbrowser
#3rd Party
try:
import requests, simplejson
from BeautifulSoup import BeautifulSoup
except:
print "Check 3rd party libraries are installed. \n\tpip install requests\n\tsudo pip install simplejson\n\tpip install graphviz\n\tsudo pip install BeautifulSoup\n"
varURL = raw_input("What malwr.com url would you like to visualize?: \nhttps://malwr.com/analysis/OGNjMDBhMDg0YjAzNGQzYTkyZGZlZDhlNDc2NmMyNzY/\n")
VT_KEY = raw_input("Enter free VirusTotal API key: [ENTER = skip]\n")
dot_file = raw_input('Enter path of dot.exe (www.graphviz.org)\n [ENTER = Default] "C:\\Program Files (x86)\\Graphviz2.36\\bin\\dot.exe"\n [linux = /usr/bin/dot] if "cannot find the file" error try: sudo pip install graphviz\n>>>') \
or r"C:\Program Files (x86)\Graphviz2.36\bin\dot.exe"
#dot_file = r"C:\Program Files (x86)\Graphviz2.36\bin\dot.exe"
def checkURL():
#Make sure it's malwr.com
if varURL:
if varURL[:27] == 'https://malwr.com/analysis/':
return varURL
else:
error = 'URL "%s" does not look like this https://malwr.com/analysis/<hash>. Please try again.' % varURL
return error
sys.exit('')
#Configure proxies if you go through a proxy server to the Internet.
#proxies = {"http": "http://<IP of proxy>:80","https": "http://<IP of proxy>:80",}
proxies = {"": "","": "",}
try:
print "Getting data from malwr.com..."
r = requests.get(varURL, verify=False, proxies=proxies)
except Exception,e:
print 'Check URL: "%s". \nCheck proxy settings: %s"' % (varURL, proxies)
sys.exit('')
results = []
try:
soup = BeautifulSoup(r.text)
for tag in soup.findAll(True,{'id':True}) :
results.append(tag['id'])
except Exception,e:
x = str(e)+str(varURL)
print ('Failed at BeautifulSoup'+x)
sys.exit('')
try:
file_data = soup.find("section", {"id": "file"})
hosts = soup.find("section", {"id": "hosts"})
domains = soup.find("section", {"id": "domains"})
VirusTotalAlert = soup.find("section", {"id": "static_antivirus"})
dropped = soup.find("", {"id": "dropped"})
Tags = soup.find("", {"class": "alert alert-info"})
except Exception,e:
x = str(e)+str(varURL)
print ('Failed at sections'+x)
#tag
try:
tag_file_data = file_data.findAll('td')
tag_hosts = hosts.findAll('td')
tag_domains = domains.findAll('td')
tag_VT = VirusTotalAlert('td')
tag_dropped_details = dropped.findAll('td')
tag_dropped = dropped.findAll('b')
Tags = Tags.findAll('a')
except:
print ('Failed at td ')
def __Tags():
listTags = []
for i in Tags[0:1]:
listTags.append(str(i)[21:-4])
if len(listTags) == 0:
listTags = 0 #listTags = ['unknown']
return listTags
def __File_Name():
list_File = []
for i in range(len(tag_file_data))[0:1]:
list_File.append(str(tag_file_data[i].text))
return list_File
def __Details():
list_Details = []
for i in range(len(tag_file_data))[1:6]:
list_Details.append(str(tag_file_data[i].text))
del list_Details[1]
return list_Details
#Example: ['size', 'md5', 'sha1', 'sha256']
def __VTquery():
AVNameFinal=''
positives = ''
APT = 'False'
Zeus = 'False'
Adware = 'False'
AutoIt = 'False'
try:
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": __Details()[1], "apikey": VT_KEY}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
response_dict = simplejson.loads(json)
positives = response_dict.get("positives", {})
listVirusNames = []
for i in response_dict.get("scans", {}):
listVirusNames.append(i)
listAV_Names = []
for i in listVirusNames:
listAV_Names.append(response_dict.get("scans", {}).get(i, {}).get("result"))
AVNameFinal=[]
Taglist0 = ['Havex', 'havex']
Taglist = ['zeus', 'Zeus', 'zbot','Zbot', 'Kryptik',\
'FAREIT', 'Tepfer', 'ZPACK'\
,'FakeAV']
Taglist2 = ['Adware']
Taglist3 = ['Autoit', 'AutoIt']
for i in listAV_Names:
if i != None:
for m in Taglist0:
if m in i:
APT = "True"
for n in Taglist:
if n in i:
Zeus = "True"
for o in Taglist2:
if o in i:
Adware = 'True'
for p in Taglist3:
if p in i:
AutoIt = 'True'
if i == 'Trj/CI.A': #Non descriptive enough word
AVNameFinal.append([len(i) + 5,i]) #Add 5 so it won't be shortest.
else:
AVNameFinal.append([len(i),i])
except:
pass
if APT == "True":
MinName = "APT"
elif Zeus == "True":
MinName = "Zeus"
elif AutoIt == "True":
MinName = "AutoIt"
elif Adware == "True":
MinName = "Adware"
elif AVNameFinal:
MinName = str(min(AVNameFinal)[1])
else:
MinName = 0
return positives, MinName
def __VT():
#Return Number of AV alerts.
list_VT = []
for i in range(len(tag_VT)):
list_VT.append(str(tag_VT[i].text))
list_VT = zip(list_VT[::2],list_VT[1::2])
list_VT_count = []
for i in list_VT:
if i[1] != "Clean":
list_VT_count.append(i)
return len(list_VT_count)
#Example: 5
def __Hosts():
list_Hosts = []
for i in tag_hosts:
if str(i)[4:-5] not in list_Hosts:
list_Hosts.append(str(i)[4:-5])
gv_Hosts_nodes = ['subgraph cluster1 {label="Internet Traffic" color="red" fontname=Helvetica;']
for i in list_Hosts:
#To have IP addresses link to Robtex instead of VirusTotal.
#gv_Hosts_nodes.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, fillcolor="lightblue" style="filled, rounded", fontcolor="darkblue", URL="https://www.robtex.com/ip/'+str(i)+'.html#map"];')
gv_Hosts_nodes.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, fillcolor="lightblue" style="filled, rounded", fontcolor="darkblue", URL="https://www.virustotal.com/en/ip-address/'+str(i)+'/information/"];')
gv_Hosts_edges = []
for i in list_Hosts:
gv_Hosts_edges.append('point1->"'+str(i)+'";')
if gv_Hosts_edges:
global last_point
last_point = gv_Hosts_edges[8:-1]
return gv_Hosts_nodes, gv_Hosts_edges
def __Domains():
list_Domains = []
for i in tag_domains:
list_Domains.append(str(i)[4:-5])
if list_Domains:
global last_point
last_point = list_Domains[-1]
gv_list_DomainsNodes = []
for i in list_Domains:
is_valid = re.match("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", i)
if is_valid:
gv_list_DomainsNodes.append('"'+i+'" [fontsize = "10", fontname=Helvetica, fillcolor="lightblue" style="filled, rounded", fontcolor="darkblue", URL="https://www.virustotal.com/en/ip-address/'+str(i)+'/information/"];')
else:
gv_list_DomainsNodes.append('"'+i+'" [fontsize = "10", fontname=Helvetica, fillcolor="lightblue" style="filled, rounded", fontcolor="darkblue", URL="https://www.virustotal.com/en/domain/'+str(i)+'/information/"];')
gv_list_DomainsNodes.append('}')
gv_list_DomainsEdges = []
list_Domains = zip(list_Domains[::2],list_Domains[1::2])
for i in list_Domains:
gv_list_DomainsEdges.append('point1->"'+i[0]+'"->"'+i[1]+'";')
return gv_list_DomainsNodes, gv_list_DomainsEdges
def __VTquery2(MD5hash):
positives = ''
try:
url = "https://www.virustotal.com/vtapi/v2/file/report"
parameters = {"resource": MD5hash, "apikey": VT_KEY}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
response_dict = simplejson.loads(json)
#global positives
positives = response_dict.get("positives", {})
except:
print ("VirusTotal API key not used. No VT scan on dropped files. __VTquery2.")
if isinstance(positives, int):
if positives >= 1:
positives = 'True' #VT = 1+
elif positives == 0:
positives = 'False' #VT = 0
elif not positives:
positives = 'empty' #VT = never seen it.
else:
positives = 'False' #If in doubt it's 0.
return positives
#if positives == 0:
#if positives == 0: No VT hits.
#if positives == {}: File not found.
def __Dropped():
list_Dropped = []
for i in tag_dropped:
if str(i)[3:-4] not in list_Dropped:
list_Dropped.append(str(i)[3:-4])
list_matchMD5 = re.findall(r"<td>([a-fA-F\d]{32})\n</td>", str(tag_dropped_details))
gv_exe_dll_zip_sys = []
gv_dropped_NotExeFiles = []
#list_Executables = [".exe", ".EXE", ".bin", ".dll", ".zip", ".sys", ".jar", ".scr", ".pif", ".rar", ".ocx", ".msi", ".apk", ".cpl", ".vbs", ".pip"]
for i, n in zip(list_Dropped, list_matchMD5):
if ".exe" in i[-4:]:
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".EXE":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".bin":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".dll":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".zip":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".sys":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".jar":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".scr":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".pif":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".rar":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".ocx":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".msi":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".apk":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".cpl":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".vbs":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-4:] == ".pip":
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif i[-5:] == '.html':
pass
elif i[-4:-3] != '.':
VT_results = __VTquery2(n)
if VT_results == 'True':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Helvetica, color="red", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
elif VT_results == 'empty':
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=times, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_exe_dll_zip_sys.append('"'+str(i)+'" [fontsize = "10", fontname=Courier, color="black", penwidth=1, fillcolor="orange", style="filled", fontcolor="crimson", shape=box, URL="https://www.virustotal.com/latest-scan/'+n+'"];')
else:
gv_dropped_NotExeFiles.append(str(i))
gv_dropped_last_node = ''
if gv_dropped_NotExeFiles:
gv_dropped_last_node = 'NotExeFiles [label = "%s", shape=record, fontsize = "10", fontname=Helvetica, fillcolor="orange" style="filled", fontcolor="crimson", URL="http://en.wikipedia.org/wiki/List_of_filename_extensions"];' % ("|".join(gv_dropped_NotExeFiles))
#Create nodes inside subgraph cluster2.
#Create edges outside subgraph cluster2
gv_create_subgraph = []
gv_Dropped_edges = []
if gv_exe_dll_zip_sys:
gv_create_subgraph.append('subgraph cluster2 {label="Created Files" color="darkorange" fontname=Helvetica;')
for i in gv_exe_dll_zip_sys:
gv_create_subgraph.append(str(i))
gv_Dropped_edges.append('point2->'+str(i)+'')
gv_create_subgraph.append(gv_dropped_last_node)
gv_create_subgraph.append('}\n')
# if no EXE files, but others.
if gv_dropped_NotExeFiles:
if not gv_exe_dll_zip_sys:
global subgraph2a
subgraph2a = 'subgraph cluster2 {label="Created Files" color="darkorange" fontname=Helvetica;'+gv_dropped_last_node+'}'
if gv_dropped_NotExeFiles:
gv_Dropped_edges.append('point2->NotExeFiles;')
return gv_create_subgraph, gv_Dropped_edges
def __Build_gv():
gv_head = 'digraph "MalwareViz_%s Malware Visualizer" {rankdir = "LR"; ranksep=".08 equally";' % fileMD5 #<title>
gv_node = 'node [fontsize = "10", fontname=Helvetica];' #, shape=box, style=rounded
gv_Start_color = "limegreen"
if VTvalue != 0:
gv_Start_color = "crimson"
if HostDomaincount > 1:
gv_Start_color = "crimson"
gv_Start = 'Start [label = "%s", shape="doublecircle", width=1.5, height=1.5, fillcolor="", color="%s", fontcolor="black", URL="%s"];' % (__File_Name()[0], gv_Start_color, varURL) # style="bold",
VT_color = "limegreen"
if VTvalue != 0:
VT_color = "red"
gv_VT = 'VirusTotal [label = "VirusTotal Alerts=%s", shape=box, style="filled, rounded", fillcolor="%s" fontcolor="black", URL="https://www.virustotal.com/latest-scan/%s"];' % (VTvalue, VT_color, fileMD5)
#gv_rank = 'rank = same {Start, VirusTotal};'
gv_edges = 'Start->VirusTotal [dir="none", penwidth=7];'
#gv_Lang = 'SUBLANG_NEUTRAL'
#Build File
f=open(static_dir+gvfilename, 'w')
gv_total = gv_head, gv_node, gv_Start, gv_VT, gv_edges
for i in gv_total:
print >> f, i
#Add empty points if hosts, domains, dropped is > 0
#print >> f, 'node [label="", width=.1618, height=.1618, shape="none"];'
points_count = []
if len(tag_hosts) or len(tag_domains) > 0:
points_count.append('point1 [label="", width=.01, height=.01, shape=circle, style="filled", fillcolor=black];')
#points_count.append('point2 [label="", width=.01, height=.01, shape=circle, style="filled"];') #Creates space
points_count.append('VirusTotal->point1 [dir="none"];')
#points_count.append('rank = same {VirusTotal, point1, point2};')
if "[<b>Sorry!</b>]" not in str(tag_dropped):
points_count.append('point2 [label="", width=.01, height=.01, shape=circle, style="filled", fillcolor=black];')
points_count.append('VirusTotal->point2 [dir="none"];')
for i in points_count:
print >> f, i
#turn node shapes back on
#print >> f, 'node [fontsize = "10", fontname=Helvetica, shape=box, style=rounded];'
#subgraph1 Hosts, Domains
for i in __Hosts()[0]:
print >> f, i
for i in __Domains()[0]:
print >> f, i
#edges
for i in __Hosts()[1]:
print >> f, i
for i in __Domains()[1]:
print >> f, i
#subgraph2 Created_Files
for i in __Dropped()[0]:
print >> f, i
try:
if subgraph2a:
print >> f, subgraph2a
except:
pass
for i in __Dropped()[1]:
print >> f, i
#Close bracket
print >> f, "}"
f.close()
return
def __addHyperLink(svgFile):
with open(svgFile, 'rb') as editWidth:
widthFile = editWidth.read()
match = re.sub('<a xlink:href="', '<a xlink:show="new" xlink:href="', widthFile, count=0) #Count is num of occurance. 0=all
with open(svgFile, 'wb') as editWidth:
widthFile = editWidth.write(match)
def __dot(gv_file):
svgFile = tempdir+fileMD5+'.svg'
if os.path.isfile(r"C:\Program Files (x86)\Graphviz2.36\bin\dot.exe"):
subprocess.call([dot_file, "-Tsvg", "-o", "%s" % svgFile, gv_file])
elif os.path.isfile("/usr/bin/dot"):
subprocess.call(["/usr/bin/dot", "-Tsvg", "-o", "%s" % svgFile, gv_file])
else:
print "Cannot find dot.exe, install http://www.graphviz.org/"
sys.exit('')
__addHyperLink(svgFile)
webbrowser.open(svgFile, new=2)
r = 0 #Clean
if __name__ == '__main__':
print "running...\n"
checkURL()
fileMD5 = __Details()[1]
tempdir = tempfile.gettempdir() + os.sep
static_dir = tempdir
gvfilename = fileMD5+'.gv'
fullPathgvfile = tempdir+gvfilename
    VTvalue = __VTquery()[0] or __VT() or 0 #If VirusTotal from malwr or VirusTotal has a higher number, use that.
if __Tags():
VTminName = __Tags()[0] #malwr.com Tag Name or Shortest Name in VT.
elif __VTquery()[1]:
VTminName = __VTquery()[1]
else:
VTminName = 'unknown'
HostDomaincount = len(tag_hosts) + (len(tag_domains)/2) #If VirusTotal is 0 but there are over 1 C2 then make red.
__Build_gv()
__dot(fullPathgvfile)
print "Malware Name: " + VTminName
| 2.09375
| 2
|
test_cvfill.py
|
xbcReal/ssd_fcn_multitask_text_detection_pytorch1.0
| 2
|
12781973
|
<filename>test_cvfill.py
import cv2
import numpy as np
image = np.ones((720,1280, 3), np.uint8) * 255
triangle = np.array([[1131,122,1161,125,1165,107,1119,123]])
triangle = np.reshape(triangle,(-1,4,2))
print(np.shape(triangle))
cv2.circle(image, (triangle[0][0][0], triangle[0][0][1]), 5, (0, 255, 0), 5)
cv2.circle(image, (triangle[0][1][0], triangle[0][1][1]), 5, (0, 0, 0), 5)
cv2.circle(image, (triangle[0][2][0], triangle[0][2][1]), 5, (255, 0, 0), 5)
cv2.circle(image, (triangle[0][3][0], triangle[0][3][1]), 5, (0, 0, 255), 5)
# triangle=np.int32(triangle)
# # print(np.shape(triangle))
# # print(triangle)
# img=cv2.fillPoly(img, triangle, (255, 255, 255))
#
cv2.imshow('img',image)
cv2.waitKey()
cv2.imwrite('black.jpg',image)
| 2.984375
| 3
|
text_classification/UCAS_NLP_TC/data_11_baidu_cws_api_fill.py
|
q759729997/qyt_clue
| 1
|
12781974
|
"""
Baidu lexical analysis API: fill in the entries that were not recognised earlier.
pip install baidu-aip
"""
import time
import os
import sys
import codecs
import json
import traceback
from tqdm import tqdm
from aip import AipNlp
sys.path.insert(0, './')  # put './' first on the module search path (index 0 = highest priority)
from data import baidu_config # noqa
""" 你的 APPID AK SK """
APP_ID = baidu_config.APP_ID # '你的 App ID'
API_KEY = baidu_config.API_KEY # '你的 Api Key'
SECRET_KEY = baidu_config.SECRET_KEY # '你的 Secret Key'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
# text = "百度是一家高科技公司"
""" 调用词法分析 """
# print(client.lexer(text))
import myClue # noqa
print('myClue module path :{}'.format(myClue.__file__))  # print the test module's file location
from myClue.core import logger # noqa
from myClue.tools.file import read_file_texts # noqa
from myClue.tools.file import init_file_path # noqa
def get_baidu_cws(text):
for i in range(20):
try:
            text = text.encode('gbk', errors='ignore').decode('gbk', errors='ignore')  # drop characters GBK cannot encode; the API expects GBK-compatible text
cws_result = client.lexer(text)
if 'items' in cws_result:
return cws_result['items']
else:
continue
except Exception as e:
time.sleep(0.5)
print('text:{}, i:{}, exception:{}'.format(text, i, e))
traceback.print_exc()
return []
if __name__ == "__main__":
train_file_config = {
'dev': './data/UCAS_NLP_TC/data_baidu_cws/dev_cws.json',
'test': './data/UCAS_NLP_TC/data_baidu_cws/test_cws.json',
'train': './data/UCAS_NLP_TC/data_baidu_cws/train_cws.json',
}
for file_label, file_name in train_file_config.items():
        logger.info('Start processing: {}'.format(file_label))
texts = read_file_texts(file_name)
with codecs.open(file_name, mode='w', encoding='utf8') as fw:
for text in tqdm(texts):
row_json = json.loads(text)
if len(row_json['cws_items']) == 0:
news_content = row_json['news_content']
if len(news_content) > 10000:
cws_items = get_baidu_cws(news_content[:10000])
time.sleep(0.3)
cws_items.extend(get_baidu_cws(news_content[10000:]))
else:
cws_items = get_baidu_cws(news_content)
time.sleep(0.3)
row_json['cws_items'] = cws_items
fw.write('{}\n'.format(json.dumps(row_json, ensure_ascii=False)))
time.sleep(0.3)
else:
fw.write('{}\n'.format(text))
| 2.359375
| 2
|
example/change_date.py
|
MadSkittles/Switch-Fightstick
| 0
|
12781975
|
from NXController import Controller
ctr = Controller()
for i in range(30):
ctr.A()
if i == 0:
ctr.RIGHT()
ctr.RIGHT()
else:
ctr.LEFT()
ctr.LEFT()
ctr.LEFT()
ctr.UP()
ctr.RIGHT(0.4)
ctr.A()
ctr.close()
| 2.609375
| 3
|
auth.py
|
computerbox124/Hey-Hajiyevs
| 0
|
12781976
|
from support import *
import chat
def main():
#Creating Login Page
global val, w, root,top,username,name
root = tk.Tk()
username = tk.StringVar()
name = tk.StringVar()
#root.attributes('-fullscreen',True)
top = Toplevel1 (root)
init(root, top)
root.mainloop()
def authentication():
#Logining and receiving token
global username_info,name_info,token,username,name
username_info=username.get()
name_info=name.get()
print("Username:",username_info)
print("Name:",name_info)
try:
response = requests.post(
'http://127.0.0.1:5000/send',
json={'name': name_info, 'username': username_info, 'text': '', 'status': 'login'}
)
except:
messagebox.showinfo("Error!","No connection with server!")
return
data = response.json()
if data['status'] == 'Ok':
chat.main(name_info,username_info)
else:
messagebox.showinfo("Error!","That username was used! Input your username again!")
return
#messagebox.showinfo("Wrong", "You entered wrong credentials!")
#print(r1.text)
class Toplevel1:
def __init__(self, top=None):
#This class contains information about our Window
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
top.geometry("{}x{}".format(size_x, size_y))
top.minsize(1, 1)
top.maxsize(size_x,size_y)
top.resizable(1, 1)
top.title("Login")
#Background image
self.img = tk.PhotoImage(file="images/bg.png")
self.my_canvas = tk.Canvas(top)
self.my_canvas.place(relx=0.0, rely=0.0,height=size_y,width=size_x)
self.my_canvas.create_image(0,0,image=self.img,anchor="nw")
#Entries
self.my_canvas.create_text(255,130,text="Username",font="-family {DejaVu Sans} -size 20")
self.Entry1 = tk.Entry(top,textvariable=username)
self.Entry1.place(relx=0.300, rely=0.420, height=23, relwidth=0.40)
self.Entry1.configure(background="white")
self.Entry1.configure(font="FixedFont")
self.my_canvas.create_text(255,220,text="<NAME>",font="-family {DejaVu Sans} -size 20")
self.Entry2 = tk.Entry(top,textvariable=name)
self.Entry2.place(relx=0.300, rely=0.650, height=23, relwidth=0.40)
self.Entry2.configure(background="white")
self.Entry2.configure(font="FixedFont")
#Login Button
self.butn1 = tk.Button(text='Login',command=authentication)
self.butn1.place(relx=0.440, rely=0.800,height=30,width=70)
| 2.96875
| 3
|
app/database.py
|
yaoelvon/flask-sqlalchemy-custom-query-demo
| 0
|
12781977
|
# -*- coding: utf-8 -*-
# @date 2016/06/03
# @author <EMAIL>
# @desc custom methods of the query class in Flask-SQLAlchemy
# @record
#
from flask import request
from flask_sqlalchemy import (
BaseQuery,
Model,
_BoundDeclarativeMeta,
SQLAlchemy as BaseSQLAlchemy,
_QueryProperty)
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy._compat import iteritems, itervalues, xrange, \
string_types
class MyBaseQuery(BaseQuery):
# do stuff here
def all(self):
        tenant_ctx = None if not request else request.environ.get('tenant_ctx')
        if tenant_ctx is not None and hasattr(tenant_ctx, 'db_filters'):
            for k, v in tenant_ctx.db_filters.items():
                self = self.filter_by(**{k: v})
        return list(self)
def first(self):
"""改写basequery的first方法. 增加过滤条件
"""
tenant_ctx = None if not request else request.environ.get('tenant_ctx')
if tenant_ctx is None or hasattr(tenant_ctx, 'db_filters')is False:
self = self
else:
for k, v in tenant_ctx.db_filters.items():
self = self.filter_by(**{k: v})
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
class MyModel(Model):
# in this case we're just using a custom BaseQuery class,
# but you can add other stuff as well
query_class = MyBaseQuery
def _my_declarative_constructor(self, tenant=None, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
if tenant is not None:
setattr(self, "company_id", tenant.db_filters.get('company_id'))
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
class MySQLAlchemy(BaseSQLAlchemy):
def make_declarative_base(self, metadata=None):
# in this case we're just using a custom Model class,
# but you can change the DelcarativeMeta or other stuff as well
base = declarative_base(cls=MyModel,
name='Model',
metadata=metadata,
metaclass=_BoundDeclarativeMeta,
constructor=_my_declarative_constructor)
base.query = _QueryProperty(self)
return base
# Fixed Flask-Fixtures's Bug: https://github.com/croach/Flask-Fixtures/issues/22
db = MySQLAlchemy()
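# --- Hypothetical usage sketch (commented out; the app, model and TenantCtx names
# are illustrative assumptions, not part of this repo). It shows the intended
# behaviour of the custom query class: once a request carries a 'tenant_ctx' with
# `db_filters`, `Model.query.all()` / `.first()` silently apply those filters.
#
# from flask import Flask
#
# app = Flask(__name__)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
# db.init_app(app)
#
# class User(db.Model):
#     id = db.Column(db.Integer, primary_key=True)
#     company_id = db.Column(db.Integer)
#
# class TenantCtx(object):
#     db_filters = {'company_id': 1}
#
# with app.test_request_context(environ_base={'tenant_ctx': TenantCtx()}):
#     db.create_all()
#     db.session.add(User(tenant=TenantCtx()))   # constructor fills company_id
#     db.session.commit()
#     print(User.query.all())                    # only rows with company_id == 1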
| 2.765625
| 3
|
datasets/sofc_materials_articles/sofc_materials_articles.py
|
NihalHarish/datasets
| 9
|
12781978
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here."""
from __future__ import absolute_import, division, print_function
import glob
import os
import pandas as pd
import datasets
_CITATION = """\
@misc{friedrich2020sofcexp,
title={The SOFC-Exp Corpus and Neural Approaches to Information Extraction in the Materials Science Domain},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2006.03039},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The SOFC-Exp corpus consists of 45 open-access scholarly articles annotated by domain experts.
The corpus and an inter-annotator agreement study, which demonstrate the complexity of the suggested
named entity recognition and slot filling tasks as well as the high annotation quality, are presented
in the accompanying paper.
"""
_HOMEPAGE = "https://arxiv.org/abs/2006.03039"
_LICENSE = ""
_URL = "https://github.com/boschresearch/sofc-exp_textmining_resources/archive/master.zip"
class SOFCMaterialsArticles(datasets.GeneratorBasedBuilder):
""""""
VERSION = datasets.Version("1.1.0")
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
"sentence_offsets": datasets.features.Sequence(
{"begin_char_offset": datasets.Value("int64"), "end_char_offset": datasets.Value("int64")}
),
"sentences": datasets.features.Sequence(datasets.Value("string")),
"sentence_labels": datasets.features.Sequence(datasets.Value("int64")),
"token_offsets": datasets.features.Sequence(
{
"offsets": datasets.features.Sequence(
{"begin_char_offset": datasets.Value("int64"), "end_char_offset": datasets.Value("int64")}
)
}
),
"tokens": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
"entity_labels": datasets.features.Sequence(
datasets.features.Sequence(
datasets.features.ClassLabel(
names=[
"B-DEVICE",
"B-EXPERIMENT",
"B-MATERIAL",
"B-VALUE",
"I-DEVICE",
"I-EXPERIMENT",
"I-MATERIAL",
"I-VALUE",
"O",
]
)
)
),
"slot_labels": datasets.features.Sequence(
datasets.features.Sequence(
datasets.features.ClassLabel(
names=[
"B-anode_material",
"B-cathode_material",
"B-conductivity",
"B-current_density",
"B-degradation_rate",
"B-device",
"B-electrolyte_material",
"B-experiment_evoking_word",
"B-fuel_used",
"B-interlayer_material",
"B-interconnect_material",
"B-open_circuit_voltage",
"B-power_density",
"B-resistance",
"B-support_material",
"B-thickness",
"B-time_of_operation",
"B-voltage",
"B-working_temperature",
"I-anode_material",
"I-cathode_material",
"I-conductivity",
"I-current_density",
"I-degradation_rate",
"I-device",
"I-electrolyte_material",
"I-experiment_evoking_word",
"I-fuel_used",
"I-interlayer_material",
"I-interconnect_material",
"I-open_circuit_voltage",
"I-power_density",
"I-resistance",
"I-support_material",
"I-thickness",
"I-time_of_operation",
"I-voltage",
"I-working_temperature",
"O",
]
)
)
),
"links": datasets.Sequence(
{
"relation_label": datasets.features.ClassLabel(
names=["coreference", "experiment_variation", "same_experiment", "thickness"]
),
"start_span_id": datasets.Value("int64"),
"end_span_id": datasets.Value("int64"),
}
),
"slots": datasets.features.Sequence(
{
"frame_participant_label": datasets.features.ClassLabel(
names=[
"anode_material",
"cathode_material",
"current_density",
"degradation_rate",
"device",
"electrolyte_material",
"fuel_used",
"interlayer_material",
"open_circuit_voltage",
"power_density",
"resistance",
"support_material",
"time_of_operation",
"voltage",
"working_temperature",
]
),
"slot_id": datasets.Value("int64"),
}
),
"spans": datasets.features.Sequence(
{
"span_id": datasets.Value("int64"),
"entity_label": datasets.features.ClassLabel(names=["", "DEVICE", "MATERIAL", "VALUE"]),
"sentence_id": datasets.Value("int64"),
"experiment_mention_type": datasets.features.ClassLabel(
names=["", "current_exp", "future_work", "general_info", "previous_work"]
),
"begin_char_offset": datasets.Value("int64"),
"end_char_offset": datasets.Value("int64"),
}
),
"experiments": datasets.features.Sequence(
{
"experiment_id": datasets.Value("int64"),
"span_id": datasets.Value("int64"),
"slots": datasets.features.Sequence(
{
"frame_participant_label": datasets.features.ClassLabel(
names=[
"anode_material",
"cathode_material",
"current_density",
"degradation_rate",
"conductivity",
"device",
"electrolyte_material",
"fuel_used",
"interlayer_material",
"open_circuit_voltage",
"power_density",
"resistance",
"support_material",
"time_of_operation",
"voltage",
"working_temperature",
]
),
"slot_id": datasets.Value("int64"),
}
),
}
),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
my_urls = _URL
data_dir = dl_manager.download_and_extract(my_urls)
data_dir = os.path.join(data_dir, "sofc-exp_textmining_resources-master/sofc-exp-corpus")
metadata = pd.read_csv(os.path.join(data_dir, "SOFC-Exp-Metadata.csv"), sep="\t")
text_base_path = os.path.join(data_dir, "texts")
        # Use os.path.splitext to drop the ".txt" suffix; str.rstrip(".txt") would also
        # strip trailing 't', 'x' or '.' characters from the file name itself.
        text_files_available = [
            os.path.splitext(os.path.basename(i))[0] for i in glob.glob(os.path.join(text_base_path, "*.txt"))
        ]
metadata = metadata[metadata["name"].map(lambda x: x in text_files_available)]
names = {}
splits = ["train", "test", "dev"]
for split in splits:
names[split] = metadata[metadata["set"] == split]["name"].tolist()
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"names": names["train"],
"data_dir": data_dir,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={"names": names["test"], "data_dir": data_dir, "split": "test"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"names": names["dev"],
"data_dir": data_dir,
"split": "validation",
},
),
]
def _generate_examples(self, names, data_dir, split):
""" Yields examples. """
# The dataset consists of the original article text as well as annotations
textfile_base_path = os.path.join(data_dir, "texts")
annotations_base_path = os.path.join(data_dir, "annotations")
# The annotations are mostly references to offsets in the source text
# with corresponding labels, so we'll refer to them as `meta`
sentence_meta_base_path = os.path.join(annotations_base_path, "sentences")
tokens_meta_base_path = os.path.join(annotations_base_path, "tokens")
ets_meta_base_path = os.path.join(annotations_base_path, "entity_types_and_slots")
frame_meta_base_path = os.path.join(annotations_base_path, "frames")
# Define the headers for the sentence and token and entity metadata
sentence_meta_header = ["sentence_id", "label", "begin_char_offset", "end_char_offset"]
tokens_meta_header = ["sentence_id", "token_id", "begin_char_offset", "end_char_offset"]
ets_meta_header = [
"sentence_id",
"token_id",
"begin_char_offset",
"end_char_offset",
"entity_label",
"slot_label",
]
# Start the processing loop
# For each text file, we'll load all of the
# associated annotation files
for id_, name in enumerate(sorted(names)):
# Load the main source text
textfile_path = os.path.join(textfile_base_path, name + ".txt")
text = open(textfile_path, encoding="utf-8").read()
# Load the sentence offsets file
sentence_meta_path = os.path.join(sentence_meta_base_path, name + ".csv")
sentence_meta = pd.read_csv(sentence_meta_path, sep="\t", names=sentence_meta_header)
# Load the tokens offsets file
tokens_meta_path = os.path.join(tokens_meta_base_path, name + ".csv")
tokens_meta = pd.read_csv(tokens_meta_path, sep="\t", names=tokens_meta_header)
# Load the entity offsets file
ets_meta_path = os.path.join(ets_meta_base_path, name + ".csv")
ets_meta = pd.read_csv(ets_meta_path, sep="\t", names=ets_meta_header)
# Create a list of lists indexed as [sentence][token] for the entity and slot labels
entity_labels = ets_meta.groupby("sentence_id").apply(lambda x: x["entity_label"].tolist()).to_list()
slot_labels = ets_meta.groupby("sentence_id").apply(lambda x: x["slot_label"].tolist()).to_list()
# Create a list of lists for the token offsets indexed as [sentence][token]
# Each element will contain a dict with beginning and ending character offsets
token_offsets = (
tokens_meta.groupby("sentence_id")[["begin_char_offset", "end_char_offset"]]
.apply(lambda x: x.to_dict(orient="records"))
.tolist()
)
# Load the frames metadata. The frames file contains the data for all of the annotations
# in a condensed format that varies throughout the file. More information on this format
# can be found: https://framenet.icsi.berkeley.edu/fndrupal/
frames_meta_path = os.path.join(frame_meta_base_path, name + ".csv")
frames_meta = open(frames_meta_path, encoding="utf-8").readlines()
# Parse the sentence offsets, producing a list of dicts with the
# starting and ending position of each sentence in the original text
sentence_offsets = (
sentence_meta[["begin_char_offset", "end_char_offset"]].apply(lambda x: x.to_dict(), axis=1).tolist()
)
# The sentence labels are a binary label that describes whether the sentence contains
# any annotations
sentence_labels = sentence_meta["label"].tolist()
            # Materialize a list of strings of the actual sentences
sentences = [text[ost["begin_char_offset"] : ost["end_char_offset"]] for ost in sentence_offsets]
# Materialize a list of lists of the tokens in each sentence.
# Annotation labels are aligned with these tokens, so be careful with
# alignment if using your own tokenization scheme with the sentences above
tokens = [
[s[tto["begin_char_offset"] : tto["end_char_offset"]] for tto in to]
for s, to in zip(sentences, token_offsets)
]
# The frames file first contains spans annotations (in one format),
# then contains experiments annotations (in another format),
# then links annotations (in yet another format).
# Here we find the beginning of the experiments and links sections of the file
# Additionally, each experiment annotation in the experiment annotations begins with a
# line starting with the word EXPERIMENT (in one format)
# followed by the annotations for that experiment (in yet _another_ format)
# Here we get the start positions for each experiment _within_ the experiments
# section of the frames data
experiment_starts = [i for i, line in enumerate(frames_meta) if line.startswith("EXPERIMENT")]
experiment_start = min(experiment_starts)
link_start = min([i for i, line in enumerate(frames_meta) if line.startswith("LINK")])
# Pick out the spans section of the data for parsing
spans_raw = frames_meta[:experiment_start]
# Iterate through the spans data
spans = []
for span in spans_raw:
# Split out the elements in each tab-delimited line
_, span_id, entity_label_or_exp, sentence_id, begin_char_offset, end_char_offset = span.split("\t")
# The entity label for experiment spans have a sub-label,
# called the experiment mention type,
# which is sub-delimited by a ':'
# The code below standardizes the fields produced by
# each line to a common schema, some fields of which may
# be empty depending on the data available in the line
if entity_label_or_exp.startswith("EXPERIMENT"):
exp, experiment_mention_type = entity_label_or_exp.split(":")
entity_label = ""
else:
entity_label = entity_label_or_exp
exp = ""
experiment_mention_type = ""
s = {
"span_id": span_id,
"entity_label": entity_label,
"sentence_id": sentence_id,
"experiment_mention_type": experiment_mention_type,
"begin_char_offset": int(begin_char_offset),
"end_char_offset": int(end_char_offset),
}
spans.append(s)
# Pull out the links annotations for from the frames data
links_raw = [f.rstrip("\n") for f in frames_meta[link_start:]]
# Iterate through the links data, which is in a simple tab-delimited format
links = []
for link in links_raw:
_, relation_label, start_span_id, end_span_id = link.split("\t")
link_out = {
"relation_label": relation_label,
"start_span_id": int(start_span_id),
"end_span_id": int(end_span_id),
}
links.append(link_out)
# Iterate through the experiments data and parse each experiment
experiments = []
# Zip the experiment start offsets to get start/end position tuples
# for each experiment in the experiments data
for start, end in zip(experiment_starts[:-1], experiment_starts[1:]):
current_experiment = frames_meta[start:end]
# The first line of each experiment annotation contains the
# experiment id and the span id
_, experiment_id, span_id = current_experiment[0].rstrip("\n").split("\t")
exp = {"experiment_id": int(experiment_id), "span_id": int(span_id)}
# The remaining lines in the experiment annotations contain
# slot level information for each experiment.
slots = []
for e in current_experiment[1:]:
e = e.rstrip("\n")
_, frame_participant_label, slot_id = e.split("\t")
to_add = {"frame_participant_label": frame_participant_label, "slot_id": int(slot_id)}
slots.append(to_add)
exp["slots"] = slots
experiments.append(exp)
# Yield the final parsed example output
# NOTE: the `token_offsets` is converted to a list of
# dicts to accommodate processing to the arrow files
# in the `features` schema defined above
yield id_, {
"text": text,
"sentence_offsets": sentence_offsets,
"sentences": sentences,
"sentence_labels": sentence_labels,
"token_offsets": [{"offsets": to} for to in token_offsets],
"tokens": tokens,
"entity_labels": entity_labels,
"slot_labels": slot_labels,
"links": links,
"slots": slots,
"spans": spans,
"experiments": experiments,
}
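# --- Hypothetical usage sketch (not part of the original script) ---
# Assuming this script is registered with the `datasets` library under the name
# "sofc_materials_articles", loading a split would look roughly like:
#
#   from datasets import load_dataset
#   ds = load_dataset("sofc_materials_articles", split="train")
#   print(ds[0]["sentences"][:2], ds[0]["sentence_labels"][:2])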
| 1.601563
| 2
|
scheduler/migrations/0001_initial.py
|
FelixTheC/beak_emailer_list
| 0
|
12781979
|
<reponame>FelixTheC/beak_emailer_list<gh_stars>0
# Generated by Django 3.1.1 on 2021-04-15 20:05
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ScheduleCommander',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('earliest_execution_date', models.DateTimeField()),
('module', models.CharField(max_length=255)),
('func', models.CharField(max_length=255)),
('args', models.CharField(max_length=255)),
('kwargs', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='ScheduleResult',
fields=[
('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('result', models.JSONField()),
],
),
]
| 1.796875
| 2
|
python_modules/libraries/dagster-papertrail/dagster_papertrail/loggers.py
|
rpatil524/dagster
| 4,606
|
12781980
|
<gh_stars>1000+
import logging
import logging.handlers  # needed for logging.handlers.SysLogHandler below
import socket
from dagster import Field, IntSource, StringSource, logger
class ContextFilter(logging.Filter):
hostname = socket.gethostname()
def filter(self, record):
record.hostname = ContextFilter.hostname
return True
@logger(
{
"log_level": Field(StringSource, is_required=False, default_value="INFO"),
"name": Field(StringSource, is_required=False, default_value="dagster_papertrail"),
"papertrail_address": Field(StringSource, description="Papertrail URL", is_required=True),
"papertrail_port": Field(IntSource, description="Papertrail port", is_required=True),
},
    description="A Papertrail logger",
)
def papertrail_logger(init_context):
"""Use this logger to configure your Dagster pipeline to log to Papertrail. You'll need an
active Papertrail account with URL and port.
Example:
.. code-block:: python
@job(logger_defs={
"console": colored_console_logger,
"papertrail": papertrail_logger,
})
def simple_job():
...
simple_job.execute_in_process(
run_config={
"loggers": {
"console": {
"config": {
"log_level": "INFO",
}
},
"papertrail": {
"config": {
"log_level": "INFO",
"name": "hello_pipeline",
"papertrail_address": "127.0.0.1",
"papertrail_port": 12345,
}
},
}
}
)
"""
level, name, papertrail_address, papertrail_port = (
init_context.logger_config.get(k)
for k in ("log_level", "name", "papertrail_address", "papertrail_port")
)
klass = logging.getLoggerClass()
logger_ = klass(name, level=level)
log_format = "%(asctime)s %(hostname)s " + name + ": %(message)s"
formatter = logging.Formatter(log_format, datefmt="%b %d %H:%M:%S")
handler = logging.handlers.SysLogHandler(address=(papertrail_address, papertrail_port))
handler.addFilter(ContextFilter())
handler.setFormatter(formatter)
logger_.addHandler(handler)
return logger_
| 2.5
| 2
|
riskGame/classes/agent/real_time_aStar.py
|
AmrHendy/RiskGame
| 4
|
12781981
|
from sys import maxsize
from riskGame.classes.evaluations.sigmoidEval import SigmoidEval
from riskGame.classes.agent.passive_agent import Passive
class RTAStar:
def __init__(self, evaluation_heuristic=SigmoidEval()):
self.__hash_table = {}
self.__evaluate = evaluation_heuristic
self.__passive_agent = Passive()
self.__memo = {}
def dfs(self, curr_state, distance_from_root, limit):
if limit == 0 or curr_state.get_winner():
# end of limit
return SigmoidEval().score(curr_state) + distance_from_root
if curr_state.__hash__() in self.__memo:
return self.__memo[curr_state.__hash__()]
my_turn_state = self.__passive_agent.play(curr_state)
child_states = my_turn_state.expand()
child_states = child_states[: min(5 * limit, len(child_states))]
min_cost = maxsize
for child in child_states:
if child.get_winner():
return distance_from_root
child_cost = self.dfs(child, distance_from_root + 1, limit - 1)
min_cost = min(min_cost, child_cost)
self.__memo[curr_state.__hash__()] = min_cost
return min_cost
def play(self, state):
# Plan phase
limit = 3
print("At the RTA*\n")
child_states = state.expand()
min_cost = maxsize
second_min_cost = -1
next_state = None
for child in child_states:
if child in self.__hash_table:
child_cost = self.__hash_table[child]
else:
child_cost = self.dfs(child, 1, limit - 1)
if child_cost < min_cost:
second_min_cost = min_cost
min_cost = child_cost
next_state = child
# Execute phase
self.__hash_table[state] = second_min_cost if second_min_cost != maxsize else min_cost
print('RTA* choose the best state to be: ')
next_state.print_state()
return next_state
| 2.71875
| 3
|
labml_db/serializer/pickle.py
|
labmlai/db
| 4
|
12781982
|
<reponame>labmlai/db<filename>labml_db/serializer/pickle.py
import pickle
from typing import Optional
from . import Serializer
from ..types import ModelDict
class PickleSerializer(Serializer):
file_extension = 'pkl'
is_bytes = True
def to_string(self, data: ModelDict) -> bytes:
return pickle.dumps(data)
def from_string(self, data: Optional[bytes]) -> Optional[ModelDict]:
if data is None:
return None
return pickle.loads(data)
| 2.65625
| 3
|
bead/tech/persistence.py
|
krisztianfekete/lib
| 1
|
12781983
|
<gh_stars>1-10
'''
Functions to persist python structures or load them.
'''
import io
import json
# json is used for serializing objects for persistence as it is
# - in the standard library from >=2.6 (including 3.*)
# - 2.7 version decodes strings as unicode, unlike e.g. the csv module
# - the interface is also becoming the standard one for serialization,
# e.g. the competing plistlib's interface was deprecated in 3.4 in
# favor of a new json-like one
ReadError = json.JSONDecodeError
JSON_SAVE_OPTIONS = dict(
indent=4,
sort_keys=True,
ensure_ascii=True,
)
def load(istream):
return json.load(istream)
def loads(string):
return json.loads(string)
def dumps(content):
return json.dumps(content, **JSON_SAVE_OPTIONS)
def dump(content, ostream):
json.dump(content, ostream, **JSON_SAVE_OPTIONS)
def zip_load(zipfile, path):
with zipfile.open(path) as f:
return load(io.TextIOWrapper(f, encoding='utf-8'))
def zip_dump(content, zipfile, path):
with zipfile.open(path, 'w') as f:
return dump(content, io.TextIOWrapper(f, encoding='utf-8'))
def file_load(path):
with open(path) as f:
return load(f)
def file_dump(content, path):
with open(path, 'w') as f:
dump(content, f)
| 3.5625
| 4
|
sacrerouge/datasets/duc_tac/tac2009/__init__.py
|
danieldeutsch/decomposed-rouge
| 81
|
12781984
|
from sacrerouge.datasets.duc_tac.tac2009.subcommand import TAC2009Subcommand
| 1.039063
| 1
|
quotes/luckyitem.py
|
sumaneko/discordpy-startup
| 0
|
12781985
|
from mylib.mymodule import get_quotes
from mymodule.ryonage_bot import RyonageBot
def get_lucky(bot, m):
pre = ""
suf = ""
name = m.author.name if m.author.nick is None else m.author.nick
    # If the bot is still in a healthy state
if bot.dying_hp < bot.get_hp():
pre = f"{name}さんのラッキーアイテムは・・・・・・『"
quotes = [
[100 , "アナルバイブ"],
[100 , "湯呑"],
[100 , "ビニール傘"],
[100 , "ギロチン台"],
[100 , "ローター"],
[100 , "ペンタブ"],
[100 , "プロテイン"],
[100 , "疑似精子"],
[100 , "マヨネーズ"],
[100 , "鶏むね肉"],
[100 , "ゆで卵"],
[100 , "銀のスプーン"],
[100 , "生首"],
[100 , "包丁"],
[100 , "チェーンソー"],
[100 , "Steamの積みゲー"],
[100 , "プラスチックのコップ"],
[100 , "バナナ"],
[100 , "ゴールデンキウイ"],
[100 , "爪楊枝"],
[100 , "アナルパール"],
[100 , "エロフィギュア"],
[100 , "javascript"],
[100 , "Unity"],
[100 , "RPGツクール"],
[100 , "アクションゲームツクール"],
[100 , "カピバラ"],
[100 , "手袋"],
[100 , "掃除機"],
[100 , "ホウキ"],
[100 , "ツヴァイヘンダー"],
[100 , "日本刀"],
[100 , "ハルバード"],
[100 , "メッサー(グロスメッサー)"],
[100 , "プレートアーマー"],
[100 , "クロスボウ"],
[100 , "ロングボウ"],
[100 , "牛刀"],
[100 , "肩ロース肉"],
[100 , "エロ漫画"],
[100 , "アナルもののAV"],
[100 , "手鏡"],
[100 , "イラスト参考書"],
[100 , "猫のぬいぐるみ"],
[100 , "耳掛け型イヤホン"],
[100 , "ブックスタンド"],
[100 , "レモン"],
[100 , "トマト"],
[100 , "スピーカー"],
[100 , "ミネラルウォーター"],
[100 , "アジャスタブルダンベル"],
[100 , "ゲーミングマウス"],
[100 , "液タブ"],
[100 , "コピー用紙"],
[100 , "プリン"],
[100 , "ハイカカオチョコレート"],
[100 , "アーモンド"],
[100 , "彫刻刀"],
[100 , "ハサミ"],
[100 , "手首"],
[100 , "足首"],
[100 , "スカート"],
[100 , "コスプレグッズ"],
[100 , "ラブドール"],
[100 , "カチューシャ"],
[100 , "ヘアピン"],
[100 , "お寿司"],
[100 , "冷凍マグロ"],
[100 , "しいたけ"],
[100 , "折りたたみ椅子"],
[100 , "シャーペン"],
[100 , "ボールペン"],
[100 , "ピンセット"],
[100 , "浣腸用のシリンジ"],
[100 , "サバイバルナイフ"],
[100 , "遮光カーテン"],
[100 , "大福"],
[100 , "練乳"],
[100 , "キッチンカー"],
[100 , "脚立"],
[100 , "歯ブラシ"],
[100 , "デンタルフロス"],
[100 , "デッサン人形"],
[100 , "30cm定規"],
[100 , "接着剤"],
[100 , "USBメモリ"],
[100 , "電卓"],
[100 , "カレンダー"],
[100 , "コーヒー"],
[100 , "おっぱい"],
[100 , "おまんこ"],
[100 , "Suica"],
[100 , "C++"],
[100 , "薙刀"],
[100 , "段ボール箱"],
[100 , "ティッシュ"],
[100 , "片手鍋"],
[100 , "乳首に刺す名札"],
[100 , "片手斧"],
[100 , "ショートソード"],
[100 , "アーミングソード"],
[100 , "ロングソード"],
[100 , "アルテマウェポン"],
[100 , "ロトの剣"],
[100 , "チェインメイル"],
[100 , "三色ボールペン"],
[100 , "焼き鳥の缶詰"],
[100 , "乾パン"],
[100 , "駆逐艦"],
[100 , "石"],
[100 , "コンクリートブロック"],
[100 , "レンガ"],
[100 , "豆腐"],
[100 , "スライム"],
[100 , "ローション"],
[100 , "うさみみバンド"],
[100 , "バニースーツ"],
[100 , "バイアグラ"],
[100 , "媚薬"],
[100 , "ぷっちょのケース"],
[100 , "たけのこの里"],
[100 , "きのこの山"],
[100 , "チョコモナカジャンボ"],
[100 , "バトルドーム"],
[100 , "砥石"],
[100 , "リオレウス"],
[100 , "超大型巨人"],
[100 , "ミギー"],
[100 , "バキSAGA"],
[100 , "雀牌"],
[100 , "足の爪"],
[100 , "ジャポニカ学習帳"],
[100 , "DXライブラリ"],
[100 , "Godot"],
[100 , "ドラえもん(のぶ代ボイス)"],
[100 , "ポニーテール"],
[100 , "ボンデージ"],
[100 , "新しいPC"],
[100 , "5円玉"],
[100 , "1万円札"],
[100 , "サングラス"],
[100 , "ブルーライトカットメガネ"],
[100 , "チョコパフェ"],
[100 , "堅揚げポテト"],
[100 , "お団子"],
[100 , "A4ファイル"],
[100 , "野太刀"],
[100 , "エアコン"],
[100 , "バランスボール"],
[100 , "算数ドリル"],
[100 , "殺虫スプレー"],
[100 , "ベープマット"],
[100 , "虫取り網"],
[100 , "ロープ"],
[100 , "Tシャツ"],
[100 , "エッチな下着"],
[100 , "魚雷"],
[100 , "かつおぶし"],
[100 , "パンツ"],
[100 , "心霊写真"],
[100 , "ハンガー"],
[100 , "爪切り"],
[100 , "お米"],
[100 , "唐揚げ"],
[100 , "漂白剤"],
[100 , "湯たんぽ"],
[100 , "シャンプーのボトル"],
[100 , "After Effects"],
[100 , "Photoshop"],
[100 , "クリップスタジオ"],
[100 , "触手"],
[100 , "消臭スプレー"],
[100 , "消毒用エタノール"],
[100 , "自転車"],
[100 , "ビー玉"],
[100 , "ハイパーヨーヨー"],
[100 , "ミニ四駆"],
[100 , "緑茶"],
[100 , "紅茶"],
[100 , "野菜ジュース"],
[100 , "トマト"],
[100 , "懐中時計"],
[100 , "懐中電灯"],
[100 , "防災リュック"],
[100 , "ハンドガン"],
[100 , "トミーガン"],
[100 , "ロケットランチャー"],
[100 , "四次元ポケット"],
[100 , "1.5Lのペットボトル"],
[100 , "方位磁針"],
[100 , "羅針盤"],
[100 , "漢字ドリル"],
[100 , "ファミコン"],
[100 , "カセットテープ"],
[100 , "呪いのビデオ"],
[100 , "ニプレス"],
[100 , "猫のヒゲ"],
[100 , "ゲームボーイ"],
[100 , "ガントレット"],
[100 , "サバトン"],
[100 , "アーメット"],
[100 , "バルビュート"],
[100 , "アナルフック"],
[100 , "ベーコン"],
[100 , "パンの耳"],
[100 , "高級食パン"],
[100 , "甘酒"],
[100 , "ガチャポンのカプセル"],
[100 , "木刀"],
[100 , "お土産の剣型キーホルダー"],
[100 , "幸運を呼ぶツボ"],
[100 , "硯"],
[100 , "筆"],
[100 , "電極"],
[100 , "スタンガン"],
[100 , "キャットナインテイル"],
[100 , "レイピア"],
[100 , "こんにゃく"],
[100 , "黒マテリア"],
[100 , "コメドプッシャー(ニキビ潰し)"],
[100 , "毛抜き"],
[100 , "山芋"],
[100 , "海老の天ぷら"],
[100 , "食塩"],
[100 , "ブランデー"],
[100 , "ビール"],
[100 , "バファリン"],
[100 , "モンエナ"],
[100 , "オロナミンC"],
[100 , "アクエリアス"],
[100 , "ポカリスエット"],
[100 , "パトランプ"],
[100 , "へぇボタン"],
[100 , "チャージマン研DVDBOX"],
[100 , "蹄鉄"],
[100 , "バスターソード"],
[100 , "バスタードソード"],
[100 , "蛇口"],
[100 , "ネジ"],
[100 , "六角ボルト"],
[100 , "餃子"],
[100 , "肉まん"],
[100 , "ピザマン"],
[100 , "伊達メガネ"],
[100 , "バンダナ"],
[100 , "ラブレター"],
[100 , "紐水着"],
[100 , "スクール水着"],
[100 , "アナル型オナホール(非貫通タイプ)"],
[100 , "妖精さん"],
[100 , "猫耳美少女"],
[100 , "マスカラ"],
[100 , "ランニングシューズ"],
[100 , "懸垂スタンド"],
[100 , "バスタオル"],
[100 , "塩麹"],
[100 , "ケチャップ"],
[100 , "クリピアス"],
[100 , "乳首ピアス"],
[100 , "手錠"],
[100 , "足枷"],
[100 , "珪藻土コースター"],
[100 , "ワカメ"],
[100 , "昆布"],
[100 , "だしパック"],
[100 , "ウニ"],
[100 , "ピッケル"],
[100 , "ツルハシ"],
[100 , "ギター"],
[100 , "リュート"],
[100 , "レオタード"],
[100 , "ドラム缶"],
[100 , "フライパン"],
[100 , "三角コーナー"],
[100 , "マニキュア"],
[100 , "洗濯バサミ"],
[100 , "ピカチュウ"],
[100 , "スーパーマリオ"],
[100 , "ドラえもん(CV:大山のぶ代)"],
[100 , "ハローキティ"],
[100 , "ラップの芯"],
[100 , "トイレットペーパー"],
[100 , "かまぼこの板"],
[100 , "ストロー"],
[100 , "針金"],
[100 , "豚骨ラーメン"],
[100 , "レバー"],
[100 , "変身ステッキ"],
[100 , "メイス"],
[100 , "お馬さんのおちんちん"],
[100 , "栗おこわ"],
[100 , "アナルプラグ"],
[100 , "セミの抜け殻"],
[100 , "マイクロファイバーの雑巾"],
[100 , "サランラップ"],
[100 , "お箸"],
[100 , "スタンド使い"],
[100 , "紙粘土"],
[100 , "つけまつげ"],
[100 , "おろし金"],
[100 , "グランドピアノ"],
[100 , "リコーダー"],
[100 , "月の石"],
[100 , "万華鏡"],
[100 , "畳"],
[100 , "虫眼鏡"],
[100 , "利尿剤"],
[100 , "大胸筋矯正サポーター"],
[100 , "おちんぽミルク"],
[100 , "ベニヤ板"],
[100 , "スレッジハンマー"],
[100 , "五寸釘"],
[100 , "そうめん"],
[100 , "カツオのたたき"],
[100 , "藁人形"],
[100 , "セーター"],
[100 , "金塊"],
[100 , "梅干し"],
[100 , "チェダーチーズ"],
[100 , "チャーシュー"],
[100 , "上履き"],
[100 , "ブルマ"],
[100 , "バファリン"],
[100 , "単2電池"],
[100 , "鎖鎌"],
[100 , "ひまわりの種"],
[100 , "母乳"],
[100 , "おしっこ"],
[100 , "リュックサック"]
]
        # Appended afterwards so that "me" comes up with roughly a 2% probability
num = len(quotes) * 0.02 * 100
quotes.append([num , "わ・た・し♥"])
suf = "』っ!ですっ!"
else:
        # When the bot is near death
quotes = [
[100 , "知りま、せん・・・そんなの・・・"],
[100 , f"私、を・・・虐める{name}さんに・・・ラッキーアイテムなん、て・・・無いです・・・"],
[100 , "私が欲しい、ですよ・・・そんなの・・・"]
]
return pre + get_quotes(quotes) + suf
| 2.1875
| 2
|
camael/regression_models.py
|
CastaChick/Camael
| 2
|
12781986
|
<gh_stars>1-10
import numpy as np
from numpy import linalg as LA
class LinearRegression:
"""
    Model that performs ordinary linear regression.
    Parameters
    ----------
    intercept: boolean(default=True)
        Whether to include an intercept term
Examples
---------
>>> from load_data import load_boston
>>> (X_train, y_train), (X_test, y_test) = load_boston()
>>> reg = LinearRegression()
>>> reg.fit(X_train, y_train)
>>> reg.b
array([ 4.02936706e+01, -1.19997513e-01, 5.70003304e-02, 3.98379660e-03,
4.12698187e+00, -2.05002963e+01, 3.38024903e+00, 7.56807584e-03,
-1.71189793e+00, 3.34747537e-01, -1.17797225e-02, -9.02318039e-01,
8.71912756e-03, -5.55842510e-01])
>>> reg.score(X_test, y_test)
23.19559925642053
"""
def __init__(self, intercept=True):
self.intercept = intercept
def fit(self, X, y):
"""
        Fit the model to the training data.
        Parameters
        ----------
        X: array, shape=(samples, columns)
            Matrix of explanatory variables
        y: vector, len=(samples)
            Vector of target values
        Attributes
        ----------
        b: vector
            Vector of coefficients
        _error: float
            Error of the fitted coefficients
"""
if self.intercept:
self.b = self._solve(np.hstack((np.ones((X.shape[0], 1)), X)), y)
else:
self.b = self._solve(X, y)
self._error = self._culc_error(y, self.predict(X))
def predict(self, X):
"""
        Predict using the coefficient vector computed by the fit method.
        Parameters
        ----------
        X: array, shape=(samples, columns)
            Explanatory variables of the data to predict
        Returns
        -------
        y: vector, len=(samples)
            Predicted target values
"""
if self.intercept:
y = np.hstack((np.ones((X.shape[0], 1)), X)).dot(self.b)
else:
y = X.dot(self.b)
return y
def _solve(self, X, y):
return LA.inv(X.T.dot(X)).dot(X.T.dot(y))
def _culc_error(self, y, y_pred):
return LA.norm(y - y_pred)**2 / y.shape[0]
def score(self, X, y):
"""
        Compute the mean squared error of the model.
        Parameters
        ----------
        X: array, shape=(samples, columns)
            Matrix of explanatory variables
        y: vector, len=(samples)
            Vector of target values
        Returns
        -------
        error: float
            The error
"""
return self._culc_error(y, self.predict(X))
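# --- Worked example added by the editor; not part of the original module. ---
# _solve() implements the normal equations b = (X^T X)^-1 X^T y, so on noise-free
# synthetic data the fitted coefficients recover the generating ones (intercept
# first, then the slopes). The data below is invented purely for illustration.
def _linear_regression_example():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 3)
    true_b = np.array([1.5, -2.0, 0.5])
    y = 3.0 + X.dot(true_b)               # intercept 3.0, no noise
    reg = LinearRegression(intercept=True)
    reg.fit(X, y)
    # reg.b is approximately [3.0, 1.5, -2.0, 0.5] and reg.score(X, y) is ~0
    return reg.b, reg.score(X, y)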
class KNNRegression:
"""
    Regression with the k-nearest neighbor (KNN) method.
    Parameters
    ----------
    k: int (default=5)
        Number of nearest neighbors to consider
    weight: str (default="same")
        Whether neighbors are weighted (unweighted by default);
        pass "distance" to weight neighbors by distance
    practice: int (default=2)
        Distance metric
        * 1: Manhattan distance
        * 2: Euclidean distance
        * other values: Minkowski distance of that order
Examples
--------
>>> from load_data import load_boston
>>> (X_train, y_train), (X_test, y_test) = load_boston()
>>> reg = KNNRegression()
>>> reg.fit(X_train, y_train)
>>> reg.score(X_test, y_test)
36.969313725490196
"""
def __init__(self, k=5, weight="same", practice=2):
if type(k) is not int:
raise TypeError(
"k should be int.")
if weight not in ["same", "distance"]:
raise ValueError(
"weight not recognized: should be 'same' or 'distance'.")
if type(practice) is not int:
raise TypeError(
"practice should be int.")
self.k = k
self.weight = weight
self.practice = practice
def fit(self, X, y):
"""
        Store the training data.
        Parameters
        ----------
        X: array, shape=(samples, columns)
            Matrix of explanatory variables
        y: vector, len=(samples)
            Vector of target values
"""
self.X = X
self.y = y
def _culc_distance(self, sample):
"""
        Compute the distance from one sample to every training sample.
        Parameters
        ----------
        sample: vector
            Feature vector of the sample
        Returns
        -------
        distance: vector
            Distance to each training sample
"""
distance = np.abs(self.X - sample)**self.practice
return np.sum(distance, axis=1)
def predict(self, samples):
"""
        Predict the target value for several samples.
        Parameters
        ----------
        samples: array, shape=(samples, columns)
            Matrix of samples to predict
        Returns
        -------
        y: vector, len=(samples)
            Predicted target values
"""
y = np.zeros(samples.shape[0])
for i, sample in enumerate(samples):
y[i] = self._predict_one(sample)
return y
def _predict_one(self, sample):
"""
        Predict the target value of a single sample.
        Parameters
        ----------
        sample: vector
            Feature vector of the sample
        Returns
        -------
        result: float
            Predicted value
"""
dis = self._culc_distance(sample)
index = np.arange(self.X.shape[0])
index = index[np.argsort(dis, axis=0)]
result = 0
if self.weight == "same":
for i in range(self.k):
result += self.y[index[i]]
result /= self.k
elif self.weight == "distance":
distance = 0
for i in range(self.k):
result += self.y[index[i]] * dis[index[i]]
distance += dis[index[i]]
result /= distance
return result
def score(self, X, y):
"""
        Compute the mean squared error of the model.
        Parameters
        ----------
        X: array, shape=(samples, columns)
            Matrix of explanatory variables
        y: vector, len=(samples)
            Vector of target values
        Returns
        -------
        error: float
            The error
"""
return self._culc_error(y, self.predict(X))
def _culc_error(self, y, y_pred):
return LA.norm(y - y_pred)**2 / y.shape[0]
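# --- Usage sketch added by the editor; not part of the original module. ---
# Fits KNNRegression on a tiny synthetic 1-D problem; with weight="same" each
# prediction is the plain mean of the k nearest training targets.
def _knn_regression_example():
    X_train = np.arange(10, dtype=float).reshape(-1, 1)
    y_train = 2.0 * X_train.ravel()
    reg = KNNRegression(k=3, weight="same")
    reg.fit(X_train, y_train)
    preds = reg.predict(np.array([[2.5], [7.0]]))  # mean of the 3 nearest targets
    return preds, reg.score(X_train, y_train)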
| 3.015625
| 3
|
downloadbot_common/messaging/consuming/handlers.py
|
dnguyen0304/downloadbot.common
| 0
|
12781987
|
# -*- coding: utf-8 -*-
import abc
class Handler(metaclass=abc.ABCMeta):
@abc.abstractmethod
def handle(self, message):
"""
Parameters
----------
message : downloadbot_common.messaging.messages.Message
Returns
-------
None
Raises
------
downloadbot_common.messaging.consuming.exceptions.HandleError
If there was an error handling the message.
"""
raise NotImplementedError
| 2.875
| 3
|
angr/knowledge_plugins/patches.py
|
Kyle-Kyle/angr
| 6,132
|
12781988
|
from typing import Optional, List, Dict
from cle.address_translator import AddressTranslator
from sortedcontainers import SortedDict
from .plugin import KnowledgeBasePlugin
# TODO: Serializable
class Patch:
def __init__(self, addr, new_bytes, comment: Optional[str]=None):
self.addr = addr
self.new_bytes = new_bytes
self.comment = comment
def __len__(self):
return len(self.new_bytes)
class PatchManager(KnowledgeBasePlugin):
"""
A placeholder-style implementation for a binary patch manager. This class should be significantly changed in the
future when all data about loaded binary objects are loaded into angr knowledge base from CLE. As of now, it only
stores byte-level replacements. Other angr components may choose to use or not use information provided by this
manager. In other words, it is not transparent.
    Patches should not overlap, but it is the user's responsibility to check for and avoid overlapping patches.
"""
def __init__(self, kb):
super().__init__()
self._patches: Dict[int,Patch] = SortedDict()
self._kb = kb
def add_patch(self, addr, new_bytes, comment: Optional[str]=None):
self._patches[addr] = Patch(addr, new_bytes, comment=comment)
def add_patch_obj(self, patch: Patch):
self._patches[patch.addr] = patch
def remove_patch(self, addr):
if addr in self._patches:
del self._patches[addr]
def patch_addrs(self):
return self._patches.keys()
def get_patch(self, addr):
"""
Get patch at the given address.
:param int addr: The address of the patch.
:return: The patch if there is one starting at the address, or None if there isn't any.
:rtype: Patch or None
"""
return self._patches.get(addr, None)
def get_all_patches(self, addr, size):
"""
Retrieve all patches that cover a region specified by [addr, addr+size).
:param int addr: The address of the beginning of the region.
:param int size: Size of the region.
:return: A list of patches.
:rtype: list
"""
patches = [ ]
for patch_addr in self._patches.irange(maximum=addr+size-1, reverse=True):
p = self._patches[patch_addr]
if self.overlap(p.addr, p.addr + len(p), addr, addr+size):
patches.append(p)
else:
break
return patches[::-1]
def keys(self):
return self._patches.keys()
def items(self):
return self._patches.items()
def values(self):
return self._patches.values()
    def copy(self):
        o = PatchManager(self._kb)
        o._patches = self._patches.copy()
        return o
@staticmethod
def overlap(a0, a1, b0, b1):
return a0 <= b0 < a1 or a0 <= b1 < a1 or b0 <= a0 < b1
def apply_patches_to_binary(self, binary_bytes: Optional[bytes]=None, patches: Optional[List[Patch]]=None) -> bytes:
if patches is None:
patches = sorted(list(self._patches.values()), key=lambda x: x.addr)
if binary_bytes is None:
with open(self._kb._project.loader.main_object.binary, "rb") as f:
binary_bytes = f.read()
for patch in patches:
# convert addr to file offset
at = AddressTranslator.from_mva(patch.addr, self._kb._project.loader.main_object)
file_offset = at.to_raw()
if file_offset < len(binary_bytes) and file_offset + len(patch.new_bytes) < len(binary_bytes):
binary_bytes = binary_bytes[:file_offset] + \
patch.new_bytes + \
binary_bytes[file_offset + len(patch.new_bytes):]
return binary_bytes
KnowledgeBasePlugin.register_default('patches', PatchManager)
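# --- Usage sketch added by the editor; not part of the original module. ---
# Illustrates the Patch record and the static overlap() helper without touching a
# knowledge base; in normal use the manager is reached through the knowledge base
# under the 'patches' key registered above. Addresses below are arbitrary.
def _patch_overlap_example():
    p = Patch(0x400000, b"\x90\x90", comment="two-byte nop pad")
    assert len(p) == 2
    # [0x400000, 0x400002) overlaps a region ending one byte inside it ...
    assert PatchManager.overlap(p.addr, p.addr + len(p), 0x3fffff, 0x400001)
    # ... but not a region that starts where the patch ends.
    assert not PatchManager.overlap(p.addr, p.addr + len(p), 0x400002, 0x400004)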
| 2.390625
| 2
|
gtm_manager/manager.py
|
trakken/gtm_manager
| 7
|
12781989
|
"""manager.py"""
import logging
from googleapiclient import errors
import gtm_manager.account
class GTMManager(gtm_manager.base.GTMBase):
"""Authenticates a users base gtm access.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.accounts_service = self.service.accounts() # pylint: disable=E1101
def list_accounts(self):
"""Loads from the API and lists all GTM Accounts that a user has access to.
Returns:
A list of :class:`gtm_manager.account.GTMAccount` that the user has access to.
"""
try:
request = self.accounts_service.list()
response = request.execute()
return [
gtm_manager.account.GTMAccount(account=x, service=self.service)
for x in response.get("account") or []
]
except errors.HttpError as error:
logging.error(error)
return []
| 2.859375
| 3
|
busca-jogos/busca/classes/patio.py
|
IvanBrasilico/AI-NanoDegree
| 0
|
12781990
|
<reponame>IvanBrasilico/AI-NanoDegree<gh_stars>0
from collections import OrderedDict
from typing import Any, List, Optional, Set, Tuple, Union
from busca.classes import ALTURAS, COLUNAS
from busca.utils.logconf import logger
colunas_dict = {k: ind for ind, k in enumerate(COLUNAS)}
alturas_dict = {k: ind for ind, k in enumerate(ALTURAS)}
class Container():
def __init__(self, numero, time_to_leave=5):
# type: (str, int) -> None
self._numero = numero
self._time_to_leave = time_to_leave
@property
def time_to_leave(self):
# type: () -> int
return self._time_to_leave
@time_to_leave.setter
def time_to_leave(self, time_to_leave):
# type: (int) -> None
self._time_to_leave = time_to_leave
def __str__(self):
# type: () -> str
return self._numero
def __repr__(self):
# type: () -> str
return self._numero
class Pilha():
"""Define uma pilha de largura [A-E] e altura [0-7]"""
LEFT = 2
UP = 3
RIGHT = 4
def __init__(self, nome,
max_altura=len(ALTURAS),
max_largura=len(COLUNAS)):
# type: (str, int, int) -> None
self._pilha: dict = OrderedDict()
self._nome = nome
self._altura = max_altura
self._largura = max_largura
for coluna in COLUNAS:
self._pilha[coluna] = OrderedDict()
for altura in ALTURAS:
self._pilha[coluna][altura] = None
def position_totuple(self,
position: str
) -> Tuple[Optional[str], Optional[str]]:
coluna = None
altura = None
try:
coluna = position[0]
altura = position[1]
except (IndexError, TypeError) as err:
logger.debug(
f'position {position} invalid passed to position_totuple')
logger.debug(str(err))
pass
return coluna, altura
def get_containerinposition(self, position):
# type: (str) -> Optional[Container]
coluna, altura = self.position_totuple(position)
if coluna is not None:
_coluna = self._pilha.get(coluna)
if _coluna:
return _coluna.get(altura)
return None
def side_locked_position(self, position):
# type: (str) -> Set[int]
coluna, altura = self.position_totuple(position)
if coluna and altura:
return self.side_locked(coluna, altura)
return set()
def side_locked(self, pcoluna, paltura):
# type: (str, str) -> Set[int]
ind_col = colunas_dict[pcoluna]
ind_alt = alturas_dict[paltura]
sides_locked = set()
if ind_col + 1 < len(COLUNAS):
coluna = COLUNAS[ind_col + 1]
altura = ALTURAS[ind_alt]
if self._pilha[coluna][altura] is not None:
sides_locked.add(Pilha.RIGHT)
if ind_col > 0:
coluna = COLUNAS[ind_col - 1]
altura = ALTURAS[ind_alt]
if self._pilha[coluna][altura] is not None:
sides_locked.add(Pilha.LEFT)
return sides_locked
def up_locked_position(self, position: str) -> bool:
coluna, altura = self.position_totuple(position)
if coluna and altura:
return self.up_locked(coluna, altura)
return True
def up_locked(self, pcoluna, paltura):
# type: (str, str) -> bool
ind_alt = alturas_dict[paltura]
if ind_alt == len(ALTURAS) - 1:
return True
altura = ALTURAS[ind_alt + 1]
if self._pilha[pcoluna][altura] is not None:
return True
return False
def time_mean(self):
# type: () -> float
soma = 0
qtde = 0
for coluna in self._pilha.values():
for container in coluna.values():
if container:
soma += container.time_to_leave
qtde += 1
if qtde == 0:
return 0
return soma / qtde
    def sides_locked(self, coluna, altura):
        # type: (str, str) -> Set[int]
        """Returns the set of sides of the given position that are not blocked.
        :param coluna: column letter of the position
        :param altura: height level of the position
        """
        sides_free = set({Pilha.LEFT, Pilha.UP, Pilha.RIGHT})
        if coluna and altura:
            if self.up_locked(coluna, altura):
                sides_free.remove(Pilha.UP)
            sides_locked = self.side_locked(coluna, altura)
            if sides_locked:
                # side_locked() returns a set, so use set difference here
                # instead of set.remove (which would raise KeyError)
                sides_free -= sides_locked
        return sides_free
def is_locked(self, coluna, altura):
# type: (str, str) -> bool
if self.up_locked(coluna, altura) or \
self.sides_locked(coluna, altura) == {Pilha.LEFT, Pilha.RIGHT}:
return True
return False
def is_position_locked(self, position):
coluna, altura = self.position_totuple(position)
return self.is_locked(coluna, altura)
def is_acessible(self, coluna, altura):
# type: (str, str) -> bool
"""Testa se é uma posição válida para armazenagem"""
ind_alt = alturas_dict[altura]
if ind_alt == len(ALTURAS) - 1:
return False
up = ALTURAS[ind_alt + 1]
if ind_alt == 0:
down = None
else:
down = ALTURAS[ind_alt - 1]
col_right = colunas_dict[coluna] + 1
col_left = colunas_dict[coluna] - 1
# print(up, down, col_left, col_right)
if (col_left > 0):
left = COLUNAS[col_left]
if self._pilha[left].get(up) is not None:
logger.debug('Lado esquerdo ocupado!!!')
return False
if (col_right < len(COLUNAS)):
right = COLUNAS[col_right]
if self._pilha[right].get(up) is not None:
logger.debug('Lado direito ocupado!!!')
return False
if self._pilha[coluna].get(up) is not None:
logger.debug('Em cima ocupado!!!')
return False
if down and self._pilha[coluna].get(down) is None:
logger.debug('Embaixo vazio!!!')
return False
return True
def first_free_position(self):
# type: () -> Tuple[str, str]
for coluna in COLUNAS:
for altura in ALTURAS:
if self._pilha[coluna][altura] is None:
return coluna, altura
return False, False
def is_position_free(self,
position: str = None,
):
# type: (...) -> Tuple[Union[bool, str], Union[bool, str]]
"""Retorna posicao se livre, senao None
:param posicao: String 'coluna'+'altura'. Caso nao passada,
retorna primeira livre
"""
if position:
coluna, altura = self.position_totuple(position)
if self._pilha[coluna][altura] is None and \
self.is_acessible(coluna, altura):
return coluna, altura
return False, False
else:
return self.first_free_position()
def _atualiza_posicao(self, coluna, altura, container):
# type: (str, str, Optional[Container]) -> None
self._pilha[coluna][altura] = container
def remove(self, position, container):
# type: (Optional[str], Container) -> bool
coluna, altura = self.position_totuple(position)
if not coluna or self.is_locked(coluna, altura):
return False
# print(coluna, altura)
if coluna:
stacked_container = self._pilha[coluna][altura]
# print(stacked_container)
if stacked_container == container:
self._atualiza_posicao(coluna, altura, None)
return True
return False
def stack(self,
container, # type: Container
position=None, # type: Union[None, Tuple[str, str], str]
):
# type: (...) -> Union[bool, str]
coluna, altura = self.is_position_free(position)
if coluna:
self._atualiza_posicao(coluna, altura, container)
return coluna + altura
return False
def has_space(self):
# type: () -> bool
for coluna in COLUNAS:
for altura in ALTURAS:
if self._pilha[coluna][altura] is None:
return True
return False
class Patio():
def __init__(self, nome=''):
# type: (str) -> None
self._nome = nome
self._pilhas: dict = OrderedDict()
self._containers: dict = OrderedDict()
self._history: dict = OrderedDict()
def add_pilha(self, nome_pilha=None):
# type: (str) -> None
self._pilhas[nome_pilha] = Pilha(nome_pilha)
def stack(self, container, nome_pilha=None, position=None):
# type: (Container, str, Optional[str]) -> Union[bool, str]
pilha = self._pilhas.get(nome_pilha)
if pilha:
position = pilha.stack(container, position)
if position:
self._containers[container._numero] = \
(nome_pilha, position, container)
return position
return False
def unstack(self, container, nome_pilha=None, position=None):
# type: (Container, str, Optional[str]) -> bool
pilha = self._pilhas.get(nome_pilha)
if pilha is not None:
success = pilha.remove(position, container)
if success:
self._history[container._numero] = \
self._containers.pop(container._numero)
return True
return False
def add_container(self, container, nome_pilha=None, posicao=None):
# type: (Container, Optional[str], Optional[Any]) -> str
"""Adiciona container na pilha, ou no pátio.
Adiciona pilha se pilha cheia.
:param container: Objeto Container
:param nome_pilha: Nome da pilha a utilizar.
Se não passado, procura em todas
:param posicao: String 'B5' 'coluna_altura'
:return: None se pilha/pátio cheio, senão posição
"""
# print(len(self._pilhas), len(self._containers), nome_pilha)
if nome_pilha is None:
for pilha in self._pilhas.values():
posicao = self.add_container(container, pilha._nome, posicao)
if posicao:
break
# if len(self._containers) >= 30:
# print(posicao, nome_pilha)
            if not posicao:  # stacks are full, create a new one
nome_pilha = '{0:04d}'.format(len(self._pilhas) + 1)
# print('Add pilha %s ' % nome_pilha)
self.add_pilha(nome_pilha)
posicao = self.stack(container, nome_pilha, posicao)
else:
posicao = self.stack(container, nome_pilha, posicao)
return posicao
def get_container_tuple(self, numero: str
) -> \
Tuple[Optional[str], Optional[str], Optional[Container]]:
nome_pilha, position, container = \
self._containers.get(numero, (None, None, None))
return nome_pilha, position, container
def get_container_numero(self, numero):
# type: (str) -> Optional[Container]
nome_pilha, position, container = self.get_container_tuple(numero)
if nome_pilha:
return container
return None
def remove_container(self, container: Container) -> bool:
if container is None or not (isinstance(container, Container)):
return False
nome_pilha, position, container = \
self.get_container_tuple(container._numero)
if position is None:
return False
return self.remove_position(container, nome_pilha, position)
def remove_position(self, container: Container,
nome_pilha: str, position: str):
return self.unstack(container, nome_pilha, position)
def pilhas_com_espaco(self):
# type: () -> List[Pilha]
result = []
for pilha in self._pilhas.values():
if pilha.has_space():
result.append(pilha)
return result
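# --- Usage sketch added by the editor; not part of the original module. ---
# A small illustrative flow: create a yard with one stack, store a container and
# remove it again. The container number is made up, and the exact position string
# returned depends on the COLUNAS/ALTURAS constants imported above.
def _patio_example():
    patio = Patio('patio-demo')
    patio.add_pilha('0001')
    box = Container('MSKU1234567', time_to_leave=3)
    position = patio.add_container(box)   # stored in the first free slot
    assert patio.get_container_numero('MSKU1234567') is box
    removed = patio.remove_container(box)
    return position, removed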
| 2.734375
| 3
|
python/sherpa/__init__.py
|
MattRickS/sherpa
| 1
|
12781991
|
<gh_stars>1-10
from sherpa.exceptions import FormatError, ParseError, PathResolverError
from sherpa.resolver import PathResolver
| 1.195313
| 1
|
fuzzing/kernel/syzkaller-configs/generate_config.py
|
DBGilles/retrowrite
| 478
|
12781992
|
<reponame>DBGilles/retrowrite<gh_stars>100-1000
#!/usr/bin/python3
import argparse
import json
import os
def main():
parser = argparse.ArgumentParser(
description='Generate a configuration file for syzkaller')
parser.add_argument('--workdir', help='workdir for syzkaller', required=True)
parser.add_argument('--kernel', help='path to the kernel directory', required=True)
parser.add_argument('--initramfs', help='path to the initramfs', required=True)
parser.add_argument('--image', help='path to the disk image', required=True)
parser.add_argument('--sshkey', help='path to the VM\'s SSH key', required=True)
parser.add_argument('--syzkaller', help='path to syzkaller', required=True)
parser.add_argument('--vms', help='number of VMs', type=int, default=8)
parser.add_argument('--cpus', help='CPUs per VM', type=int, default=2)
parser.add_argument('--mem', help='memory per VM', type=int, default=2048)
parser.add_argument('config', help='path to the original config')
args = parser.parse_args()
with open(args.config) as f:
config = json.load(f)
config['reproduce'] = False
config['vm']['count'] = args.vms
config['vm']['kernel'] = os.path.join(args.kernel, 'arch', 'x86', 'boot',
'bzImage')
config['vm']['initrd'] = args.initramfs
config['vm']['cpu'] = args.cpus
config['vm']['mem'] = args.mem
config['workdir'] = args.workdir
config['kernel_obj'] = args.kernel
config['image'] = args.image
config['sshkey'] = args.sshkey
config['syzkaller'] = args.syzkaller
print(json.dumps(config, indent=4))
if __name__ == '__main__':
main()
| 2.265625
| 2
|
mpnetcdf4/dc.py
|
GeoscienceAustralia/dea-netcdf-benchmark
| 1
|
12781993
|
"""
Datacube interop functions are here
"""
import numpy as np
from itertools import chain
from types import SimpleNamespace
from datacube.storage.storage import measurement_paths
from datacube.utils import uri_to_local_path
from datacube.api import GridWorkflow
def flatmap(f, items):
return chain.from_iterable(map(f, items))
def first_val(x):
return next(iter(x.values()))
def list_native_cell(product, cell_index, dc, **query):
index = dc.index
p = index.products.get_by_name(product)
if p.grid_spec is None:
raise ValueError('Supplied product does not have a grid spec')
gw = GridWorkflow(index, grid_spec=p.grid_spec)
tile = gw.list_cells(cell_index=cell_index,
product=product,
**query)[cell_index]
return list(flatmap(lambda x: x, tile.sources.values))
def group_by_storage(dss, bands=None):
"""
returns [StorageResource]
StorageResource
.uri - string, URI of the resource
.local_path - PosixPath, path on a filesystem, could be None if not a file resource
.bands - Dictionary of bands (copied from Dataset)
.time - np.ndarray<datetime64[ns]> Timestamps to be read from this resource
.datasets - List<datacube.Dataset> referencing this resource
"""
su_all = {}
if bands is None:
def check_band(band):
return True
else:
bands = set(bands)
def check_band(band):
return band in bands
def local_path(uri):
try:
return uri_to_local_path(uri)
except ValueError:
return None
def update(su, ds, band=None):
if band is None:
bb = {k: ds.measurements[k]
for k in ds.measurements if check_band(k)}
else:
bb = {band: ds.measurements[band]}
if su not in su_all:
su_all[su] = SimpleNamespace(bands=bb,
uri=su,
local_path=local_path(su),
datasets=[ds])
else:
su_all[su].datasets.append(ds)
for ds in dss:
pp = measurement_paths(ds)
paths = set(pp.values())
if len(paths) == 1: # All bands in one file
update(paths.pop(), ds)
elif len(paths) == len(pp): # Each band in it's own file
for band, file in pp.items():
if check_band(band):
update(file, ds, band)
else:
raise ValueError('Not supporting multiple multi-band files')
for s in su_all.values():
s.time = np.array([ds.center_time for ds in s.datasets], dtype='datetime64[ns]')
return sorted(su_all.values(), key=lambda s: s.time[0])
def compute_time_slice(requested_time, file_time):
"""
Given requested time stamps and available timestamps (both assumed to be
sorted in ascending order), computes roi such that
requested_time in file_time[roi]
Returns (roi, contigous, complete)
Where:
roi: slice object
        contigous: True|False; if False, not all file timestamps in the range are needed
complete: True|False, if False some requested timestamps were not found
"""
assert requested_time.dtype == file_time.dtype
ii = np.where((file_time >= requested_time.min()) * (file_time <= requested_time.max()))[0]
if len(ii) == 0:
raise ValueError("No overlap")
roi = slice(ii[0], ii[-1]+1)
file_time = set(file_time[roi])
requested_time = set(requested_time)
contigous = (file_time == requested_time)
complete = requested_time.issubset(file_time)
return roi, contigous, complete
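# --- Worked example added by the editor; not part of the original module. ---
# Shows compute_time_slice on hand-made datetime64 arrays: the requested stamps are
# a strict subset of the file stamps, so the slice is complete but not contiguous.
def _compute_time_slice_example():
    file_time = np.array(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04'],
                         dtype='datetime64[ns]')
    requested = np.array(['2020-01-01', '2020-01-03'], dtype='datetime64[ns]')
    roi, contigous, complete = compute_time_slice(requested, file_time)
    # roi == slice(0, 3); contigous is False (2020-01-02 is not requested); complete is True
    return roi, contigous, complete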
| 2.28125
| 2
|
src/calculate_ranks.py
|
BorgwardtLab/graphkernels-review
| 3
|
12781994
|
<reponame>BorgwardtLab/graphkernels-review
#!/usr/bin/env python3
#
# calculate_ranks.py: calculates the ranks of individual graph kernels
# on the benchmark data sets.
import pandas as pd
import sys
from collections import Counter
if __name__ == '__main__':
df = pd.read_csv(sys.argv[1], header=0, index_col=0)
df = df.rank(axis=0, ascending=False, method='average')
mean = df.mean(axis=1)
std = df.std(axis=1)
df['mean'] = mean
df['std'] = std
df = df[['mean', 'std']]
pd.options.display.float_format = '{:,.2f}'.format
print(df.transpose())
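# --- Worked example added by the editor; not part of the original script. ---
# Mirrors the transformation above on a tiny in-memory frame: kernels are rows,
# data sets are columns, and rank 1 goes to the highest accuracy in each column.
# The kernel and data set names are illustrative only.
def _rank_example():
    df = pd.DataFrame(
        {'MUTAG': [0.85, 0.80, 0.90], 'NCI1': [0.70, 0.75, 0.72]},
        index=['WL', 'SP', 'GK'],
    )
    ranks = df.rank(axis=0, ascending=False, method='average')
    # e.g. WL is rank 2.0 on MUTAG and rank 3.0 on NCI1
    summary = pd.DataFrame({'mean': ranks.mean(axis=1), 'std': ranks.std(axis=1)})
    return summary.transpose()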
| 2.75
| 3
|
trainer/train_elmpronet.py
|
fortoon21/detecthangul
| 0
|
12781995
|
import os
import time
import torch
import torch.optim
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
from loss.ssd_loss import SSDLoss
from metrics.voc_eval import voc_eval
from modellibs.s3fd.box_coder import S3FDBoxCoder
from utils.average_meter import AverageMeter
class Trainer(object):
def __init__(self, opt, train_dataloader, valid_dataloader, model):
self.opt = opt
self.current_lr = opt.lr
self.start_epoch = opt.start_epochs
self.train_dataloader = train_dataloader
self.valid_dataloader = valid_dataloader
self.max_iter_train = len(self.train_dataloader)
self.max_iter_valid = len(self.valid_dataloader)
self.model = model
self.criterion_first = torch.nn.CrossEntropyLoss().cuda()
self.criterion_middle = torch.nn.CrossEntropyLoss().cuda()
self.criterion_last = torch.nn.CrossEntropyLoss().cuda()
self.criterion_config = torch.nn.CrossEntropyLoss().cuda()
self.optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
self.best_loss = float('inf')
if opt.resume:
self.optimizer.load_state_dict(torch.load(opt.resume_path)['optimizer'])
def train_model(self, max_epoch, learning_rate, layers=None):
self.max_epoch = max_epoch
for epoch in range(self.start_epoch, self.max_epoch):
self.adjust_learning_rate(self.optimizer, epoch)
self.train_epoch(epoch)
self.valid_epoch(epoch)
print('')
print('optimization done')
save_dir = 'experiments/%s_%s_%s' % (self.opt.dataset, self.opt.task, self.opt.model)
file_name = '%s_%s_%s_best_loss_%f' % (self.opt.dataset, self.opt.task, self.opt.model, self.best_loss)
os.rename(self.opt.expr_dir, os.path.join(save_dir,file_name))
def train_epoch(self, epoch):
""" training """
self.model.train()
self.optimizer.zero_grad()
train_loss = 0
for batch_idx, (inputs, label_first,label_middle, label_last, label_config) in enumerate(self.train_dataloader):
# label_first = labels[0]
# label_middle = labels[1]
# label_last = labels[2]
# label_config = labels[3]
inputs = inputs.to(self.opt.device)
label_first = label_first.to(self.opt.device)
label_middle = label_middle.to(self.opt.device)
label_last = label_last.to(self.opt.device)
label_config = label_config.to(self.opt.device)
output_first, output_middle, output_last, output_config = self.model(inputs)
loss_first = self.criterion_first(output_first, label_first)
loss_middle = self.criterion_middle(output_middle, label_middle)
loss_last = self.criterion_last(output_last, label_last)
loss_config = self.criterion_config(output_config, label_config)
loss = loss_first + loss_middle + loss_last + loss_config
loss.backward()
if ((batch_idx + 1) % self.opt.accum_grad == 0) or ((batch_idx+1) == self.max_iter_train):
self.optimizer.step()
self.model.zero_grad()
self.optimizer.zero_grad()
train_loss += loss.item()
if batch_idx % self.opt.print_freq == 0:
print('Epoch[%d/%d] Iter[%d/%d] Learning Rate: %.6f Total Loss: %.4f, First Loss: %.4f, Middle Loss: %.4f, Last Loss: %.4f, Config Loss: %.4f' %
(epoch, self.max_epoch, batch_idx, self.max_iter_train, self.current_lr, loss.item(), loss_first.item(), loss_middle.item(), loss_last.item(), loss_config.item()))
def valid_epoch(self, epoch):
correct_f = 0
correct_m = 0
correct_l = 0
correct_c = 0
""" validate """
self.model.eval()
test_loss = 0
for batch_idx, (inputs, label_first,label_middle, label_last, label_config) in enumerate(self.valid_dataloader):
with torch.no_grad():
inputs = inputs.to(self.opt.device)
label_first = label_first.to(self.opt.device)
label_middle = label_middle.to(self.opt.device)
label_last = label_last.to(self.opt.device)
label_config = label_config.to(self.opt.device)
output_first, output_middle, output_last, output_config = self.model(inputs)
loss_first = self.criterion_first(output_first, label_first)
loss_middle = self.criterion_middle(output_middle, label_middle)
loss_last = self.criterion_last(output_last, label_last)
loss_config = self.criterion_config(output_config, label_config)
loss = loss_first + loss_middle + loss_last + loss_config
pred_f = output_first.data.max(1, keepdim=True)[1].cpu()
pred_m = output_middle.data.max(1, keepdim=True)[1].cpu()
pred_l = output_last.data.max(1, keepdim=True)[1].cpu()
pred_c = output_config.data.max(1, keepdim=True)[1].cpu()
correct_f += pred_f.eq(label_first.cpu().view_as(pred_f)).sum()
correct_m += pred_m.eq(label_middle.cpu().view_as(pred_m)).sum()
correct_l += pred_l.eq(label_last.cpu().view_as(pred_l)).sum()
correct_c += pred_c.eq(label_config.cpu().view_as(pred_c)).sum()
test_loss += loss.item()
if batch_idx % self.opt.print_freq_eval == 0:
print('Validation[%d/%d] Total Loss: %.4f, First Loss: %.4f, Middle Loss: %.4f, Last Loss: %.4f, Conf Loss: %.4f' %
(batch_idx, len(self.valid_dataloader), loss.item(), loss_first.item(), loss_middle.item(), loss_last.item(), loss_config.item()))
num_test_data = len(self.valid_dataloader.dataset)
accuracy_f = 100. * correct_f / num_test_data
accuracy_m = 100. * correct_m / num_test_data
accuracy_l = 100. * correct_l / num_test_data
accuracy_c = 100. * correct_c / num_test_data
test_loss /= len(self.valid_dataloader)
if test_loss < self.best_loss:
print('Saving..')
state = {
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_loss': test_loss,
'epoch': epoch,
}
torch.save(state, os.path.join(self.opt.expr_dir, 'model_best.pth'))
self.best_loss = test_loss
print('[*] Model %s,\tCurrent Loss: %f\tBest Loss: %f' % (self.opt.model, test_loss, self.best_loss))
print('Val Accuracy_F: {}/{} ({:.0f}%) | Val Accuracy_M: {}/{} ({:.0f}%) | Val Accuracy_L: {}/{} ({:.0f}%) | Val Accuracy_C: {}/{} ({:.0f}%)\n'.format(
correct_f, num_test_data, accuracy_f,
correct_m, num_test_data, accuracy_m,
correct_l, num_test_data, accuracy_l,
correct_c, num_test_data, accuracy_c))
def adjust_learning_rate(self, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
self.current_lr = self.opt.lr * (0.1 ** (epoch // 50))
for param_group in optimizer.param_groups:
param_group['lr'] = self.current_lr
def make_dir(self, dir_path):
if not os.path.exists(os.path.join(self.opt.expr_dir, dir_path)):
os.mkdir(os.path.join(self.opt.expr_dir, dir_path))
| 2.078125
| 2
|
python/testData/types/AwaitOnImportedCoroutine/mycoroutines.py
|
jnthn/intellij-community
| 2
|
12781996
|
<reponame>jnthn/intellij-community<gh_stars>1-10
from typing import Any
async def mycoroutine() -> Any:
pass
| 1.132813
| 1
|
recupero/migrations/0002_auto_20191103_1159.py
|
cluster311/ggg
| 6
|
12781997
|
# Generated by Django 2.2.4 on 2019-11-03 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recupero', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='prestacion',
name='nomenclador',
),
migrations.AddField(
model_name='tipoprestacion',
name='arancel',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=11),
),
migrations.AddField(
model_name='tipoprestacion',
name='codigo',
field=models.CharField(blank=True, help_text='Código del servicio (de nomenclador si corresponde)', max_length=30, null=True),
),
migrations.AddField(
model_name='tipoprestacion',
name='descripcion',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='tipoprestacion',
name='observaciones',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='tipoprestacion',
name='tipo',
field=models.PositiveIntegerField(choices=[(0, 'Desconocido'), (100, 'Consulta'), (200, 'Práctica'), (300, 'Internación'), (400, 'Laboratorio')], default=100),
),
]
| 1.742188
| 2
|
opensearch/results.py
|
akuanti/opensearch
| 0
|
12781998
|
from opensearch import osfeedparser
import logging
logger = logging.getLogger(__name__)
class Results(object):
def __init__(self, query, agent=None):
self.agent = agent
self._fetch(query)
self._iter = 0
def __iter__(self):
self._iter = 0
return self
def __len__(self):
return self.totalResults
def next(self):
# just keep going like the energizer bunny
while True:
# return any item we haven't returned
if self._iter < len(self.items):
self._iter += 1
return self.items[self._iter-1]
# if there appears to be more to fetch
if \
self.totalResults != 0 \
and self.totalResults > self.startIndex + self.itemsPerPage - 1:
# get the next query
next_query = self._get_next_query()
# if we got one executed it and go back to the beginning
if next_query:
self._fetch(next_query)
# very important to reset this counter
# or else the return will fail
self._iter = 0
else:
# deal with malformed templates
# stop if there isn't anything
raise StopIteration
else:
raise StopIteration
def _fetch(self, query):
url = query.url()
logger.debug("fetching %s" % url)
feed = osfeedparser.opensearch_parse(url, agent=self.agent)
self.feed = feed
# general channel stuff
channel = feed['feed']
self.title = _pick(channel,'title')
self.link = _pick(channel,'link')
self.description = _pick(channel,'description')
self.language = _pick(channel,'language')
self.copyright = _pick(channel,'copyright')
# get back opensearch specific values
self.totalResults = _pick(channel,'opensearch_totalresults',0)
self.startIndex = _pick(channel,'opensearch_startindex',1)
self.itemsPerPage = _pick(channel,'opensearch_itemsperpage',0)
# alias items from the feed to our results object
self.items = feed['items']
# set default values if necessary
if self.startIndex == 0:
self.startIndex = 1
if self.itemsPerPage == 0 and len(self.items) > 0:
self.itemsPerPage = len(self.items)
# store away query for calculating next results
# if necessary
self.last_query = query
def _get_next_query(self):
# update our query to get the next set of records
query = self.last_query
# use start page if the query supports it
if query.has_macro('startPage'):
# if the query already defined the startPage
# we just need to increment it
if hasattr(query, 'startPage'):
query.startPage += 1
# to issue the first query startPage might not have
# been specified, so set it to 2
else:
query.startPage = 2
return query
# otherwise the query should support startIndex
elif query.has_macro('startIndex'):
# if startIndex was used before we just add the
# items per page to it to get the next set
if hasattr(query, 'startIndex'):
query.startIndex += self.itemsPerPage
# to issue the first query the startIndex may have
# been left blank in that case we assume it to be
# the item just after the last one on this page
else:
query.startIndex = self.itemsPerPage + 1
return query
# doesn't look like there is another stage to this query
return None
# helper for pulling values out of a dictionary if they're there
# and returning a default value if they're not
def _pick(d,key,default=None):
# get the value out
value = d.get(key)
# if it wasn't there return the default
if value == None:
return default
# if they want an int try to convert to an int
# and return default if it fails
if type(default) == int:
try:
return int(d[key])
except:
return default
# otherwise we're good to return the value
return value
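# --- Usage sketch added by the editor; not part of the original module. ---
# Shows how _pick falls back to defaults and coerces to int when the default is an
# int, which is how the opensearch_* channel values above are normalised.
def _pick_example():
    channel = {'opensearch_totalresults': '42', 'title': 'demo feed'}
    assert _pick(channel, 'title') == 'demo feed'
    assert _pick(channel, 'language', 'en') == 'en'            # missing key -> default
    assert _pick(channel, 'opensearch_totalresults', 0) == 42  # int default -> int()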
| 2.671875
| 3
|
app/pkgs/query.py
|
minhtuan221/architecture-collection
| 3
|
12781999
|
<reponame>minhtuan221/architecture-collection<filename>app/pkgs/query.py
from .type_check import type_check
@type_check
def get_start_stop_pos(page: int, size: int):
page = max(1, page)
size = max(1, size)
offset = (page - 1) * size
limit = size
return offset, limit
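# --- Usage sketch added by the editor; not part of the original module. ---
# Pagination is 1-based: page 1 starts at offset 0, and out-of-range values are
# clamped before the offset is computed (assuming type_check only validates the
# annotated int arguments).
def _pagination_example():
    assert get_start_stop_pos(1, 20) == (0, 20)
    assert get_start_stop_pos(3, 20) == (40, 20)
    assert get_start_stop_pos(0, 0) == (0, 1)   # clamped to page=1, size=1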
| 1.90625
| 2
|
tests/test_inventory.py
|
mavroskardia/oworld
| 0
|
12782000
|
#
# Tests for Overworld character inventory
#
import sys
sys.path.append('../components')
from items import NewInventory as Inventory
from items import NewItem as Item
from items import Material
class Test_Inventory:
def setup_class(cls):
cls.inv = Inventory()
def test_construction(self):
assert self.inv is not None
def test_properties(self):
assert type(self.inv.items) is tuple
assert len(self.inv.items) == 0
def test_inventory_add_items(self):
item = Item()
item.add_material(Material())
self.inv.add_item(item)
| 2.78125
| 3
|
tests/test_utils.py
|
gramaziokohler/total_station_robot_localization
| 0
|
12782001
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas_mobile_robot_reloc.utils import _ensure_rhino
from pytest import raises
def test__ensure_rhino():
with raises(ImportError):
_ensure_rhino()
| 1.4375
| 1
|
scripts/07_lemma/main.py
|
sdspieg/naeval
| 36
|
12782002
|
from os.path import (
expanduser,
join as join_path
)
from IPython.display import HTML
from tqdm.notebook import tqdm as log_progress
from naeval.const import (
NEWS, WIKI, FICTION, SOCIAL, POETRY,
DATASET, JL, GZ
)
from naeval.io import (
format_jl,
parse_jl,
load_gz_lines,
dump_gz_lines,
)
from naeval.record import (
as_jsons,
from_jsons
)
from naeval.dataset import sample
from naeval.readme import patch_readme
from naeval.lemma.datasets import load_dataset
from naeval.lemma.markup import (
Markup,
show_markup
)
CORUS_DATA_DIR = expanduser('~/proj/corus-data/gramru')
CORUS_FILES = {
NEWS: [
'dev/GramEval2020-RuEval2017-Lenta-news-dev.conllu',
'train/MorphoRuEval2017-Lenta-train.conllu',
],
WIKI: [
'dev/GramEval2020-GSD-wiki-dev.conllu',
'train/GramEval2020-GSD-train.conllu'
],
FICTION: [
'dev/GramEval2020-SynTagRus-dev.conllu',
'train/GramEval2020-SynTagRus-train-v2.conllu',
'train/MorphoRuEval2017-JZ-gold.conllu'
],
SOCIAL: [
'dev/GramEval2020-RuEval2017-social-dev.conllu',
'train/GramEval2020-Taiga-social-train.conllu',
'train/MorphoRuEval2017-VK-gold.conllu'
],
POETRY: [
'dev/GramEval2020-Taiga-poetry-dev.conllu',
'train/GramEval2020-Taiga-poetry-train.conllu'
],
}
DATASETS = [NEWS, WIKI, FICTION, SOCIAL, POETRY]
DATA_DIR = expanduser('~/proj/naeval/data/lemma')
LEMMA = 'lemma'
README = expanduser('~/proj/naeval/README.md')
| 1.59375
| 2
|
devel/grobid/process/parse_tei.py
|
arrismo/tripods-testing
| 2
|
12782003
|
<reponame>arrismo/tripods-testing<filename>devel/grobid/process/parse_tei.py<gh_stars>1-10
import sys
import re
from xml.etree import ElementTree as ET
total_matches = 0
pattern = re.compile(r"[A-Z]{2,6}\s{,1}-{,1}[0-9]{2,6}")
def traverse_node(root, pattern):
"""
Find matches at current node, then move on to child nodes
"""
if root.text is not None:
matches = pattern.findall(root.text)
if matches:
print("Full text:")
print(root.text)
print("Matches:")
counter = 1
for match in matches:
print("%d. %s" % (counter, match))
counter += 1
global total_matches
total_matches += 1
print()
for child in root:
traverse_node(child, pattern)
def main(argv):
target = "../data/out/Alma.tei.xml"
if len(argv) > 1:
target = argv[1]
# read xml file as a tree
tree = ET.parse(target, ET.XMLParser(encoding="utf-8"))
root = tree.getroot()
# traverse all nodes recursively and collect pattern matches
global pattern # indicate that we're using pattern as defined at the top of this file
traverse_node(root, pattern)
global total_matches
print("\nTotal matches: %d." % total_matches)
if __name__ == "__main__":
main(sys.argv)
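# --- Worked example added by the editor; not part of the original script. ---
# The module-level regex appears to target identifiers of the form LETTERS, an
# optional space and dash, then DIGITS; the sample text below is invented.
def _pattern_example():
    sample = "See ABC-123 and PROJ 4567 for details; lowercase abc-1 is ignored."
    return pattern.findall(sample)   # -> ['ABC-123', 'PROJ 4567']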
| 3.03125
| 3
|
pymain.py
|
Almasumgazi/download-manager
| 0
|
12782004
|
import requests
from bs4 import BeautifulSoup
import re
import webbrowser
import time
from qbittorrent import Client
movie = input("Enter What You Want To Download : ")
movie_name = movie
if(len(movie.split()) > 1):
movie = movie.split()
movie = '%20'.join(movie)
else:
movie = movie
url = f'https://thepiratebay10.org/search/{movie}/1/99/100,200,300,400,600'
r = requests.get(url)
htmlcontent = r.content
soup = BeautifulSoup(htmlcontent, 'html.parser')
anchors = soup.find_all("a")
all_links = []
all_names = []
all_search_links = []
for link in anchors:
if(link.get('href') != '#'):
linkName = link.get('title')
linkText = link.get('href')
all_links.append(linkText)
if(linkName != None):
all_names.append(linkName)
all_links = set(all_links)
all_links = list(all_links)
subsName = "Details for"
nameFinder = [i for i in all_names if subsName in i]
namelist = []
for name in nameFinder:
if(name.startswith(subsName)):
names = name[len(subsName)+1:]
namelist.append(names)
for index, s in enumerate(namelist):
print(str(index+1)+". "+s)
number_for_download = int(input("Enter number you want to download : "))-1
movie_title = namelist[number_for_download]
print("you're downloading : "+movie_title)
movie_title = movie_title.split()
movie_title = '%20'.join(movie_title)
url_selected = f'https://thepiratebay10.org/search/{movie_title}/1/99/100,200,300,400,600'
req = requests.get(url_selected)
htmlcontents = req.content
soup_selected = BeautifulSoup(htmlcontents, 'html.parser')
anchors_selected = soup_selected.find_all("a")
all_links_selected = []
for link_selected in anchors_selected:
if(link_selected.get('href') != '#'):
linkText_selected = link_selected.get('href')
all_links_selected.append(linkText_selected)
all_links_selected = set(all_links_selected)
all_links_selected = list(all_links_selected)
subs2 = "magnet"
magnet_links2 = [i for i in all_links_selected if subs2 in i]
qb = Client("http://127.0.0.1:3500/")
qb.login("admin", "adminadmin")
magnet_url = magnet_links2[0]
qb.download_from_link(magnet_url)
| 3.09375
| 3
|
skcosmo/linear_model/__init__.py
|
andreanelli/scikit-cosmo
| 16
|
12782005
|
<reponame>andreanelli/scikit-cosmo<gh_stars>10-100
from ._base import OrthogonalRegression
from ._ridge import RidgeRegression2FoldCV
__all__ = ["OrthogonalRegression", "RidgeRegression2FoldCV"]
| 1.15625
| 1
|
drl/envs/wrappers/stateful/abstract.py
|
lucaslingle/pytorch_drl
| 0
|
12782006
|
"""
Abstract wrapper definitions.
"""
from typing import Mapping, Any
import abc
import torch as tc
from drl.envs.wrappers.stateless import Wrapper
class TrainableWrapper(Wrapper, metaclass=abc.ABCMeta):
"""
Wrapper with trainable parameters.
"""
@abc.abstractmethod
def learn(
self, minibatch: Mapping[str, tc.Tensor],
**kwargs: Mapping[str, Any]) -> None:
"""
Args:
minibatch (Mapping[str, torch.Tensor]): Minibatch of experience.
**kwargs (Mapping[str, Any]): Keyword args.
Returns:
None.
"""
# todo(lucaslingle):
# make this return a possibly-empty dictionary of tensorboard metrics.
raise NotImplementedError
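# --- Minimal sketch added by the editor; not part of the original module. ---
# A hypothetical trainable wrapper that keeps a running reward baseline. It assumes
# the Wrapper base class accepts the wrapped env in its constructor and that the
# minibatch carries rewards under a 'rews' key; both are illustrative assumptions.
class RunningBaselineWrapper(TrainableWrapper):
    def __init__(self, env):
        super().__init__(env)
        self._baseline = 0.0
        self._count = 0
    def learn(self, minibatch, **kwargs):
        rews = minibatch['rews']          # assumed key name
        n = int(rews.numel())
        self._count += n
        # incremental mean update over all rewards seen so far
        self._baseline += (float(rews.sum()) - n * self._baseline) / self._count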
| 2.9375
| 3
|
src/pds_doi_service/core/outputs/web_client.py
|
NASA-PDS/doi-service
| 0
|
12782007
|
#
# Copyright 2020–21, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any commercial
# use must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
"""
=============
web_client.py
=============
Contains the abstract base class for interfacing with a DOI submission service
endpoint.
"""
import pprint
from typing import Optional
import requests
from pds_doi_service.core.entities.exceptions import WebRequestException
from pds_doi_service.core.outputs.doi_record import CONTENT_TYPE_XML
from pds_doi_service.core.outputs.web_parser import DOIWebParser
from pds_doi_service.core.util.config_parser import DOIConfigUtil
from requests.auth import HTTPBasicAuth
WEB_METHOD_GET = "GET"
WEB_METHOD_POST = "POST"
WEB_METHOD_PUT = "PUT"
WEB_METHOD_DELETE = "DELETE"
VALID_WEB_METHODS = [WEB_METHOD_GET, WEB_METHOD_POST, WEB_METHOD_PUT, WEB_METHOD_DELETE]
"""Constants for HTTP method types"""
class DOIWebClient:
"""Abstract base class for clients of an HTTP DOI service endpoint"""
_config_util = DOIConfigUtil()
_service_name: Optional[str]
_service_name = None
_web_parser: Optional[DOIWebParser]
_web_parser = None
_content_type_map: dict[str, str] = {}
def _submit_content(self, payload, url, username, password, method=WEB_METHOD_POST, content_type=CONTENT_TYPE_XML):
"""
Submits a payload to a DOI service endpoint via the POST action.
The action taken by the service is determined by the contents of the
payload.
Parameters
----------
payload : str
Payload to submit to the DOI service.
url : str
The URL of the DOI service endpoint.
username : str
The user name to authenticate to the DOI service as.
password : str
The password to authenticate to the DOI service with.
method : str, optional
The HTTP method type to use with the request. Should be one of
GET, POST, PUT or DELETE. Defaults to POST.
content_type : str, optional
The content type to specify the format of the payload, as well as
the format of the response from the endpoint. Defaults to
xml.
Returns
-------
response_text : str
Body of the response text from the endpoint.
"""
if method not in VALID_WEB_METHODS:
raise ValueError(f"Invalid method requested, must be one of {','.join(VALID_WEB_METHODS)}")
if content_type not in self._content_type_map:
raise ValueError(
f"Invalid content type requested, must be one of {','.join(list(self._content_type_map.keys()))}"
)
auth = HTTPBasicAuth(username, password)
headers = {"Accept": self._content_type_map[content_type], "Content-Type": self._content_type_map[content_type]}
response = requests.request(method, url, auth=auth, data=payload, headers=headers)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as http_err:
# Detail text is not always present, which can cause json parsing
# issues
details = f"Details: {pprint.pformat(response.text)}" if response.text else ""
raise WebRequestException(
f"DOI submission request to {self._service_name} service failed, reason: {str(http_err)}\n{details}"
)
return response.text
def submit_content(
self, payload, url=None, username=None, password=<PASSWORD>, method=WEB_METHOD_POST, content_type=CONTENT_TYPE_XML
):
"""
Submits the provided payload to a DOI service endpoint via the POST
action.
Inheritors of DOIWebClient should pull any required endpoint specific
parameters (URL, username, password, etc...) from the configuration
util bundled with the class.
Inheritors should also take the extra step of parsing and returning any
Doi objects from the response text (if request was successful).
Parameters
----------
payload : str
Payload to submit to the DOI service. Should only correspond
to a single DOI record.
url : str, optional
The URL to submit the request to. If not submitted, it is pulled
from the INI config for the appropriate service provider.
username : str, optional
The username to authenticate the request as. If not submitted, it
is pulled from the INI config for the appropriate service provider.
password : str, optional
The password to authenticate the request with. If not submitted, it
is pulled from the INI config for the appropriate service provider.
method : str, optional
The HTTP method type to use with the request. Should be one of
GET, POST, PUT or DELETE. Defaults to POST.
content_type : str, optional
The content type to specify the format of the payload, as well as
the format of the response from the endpoint. Defaults to xml.
Returns
-------
doi : Doi
Doi object parsed from the response text.
response_text : str
Body of the response text.
"""
raise NotImplementedError(
f"Subclasses of {self.__class__.__name__} must provide an implementation for submit_content()"
)
    def query_doi(self, query, url=None, username=None, password=None, content_type=CONTENT_TYPE_XML):
"""
Queries the DOI endpoint for the status of a DOI submission.
The query utilizes the GET HTTP method of the URL endpoint.
Inheritors of DOIWebClient should pull any required endpoint specific
parameters (URL, username, password, etc...) from the configuration
util bundled with the class for optional arguments not provided by
the user.
Parameters
----------
query : dict
Key/value pairs to append as parameters to the URL for the GET
endpoint.
url : str, optional
The URL to submit the request to. If not submitted, it is pulled
from the INI config for the appropriate service provider.
username : str, optional
The username to authenticate the request as. If not submitted, it
is pulled from the INI config for the appropriate service provider.
password : str, optional
The password to authenticate the request with. If not submitted, it
is pulled from the INI config for the appropriate service provider.
content_type : str, optional
            The content type to specify the format of the response from the
endpoint. Defaults to xml.
Returns
-------
response_text : str
Body of the response text from the endpoint.
"""
raise NotImplementedError(
f"Subclasses of {self.__class__.__name__} must provide an implementation for query_doi()"
)
def endpoint_for_doi(self, doi, action):
"""
Returns the proper HTTP verb and URL that form a request endpoint for
the provided DOI object.
Parameters
----------
doi : Doi
The DOI object to determine the endpoint for.
action : str
Name of the action to be performed on the provided Doi object.
Returns
-------
method : str
The HTTP verb to use for the request.
url: str
The URL to use for the request.
"""
raise NotImplementedError(
f"Subclasses of {self.__class__.__name__} must provide an implementation for endpoint_for_doi()"
)
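# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how a concrete client might subclass DOIWebClient. The
# class name, endpoint URL and content-type mapping below are placeholders,
# not the project's actual service clients; a real subclass would also parse
# the response into Doi objects using its assigned DOIWebParser.
class ExampleDOIWebClient(DOIWebClient):
    _service_name = "example"
    _content_type_map = {CONTENT_TYPE_XML: "application/xml"}

    def submit_content(
        self, payload, url=None, username=None, password=None, method=WEB_METHOD_POST, content_type=CONTENT_TYPE_XML
    ):
        # Placeholder endpoint; a real client would pull this (and the
        # credentials) from the INI config via self._config_util.
        url = url or "https://doi.example.org/records"
        response_text = self._submit_content(
            payload, url, username, password, method=method, content_type=content_type
        )
        # This sketch returns only the raw response body; the documented
        # interface expects a (Doi, response_text) pair.
        return response_text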
| 2.390625
| 2
|
lectures/solutions/tile_rectify.py
|
ritamonteiroo/scikit
| 453
|
12782008
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from skimage import transform
from skimage.transform import estimate_transform
source = np.array([(129, 72),
(302, 76),
(90, 185),
(326, 193)])
target = np.array([[0, 0],
[400, 0],
[0, 400],
[400, 400]])
tf = estimate_transform('projective', source, target)
H = tf.params # in older versions of skimage, this should be
# H = tf._matrix
print(H)
# H = np.array([[ 3.04026872e+00, 1.04929628e+00, -4.67743998e+02],
# [ -1.44134582e-01, 6.23382067e+00, -4.30241727e+02],
# [ 2.63620673e-05, 4.17694527e-03, 1.00000000e+00]])
def rectify(xy):
x = xy[:, 0]
y = xy[:, 1]
# You must fill in your code here.
#
# Handy functions are:
#
# - np.dot (matrix multiplication)
# - np.ones_like (make an array of ones the same shape as another array)
# - np.column_stack
# - A.T -- type .T after a matrix to transpose it
# - x.reshape -- reshapes the array x
# We need to provide the backward mapping
HH = np.linalg.inv(H)
homogeneous_coordinates = np.column_stack([x, y, np.ones_like(x)])
xyz = np.dot(HH, homogeneous_coordinates.T)
# We want one coordinate per row
xyz = xyz.T
# Turn z into a column vector
z = xyz[:, 2]
z = z.reshape([len(z), 1])
xyz = xyz / z
return xyz[:, :2]
image = plt.imread('../../images/chapel_floor.png')
out = transform.warp(image, rectify, output_shape=(400, 400))
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 4))
ax0.imshow(image)
ax1.imshow(out)
plt.show()
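# --- Illustrative check (not part of the original solution) ---
# The estimated projective transform maps the hand-picked source corners onto
# the target square, so applying tf forward to `source` should give values
# close to `target`, while rectify() applies the inverse mapping that warp()
# needs, so rectify(target) should recover `source` (up to rounding).
print(np.round(tf(source)))
print(np.round(rectify(target)))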
| 2.671875
| 3
|
test/test_combine_overlap_stats.py
|
MelbourneGenomics/iq
| 0
|
12782009
|
import StringIO
import unittest
import iq.combine_overlap_stats
class TestCombineOverlapStats(unittest.TestCase):
def test_simple(self):
exons = ['A1CF\t1\t2\t50.00\tALT1,ALT2', 'A2M\t3\t4\t75.00\t']
cds = ['A2M\t5\t6\t83.33\tALT3']
target = StringIO.StringIO()
log = StringIO.StringIO()
iq.combine_overlap_stats.combine(exons, cds, target, log)
lines = target.getvalue().split('\n')
assert len(lines) == 4
assert lines[1] == 'A1CF\t0\t1\t0\t2\t0\t50.00\tALT1,ALT2' # no cds data
assert lines[2] == 'A2M\t5\t3\t6\t4\t83.33\t75.00\tALT3' # data for both
assert lines[3] == ''
def test_case(self):
exons = ['a1CF\t1\t2\t50.00\tALT1,ALT2', 'A2m\t3\t4\t75.00\t']
cds = ['A2M\t5\t6\t83.33\tALT3']
target = StringIO.StringIO()
log = StringIO.StringIO()
iq.combine_overlap_stats.combine(exons, cds, target, log)
lines = target.getvalue().split('\n')
assert len(lines) == 4
assert lines[1] == 'a1CF\t0\t1\t0\t2\t0\t50.00\tALT1,ALT2' # no cds data
assert lines[2] == 'A2m\t5\t3\t6\t4\t83.33\t75.00\tALT3' # data for both
assert lines[3] == ''
if __name__ == '__main__':
unittest.main()
| 2.46875
| 2
|
src/webapp/dummy_data.py
|
janLo/meet-and-eat-registration-system
| 0
|
12782010
|
<reponame>janLo/meet-and-eat-registration-system<filename>src/webapp/dummy_data.py
from math import sqrt
from random import random
import database as db
from database.model import Team, Members, Location
def make_dummy_data(num_teams, confirmed=True):
for idx in range(num_teams):
team = Team(name="Team %d" % idx,
confirmed=confirmed)
db.session.add(team)
for member_idx in range(3):
member = Members(name="Member%d from team%d" % (member_idx, idx),
team=team)
db.session.add(member)
lat_rand = (0.5 - random()) * 0.1
lon_rand = (0.5 - random()) * 0.1
pseudo_dist = sqrt(lat_rand ** 2 + lon_rand ** 2)
lat = 51.0322627 + lat_rand
lon = 13.7071665 + lon_rand
location = Location(street="Teststreet %d" % idx,
zip_no="01217",
extra="",
lat=lat,
lon=lon,
center_distance=pseudo_dist,
team=team)
db.session.add(location)
db.session.commit()
| 2.921875
| 3
|
operators/self_patch.py
|
zhou3968322/dl-lab
| 0
|
12782011
|
<gh_stars>0
# -*- coding:utf-8 -*-
# email:<EMAIL>
# create: 2020/12/17
import torch
import torch.nn as nn
class Selfpatch(object):
def buildAutoencoder(self, target_img, target_img_2, target_img_3, patch_size=1, stride=1):
nDim = 3
assert target_img.dim() == nDim, 'target image must be of dimension 3.'
C = target_img.size(0)
        self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
patches_features = self._extract_patches(target_img, patch_size, stride)
patches_features_f = self._extract_patches(target_img_3, patch_size, stride)
patches_on = self._extract_patches(target_img_2, 1, stride)
return patches_features_f, patches_features, patches_on
def build(self, target_img, patch_size=5, stride=1):
nDim = 3
assert target_img.dim() == nDim, 'target image must be of dimension 3.'
C = target_img.size(0)
        self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
patches_features = self._extract_patches(target_img, patch_size, stride)
return patches_features
def _build(self, patch_size, stride, C, target_patches, npatches, normalize, interpolate, type):
# for each patch, divide by its L2 norm.
if type == 1:
enc_patches = target_patches.clone()
for i in range(npatches):
enc_patches[i] = enc_patches[i]*(1/(enc_patches[i].norm(2)+1e-8))
conv_enc = nn.Conv2d(npatches, npatches, kernel_size=1, stride=stride, bias=False, groups=npatches)
conv_enc.weight.data = enc_patches
return conv_enc
# normalize is not needed, it doesn't change the result!
if normalize:
raise NotImplementedError
if interpolate:
raise NotImplementedError
else:
conv_dec = nn.ConvTranspose2d(npatches, C, kernel_size=patch_size, stride=stride, bias=False)
conv_dec.weight.data = target_patches
return conv_dec
def _extract_patches(self, img, patch_size, stride):
n_dim = 3
assert img.dim() == n_dim, 'image must be of dimension 3.'
kH, kW = patch_size, patch_size
dH, dW = stride, stride
input_windows = img.unfold(1, kH, dH).unfold(2, kW, dW)
i_1, i_2, i_3, i_4, i_5 = input_windows.size(0), input_windows.size(1), input_windows.size(2), input_windows.size(3), input_windows.size(4)
input_windows = input_windows.permute(1,2,0,3,4).contiguous().view(i_2*i_3, i_1, i_4, i_5)
patches_all = input_windows
return patches_all
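# --- Illustrative usage (not part of the original module) ---
# A small sketch of what _extract_patches returns: for a C x H x W tensor and a
# 2x2 window with stride 1, it yields (H-1)*(W-1) patches of shape C x 2 x 2.
# The tensor below is random and purely for demonstration.
if __name__ == "__main__":
    sp = Selfpatch()
    feat = torch.randn(3, 8, 8)
    patches = sp._extract_patches(feat, patch_size=2, stride=1)
    print(patches.shape)  # expected: torch.Size([49, 3, 2, 2])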
| 2.1875
| 2
|
mmseg/datasets/pipelines/transforms_new.py
|
shinianzhihou/ChangeDetection
| 95
|
12782012
|
<reponame>shinianzhihou/ChangeDetection
import albumentations as A
from albumentations import DualTransform
from albumentations.pytorch import ToTensorV2
from numpy import random
from ..builder import PIPELINES
PIPELINES.register_module(module=ToTensorV2)
| 1.132813
| 1
|
every_election/apps/elections/migrations/0048_seed_status.py
|
DemocracyClub/EveryElection
| 8
|
12782013
|
<reponame>DemocracyClub/EveryElection<filename>every_election/apps/elections/migrations/0048_seed_status.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def load_init_data(apps, schema_editor):
ModerationStatus = apps.get_model("elections", "ModerationStatus")
recs = [
ModerationStatus(
short_label="Suggested", long_label="Suggested by an anonymous user"
),
ModerationStatus(short_label="Rejected", long_label="Rejected by a moderator"),
ModerationStatus(short_label="Approved", long_label="Approved by a moderator"),
ModerationStatus(
short_label="Deleted", long_label="Deleted (because it was added in error)"
),
]
ModerationStatus.objects.bulk_create(recs)
def delete_init_data(apps, schema_editor):
ModerationStatus = apps.get_model("elections", "ModerationStatus")
ModerationStatus.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [("elections", "0047_auto_20181005_1320")]
operations = [migrations.RunPython(load_init_data, delete_init_data)]
| 1.953125
| 2
|
src/day006.py
|
zhangxinyong12/my-python-demo
| 0
|
12782014
|
# #
# def func():
# n = 0
# while True:
# n += 1
# yield n  # yield = return + pause
#
#
# # g = func()
# # print(g)
# # print(g.__next__())
# # print(next(g))
#
#
# def fid(length):
# a, b = 0, 1
# n = 0
# while n < length:
# yield b
# a, b = b, a + b
# n += 1
# return 'done'
#
#
# g = fid(8)
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
# def gen():
# i = 0
# while i < 5:
# temp = yield i
# print('temp=', temp)
# i += 1
# return 'no more items'
#
#
# g = gen()
# g.send(None)
# n1 = g.send('abc')
# print(n1)
# n2 = g.send('erdf')
# print(n2)
# process > thread > coroutine
#
# def task1(n):
# for i in range(n):
# print('Moving brick number {}'.format(i))
# yield
#
#
# def task2(n):
# for i in range(n):
# print('Listening to song number {}'.format(i))
# yield
#
#
# g1 = task1(10)
# g2 = task2(5)
#
# while True:
# try:
# next(g1)
# next(g2)
# except:
# break
# Iterable objects:
# generators
# tuples
# lists
# sets
# dicts
# strings
from collections.abc import Iterable
list1 = [1, 2, 3, 4]
print('list1', isinstance(list1, Iterable))
str1 = '1111'
print('str1', isinstance(str1, Iterable))
g = (x for x in range(10))
print('g', isinstance(g, Iterable))
# Iterators
'''
Iterators
'''
list1 = iter(list1)
print(next(list1))
# p 142
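# --- Illustrative addition (not part of the original notes) ---
# Every iterator is iterable, but not every iterable is an iterator: a list
# only becomes an iterator after calling iter() on it.
from collections.abc import Iterator

print('g is Iterator:', isinstance(g, Iterator))          # True: generators are iterators
print('list1 is Iterator:', isinstance(list1, Iterator))  # True: iter() returned a list_iterator
print('str1 is Iterator:', isinstance(str1, Iterator))    # False: a str is only iterable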
| 3.859375
| 4
|
nvrtc/enums.py
|
uchytilc/PyCu
| 0
|
12782015
|
<reponame>uchytilc/PyCu
from ctypes import c_int
enum = c_int
nvrtcResult = enum
NVRTC_SUCCESS = nvrtcResult(0)
NVRTC_ERROR_OUT_OF_MEMORY = nvrtcResult(1)
NVRTC_ERROR_PROGRAM_CREATION_FAILURE = nvrtcResult(2)
NVRTC_ERROR_INVALID_INPUT = nvrtcResult(3)
NVRTC_ERROR_INVALID_PROGRAM = nvrtcResult(4)
NVRTC_ERROR_INVALID_OPTION = nvrtcResult(5)
NVRTC_ERROR_COMPILATION = nvrtcResult(6)
NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = nvrtcResult(7)
NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = nvrtcResult(8)
NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = nvrtcResult(9)
NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = nvrtcResult(10)
NVRTC_ERROR_INTERNAL_ERROR = nvrtcResult(11)
| 1.929688
| 2
|
pyga/termination_condition/stagnation.py
|
Eyjafjallajokull/pyga
| 0
|
12782016
|
<gh_stars>0
from .termination_condition import TerminationCondition
class Stagnation(TerminationCondition):
"""
Implements TerminationCondition to terminate evolution when there is no increase of best fitness value.
:param max_fitness_age: int number of generations after which to terminate
    :param use_average: bool when False - measuring best fitness, when True - measuring average population fitness
"""
def __init__(self, max_fitness_age, use_average=False):
self.use_average = use_average
self.max_fitness_age = max_fitness_age
self.best_fitness = None
self.best_fitness_age = None
def should_terminate(self, population):
"""
Executed by EvolutionEngine to determine when to end process. Returns True if evolution process should be
terminated.
        Returns True when the population's best fitness didn't change for max_fitness_age subsequent generations.
Note: this method has internal counter of generations, each function call increases counter by one.
:param population: Population
:return: boolean
"""
current_best_fitness = population.get_best().fitness
if self.best_fitness is None or self.best_fitness != current_best_fitness:
self.best_fitness = current_best_fitness
self.best_fitness_age = 1
else:
self.best_fitness_age += 1
return self.max_fitness_age <= self.best_fitness_age
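# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how the internal counter behaves, using a stand-in object
# that only provides the population.get_best().fitness interface relied on
# above; the real pyga Population class is not required for this illustration.
if __name__ == "__main__":
    class _FakeBest:
        def __init__(self, fitness):
            self.fitness = fitness

    class _FakePopulation:
        def __init__(self, fitness):
            self._best = _FakeBest(fitness)

        def get_best(self):
            return self._best

    condition = Stagnation(max_fitness_age=3)
    # Fitness never improves, so the condition triggers on the third call.
    print([condition.should_terminate(_FakePopulation(1.0)) for _ in range(3)])
    # expected: [False, False, True]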
| 3.484375
| 3
|
therminator/sensors/pi.py
|
jparker/therminator_client
| 0
|
12782017
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import time
def read(file='/sys/class/thermal/thermal_zone0/temp'):
"""Return the internal temperature.
Keyword arguments:
    file -- path to kernel interface file for internal temperature
The default value for file, /sys/class/thermal/thermal_zone0/temp, is a
safe bet for a device as simple as a Raspberry Pi. On a device with
multiple internal temperature zones, you can consult the type file in the
same directory to identify the correct zone.
"""
logger = logging.getLogger(__name__)
logger.debug('Started reading sensor')
t1 = time.time()
with open(file) as f:
temp = float(f.read()) / 1000
t2 = time.time()
logger.info('int_temp={:.1f}C'.format(temp))
logger.debug('Finished reading sensor ({:.1f}s)'.format(t2-t1))
return temp
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-F', '--fahrenheit',
action='store_const',
const=lambda x: '{:.1f}°F'.format(x * 9/5 + 32),
default=lambda x: '{:.1f}°C'.format(x),
dest='convert',
help='Convert results to °F (default: °C)')
parser.add_argument('-f', '--file',
default='/sys/class/thermal/thermal_zone0/temp',
help='Path to kernel interface to CPU temperature' \
' (default: /sys/class/thermal/thermal_zone0/temp)')
args = parser.parse_args()
temp = read(args.file)
print('temp={}'.format(args.convert(temp)))
| 3.25
| 3
|
helper/main_class.py
|
AVM-Martin/CTF-EncryptionAlgorithm
| 0
|
12782018
|
class Cryptography():
def __init__(self, key):
self.key = key
def encrypt(self, text):
return text
def decrypt(self, text):
return text
def get_key(self):
return self.key
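# --- Illustrative subclass (not part of the original helper) ---
# A hedged sketch of how this base class is meant to be extended: a toy
# single-byte XOR cipher overriding encrypt/decrypt while reusing the key kept
# by the base constructor. It is not one of the CTF's actual algorithms.
class XorCipher(Cryptography):
    def encrypt(self, text):
        return ''.join(chr(ord(c) ^ self.get_key()) for c in text)

    def decrypt(self, text):
        # XOR with the same key is its own inverse.
        return self.encrypt(text)


if __name__ == '__main__':
    cipher = XorCipher(42)
    assert cipher.decrypt(cipher.encrypt('hello')) == 'hello'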
| 3.125
| 3
|
tests/test_twoChoice.py
|
SebastianAllmeier/rmf_tool
| 1
|
12782019
|
"""
Test that computes the refined mean field approximation for the two-choice model
(with order 1 and 2 and a few parameter)
Compare the computed value with a value already stored in a pickle file
"""
import pickle
import numpy as np
from approximately_equal import approximately_equal
import os
PWD=os.getcwd()
if PWD[-5:] == 'tests':
CACHE_DIR = 'output_tests'
else:
CACHE_DIR = 'tests/output_tests'
import sys
sys.path.append('../')
sys.path.append('.')
import src.rmf_tool as rmf
def dChoiceModel(K, rho, d):
ddpp = rmf.DDPP()
    # The vector 'e(i)' is a vector where the $i$th coordinate is equal to $1$ (the others being equal to $0$)
def e(i):
l = np.zeros(K)
l[i] = 1
return l
# We then add the transitions :
for i in range(K):
if i >= 1:
ddpp.add_transition(e(i),eval('lambda x: {}*(x[{}]**{} - x[{}]**{} )'.format(rho, i-1, d, i, d)))
if i < K-1:
ddpp.add_transition(-e(i),eval('lambda x: (x[{}] - x[{}])'.format(i,i+1) ))
ddpp.add_transition(e(0), lambda x : eval('{}*(1-x[0]**{})'.format(rho,d)))
ddpp.add_transition(-e(K-1), lambda x : x[K-1])
ddpp.set_initial_state(e(0))
return ddpp
def generate_data():
"""
Generate all data and store them in a pickle file
    (to be used once when the test is initialized)
"""
data = dict([])
for rho in [0.6, 0.7, 0.8, 0.9]:
for d in [2, 3]:
for K in [5, 9, 15, 20]:
for order in ([1, 2] if K <= 5 else [1]):
ddpp = dChoiceModel(K, rho, d)
data[(K, rho, d, order)] = ddpp.meanFieldExpansionSteadyState(order=order)
with open('{}/d_choice.pickle'.format(CACHE_DIR), 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def test_two_choice():
"""
Compare the new data with previously computed data.
"""
with open('{}/d_choice.pickle'.format(CACHE_DIR), 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
data = pickle.load(f)
for key in data:
(K,rho,d,order) = key
print(key)
ddpp = dChoiceModel(K, rho, d)
new_data = ddpp.meanFieldExpansionSteadyState(order=order)
test_data = data[key]
assert approximately_equal(new_data, test_data) <= 1e-8
#generate_data()
#test_two_choice()
| 2.84375
| 3
|
LanguageConstructs/DataModel/Inheritance/main.py
|
ha-khan/PythonPractice
| 0
|
12782020
|
# id()
# hash()
# __mro__
# __basis__
# __name__
# __class__
class Orchestrator:
    # class attribute; shared by all instances of the class
instance_counter = 0
def __init__(self, type: str) -> None:
        # access the class object through the instance (self.__class__)
        # to update the class-level counter from this scope
cls = self.__class__
cls.instance_counter += 1
# instance attribute
self.type = type
pass
def check_state(self):
print("checking state ...")
print("state check ok.")
if not True:
raise Exception
@classmethod
def get_instance_count(cls) -> int:
return cls.instance_counter
class VMOrchestrator(Orchestrator):
"""
sub class from base orchestrator to orchestrate resources
"""
def __init__(self) -> None:
"""
constructor
"""
print("init VMOrchestrator")
        # use super() to access the direct parent class object
# call its __init__, after this class's __init__ is finished executing
# method resolution order (__mro__)
super().__init__("VM")
def check_state(self):
print("checking VM state ...")
# return super().check_state()
def request_vm(self):
try:
            self.check_state()
except:
print("unable to request vm")
def __del__(self):
"""
destructor
"""
print("releasing vm")
@staticmethod
def invoke(*args, **kwargs):
for arg in args:
print("invoke({})".format(arg))
def main():
# Calls constructor; creates an instance object where we reference it with the label r
r = VMOrchestrator()
# pointer to object's class object
    # creates an instance object that is not assigned to any label;
    # since nothing references it, its destructor is called immediately
r.__class__()
# access class method by traversing up method resolution chain of VMOrchestrator class object
print(VMOrchestrator.__mro__[1].__dict__['instance_counter'])
VMOrchestrator.invoke(1, 2, 3, 4)
r.invoke()
# Calls direct base class's check_state method
r.request_vm()
assert VMOrchestrator.__name__ == 'VMOrchestrator'
print(VMOrchestrator.get_instance_count())
print(isinstance(r, VMOrchestrator))
print(issubclass(VMOrchestrator, Orchestrator))
# Calls destructor
del r
if __name__ == '__main__':
main()
| 3.046875
| 3
|
selenium_util.py
|
dertilo/selenium-scraping
| 0
|
12782021
|
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def build_chrome_driver(download_dir: str, headless=True,window_size=(1920,1080)):
os.makedirs(download_dir, exist_ok=True)
options = webdriver.ChromeOptions()
if headless:
options.add_argument("headless")
w,h = window_size
options.add_argument(f"--window-size={w},{h}")
# driver.execute_script("document.body.style.zoom='80 %'")
prefs = {
"download.default_directory": download_dir,
"plugins.always_open_pdf_externally": True, # don't open pdfs in browser but instead download them
}
options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
r"/usr/bin/chromedriver", chrome_options=options
) # provide the chromedriver execution path in case of error
driver.implicitly_wait(10) # seconds
return driver
def enter_keyboard_input(wd, xpath: str, value: str, clear_it=False,press_enter=False):
# wait = WebDriverWait(wd, 10)
# wait.until(EC.presence_of_element_located((By.xpath(value), "content")))
e = wd.find_element_by_xpath(xpath)
if clear_it:
e.clear()
e.send_keys(value)
if press_enter:
e.send_keys(Keys.ENTER)
def click_it(wd, xpath):
element = wd.find_element_by_xpath(xpath)
element.click()
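# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how these helpers fit together; the URL and XPaths below
# are placeholders, and the hard-coded /usr/bin/chromedriver path above must
# exist on the machine for this to run.
if __name__ == "__main__":
    driver = build_chrome_driver("/tmp/downloads", headless=True)
    driver.get("https://example.com")  # placeholder page
    # On a real page you would then fill a field and click a link, e.g.:
    # enter_keyboard_input(driver, '//input[@name="q"]', "hello", press_enter=True)
    # click_it(driver, '//a[@id="some-link"]')
    driver.quit()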
| 2.875
| 3
|
src/training/models/MainTrainCNN.py
|
rogov-dvp/medical-imaging-matching
| 1
|
12782022
|
import time
import numpy as np
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
from keras.utils import plot_model
from CNNTripletModel import build_network, build_model
from BatchBuilder import get_batch_random_demo
input_shape = (28, 28, 1)
evaluate_every = 5
n_val = 5
batch_size = 20
data = np.load("/Users/niklastecklenburg/Desktop/Test/Data/images.npy")
labels = np.load("/Users/niklastecklenburg/Desktop/Test/Data/labels.npy")
data_train, data_test, labels_train, labels_test = train_test_split(
data, labels, test_size=0.2, random_state=42
)
network = build_network(input_shape, embeddingsize=10)
network_train = build_model(input_shape, network)
optimizer = Adam(lr=0.00006)
network_train.compile(loss=None, optimizer=optimizer)
network_train.summary()
plot_model(
network_train, show_shapes=True, show_layer_names=True, to_file="02 model.png"
)
print(network_train.metrics_names)
network_train.load_weights("mnist-160k_weights.h5")
t_start = time.time()
n_iteration = 0
for i in range(30):
# triplets = get_batch_hard(200,16,16,network)
triplets = get_batch_random_demo(data_train, labels_train, batch_size)
loss = network_train.train_on_batch(triplets, None)
print(loss)
# n_iteration += 1
# if i % evaluate_every == 0:
# print("\n ------------- \n")
# print("[{3}] Time for {0} iterations: {1:.1f} mins, Train Loss: {2}".format(i, (time.time()-t_start)/60.0,loss,n_iteration))
# probs,yprob = compute_probs(network,test_images[:n_val,:,:,:],y_test_origin[:n_val])
| 2.625
| 3
|
main.py
|
Dixneuf19/dank-face-bot
| 0
|
12782023
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple Bot to reply to Telegram messages.
This program is dedicated to the public domain under the CC0 license.
This Bot uses the Updater class to handle the bot.
First, a few handler functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Basic Echobot example, repeats messages.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import os
import time
from random import randint
import requests
from dotenv import load_dotenv
from loguru import logger
from path import Path
from requests.exceptions import HTTPError
from telegram import ForceReply, Update
from telegram.ext import (
CallbackContext,
CommandHandler,
Filters,
MessageHandler,
Updater,
)
load_dotenv()
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN")
INSULT_JMK_ADDRESS = f'{os.getenv("INSULT_JMK_HOST", default="localhost")}:{os.getenv("INSULT_JMK_PORT", default="80")}'
FUZZY_OCTO_DISCO_ADDRESS = f'{os.getenv("FUZZY_OCTO_DISCO_HOST", default="http://localhost")}:{os.getenv("FUZZY_OCTO_DISCO_PORT", default="80")}'
FIND_FACES_PIC_FOLDER = os.getenv("DOWNLOAD_FOLDER", default="/tmp/pics")
DEFAULT_TIMEOUT = int(os.getenv("DEFAULT_TIMEOUT", default="20"))
HONK_FILE_ID = os.getenv(
"HONK_FILE_ID",
"CQACAgQAAxkBAAICGmHI3e4vqCQLZctOeQsR7iStMr0VAAL9CQACZUZJUr9MlakXmVAzIwQ",
)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(update: Update, context: CallbackContext):
"""Send a message when the command /start is issued."""
logger.info("Received /start command from %s" % update.message.from_user.username)
update.message.reply_text("Hi!")
def help(update: Update, context: CallbackContext):
"""Send a message when the command /help is issued."""
logger.info("Received /help command from %s" % update.message.from_user.username)
update.message.reply_text("DANK FACE BOT")
def echo(update: Update, context: CallbackContext):
"""Echo the user message."""
update.message.reply_text(update.message.text)
# def insult_jmk(update: Update, context: CallbackContext, args=[], groups=("",)):
# """insult jmk"""
# logger.info(
# f"Received an insult request from '{update.message.from_user.name}' in chat '{update.message.chat.title}'"
# )
# name = groups[0]
# if len(args) > 0:
# name = args[0]
# logger.info("Received /insult command from %s" % update.message.from_user.username)
# insult = insult_jmk_client.get_insult(INSULT_JMK_ADDRESS, name)
# logger.info("Replied '%s' to '%s'" % (insult, update.message.text))
# update.message.reply_text(insult)
def bonne_annee(update: Update, context: CallbackContext):
from_user = update.message.from_user.first_name
update.message.reply_text(
"🎉🎉🎉\nBonne année %s !\n🥂🥂🥂\nDoot doot spam !\n🎊🎊🎊" % from_user
)
def honk(update: Update, context: CallbackContext):
logger.info(
f"Received an honk request from '{update.message.from_user.name}' in chat '{update.message.chat.title}'"
)
update.message.reply_audio(
audio=HONK_FILE_ID,
caption="HONK! HONK! HONK!",
)
def dank_face(update: Update, context: CallbackContext):
"""Send you back your image."""
logger.info(
f"Received a image from '{update.message.from_user.name}' in chat '{update.message.chat.title}'"
)
try:
newPhoto = update.message.photo[-1].get_file()
fileName = newPhoto.file_id + ".jpg"
filePath = Path(FIND_FACES_PIC_FOLDER) / Path(fileName)
newPhoto.download(filePath.abspath())
logger.info("Picture saved at %s" % filePath)
try:
res = requests.get(
f"{FUZZY_OCTO_DISCO_ADDRESS}/faces", params={"pic_path": filePath}
)
res.raise_for_status()
result = res.json()
if result["status"] == "SUCCESS":
logger.info("Found %d faces" % result["nbFaces"])
for i in range(int(result["nbFaces"])):
try:
# TODO: send as an album https://python-telegram-bot.readthedocs.io/en/stable/telegram.bot.html?highlight=album#telegram.Bot.send_media_group
update.message.reply_photo(
photo=open(result["paths"][i], "rb"),
timeout=DEFAULT_TIMEOUT,
)
except Exception as error:
logger.warning("Failed to send face %d : %s" % (i, error))
pass
finally:
try:
# Remove the file
Path(result["paths"][i]).remove_p()
except Exception as error:
logger.debug("Failed to remove face %d : %s" % (i, error))
pass
elif result["status"] in ("NO_FACE_FOUND", "FAILED_ALL_FACES"):
dog_number = randint(1, 43)
update.message.reply_photo(
photo=open(f"./amazon_dogs/{dog_number}.-TTD-c.jpg", "rb"),
caption="Sorry, didn't find any faces 😢",
timeout=DEFAULT_TIMEOUT,
)
else:
logger.warning(
f"Received {result['status']} from fuzzy-octo-disco: {result['message']}"
)
except HTTPError as e:
logger.error(e)
raise e
finally:
filePath.remove_p()
except Exception as error:
logger.error("Error in dank_face: %s" % error)
if "Not enough rights to send photos to the chat" in str(error):
            update.message.reply_text("Give me rights to send photos or kick me!")
else:
raise error
def error_handler(update: object, context: CallbackContext):
"""Log Errors caused by Updates."""
logger.info("Entered in error function")
logger.warning(f"Update {update} caused error {context.error}")
def main():
"""Start the bot."""
# Create the EventHandler and pass it your bot's token.
updater = Updater(TELEGRAM_TOKEN)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
# dp.add_handler(CommandHandler("insult", insult_jmk, pass_args=True))
dp.add_handler(CommandHandler("honk", honk))
dp.add_handler(MessageHandler(Filters.photo, dank_face))
# dp.add_handler(RegexHandler("(?i)(jmk|jean michel|gaston|jeanmich|jean-mich)", insult_jmk, pass_groups=True))
# on noncommand i.e message - echo the message on Telegram
# dp.add_handler(MessageHandler(Filters.text, bonne_annee))
# log all errors
dp.add_error_handler(error_handler)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
logger.info("Dank Face Bot is launched !")
updater.idle()
logger.info("Dank Face Bot stopped")
if __name__ == "__main__":
main()
| 3.15625
| 3
|
students/K33421/Golub_Anna/LR_3/library/library_project/library_app/views.py
|
aytakr/ITMO_ICT_WebDevelopment_2021-2022
| 7
|
12782024
|
from django.db.models import Sum
from .serializers import *
from rest_framework.generics import *
class BookListAPIView(ListAPIView):
serializer_class = BookSerializer
queryset = Book.objects.all()
class BookCreateAPIView(CreateAPIView):
serializer_class = BookSerializer
queryset = Book.objects.all()
class BookRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
serializer_class = BookSerializer
queryset = Book.objects.all()
class BookRetrieveAPIView(RetrieveAPIView):
serializer_class = BookRetrieveSerializer
queryset = Book.objects.all()
class ReaderListAPIView(ListAPIView):
serializer_class = ReaderSerializer
queryset = Reader.objects.all()
class ReaderCreateAPIView(CreateAPIView):
serializer_class = ReaderSerializer
queryset = Reader.objects.all()
class ReaderRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
serializer_class = ReaderSerializer
queryset = Reader.objects.all()
class ReaderRetrieveAPIView(RetrieveAPIView):
serializer_class = ReaderRetrieveSerializer
queryset = Reader.objects.all()
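# --- Illustrative wiring (not part of the original views module) ---
# A hedged sketch of the URL patterns these generic views are typically mounted
# with; the paths below are assumptions, not the project's actual urls.py.
#
# from django.urls import path
# urlpatterns = [
#     path('books/', BookListAPIView.as_view()),
#     path('books/create/', BookCreateAPIView.as_view()),
#     path('books/<int:pk>/', BookRetrieveUpdateDestroyAPIView.as_view()),
#     path('readers/', ReaderListAPIView.as_view()),
#     path('readers/<int:pk>/', ReaderRetrieveUpdateDestroyAPIView.as_view()),
# ]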
| 2.046875
| 2
|
src/utils.py
|
OsciiArt/Freesound-Audio-Tagging-2019
| 22
|
12782025
|
import numpy as np
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data.dataset import Dataset
from math import cos, pi
import librosa
from scipy.io import wavfile
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def cycle(iterable):
"""
convert dataloader to iterator
:param iterable:
:return:
"""
while True:
for x in iterable:
yield x
class CosineLR(_LRScheduler):
"""cosine annealing.
"""
def __init__(self, optimizer, step_size_min=1e-5, t0=100, tmult=2, curr_epoch=-1, last_epoch=-1):
self.step_size_min = step_size_min
self.t0 = t0
self.tmult = tmult
self.epochs_since_restart = curr_epoch
super(CosineLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
self.epochs_since_restart += 1
if self.epochs_since_restart > self.t0:
self.t0 *= self.tmult
self.epochs_since_restart = 0
lrs = [self.step_size_min + (
0.5 * (base_lr - self.step_size_min) * (1 + cos(self.epochs_since_restart * pi / self.t0)))
for base_lr in self.base_lrs]
return lrs
class MelDataset(Dataset):
def __init__(self, X, y, crop=-1,
mixup=False, freqmask=False, gain=False,
crop_mode='original',crop_rate=0.25
):
self.X= X
self.y= y
self.crop = crop
self.mixup = mixup
self.freqmask = freqmask
self.gain = gain
self.crop_mode = crop_mode
self.crop_rate = crop_rate
def do_additional_crop(self, img):
len_img = img.shape[1]
img_new = np.zeros([img.shape[0], self.crop], np.float32)
rate = np.random.random() * (1 - self.crop_rate) + self.crop_rate
if np.random.random() < 0.5: rate = 1
if img.shape[1] <= self.crop:
len_crop = int(img.shape[1] * rate)
if img.shape[1] - len_crop == 0:
shift_crop = 0
else:
shift_crop = np.random.randint(0, img.shape[1] - len_crop)
img = img[:, shift_crop:shift_crop + len_crop]
if self.crop - len_crop == 0:
shift = 0
else:
shift = np.random.randint(0, self.crop - len_crop)
img_new[:, shift:shift + len_crop] = img
else:
shift = np.random.randint(0, img.shape[1] - self.crop)
img_new = img[:, shift:shift + self.crop]
len_crop = int(self.crop * rate)
if self.crop - len_crop == 0:
shift_crop = 0
else:
shift_crop = np.random.randint(0, self.crop - len_crop)
img_new[:shift_crop] = 0
img_new[shift_crop + len_crop:] = 0
return img_new
def do_random_crop(self, img):
img_new = np.zeros([img.shape[0], self.crop], np.float32)
if img.shape[1] < self.crop:
shift = np.random.randint(0, self.crop - img.shape[1])
img_new[:, shift:shift + img.shape[1]] = img
elif img.shape[1] == self.crop:
img_new = img
else:
shift = np.random.randint(0, img.shape[1] - self.crop)
img_new = img[:, shift:shift + self.crop]
return img_new
def do_crop(self, img):
if self.crop_mode == 'random':
return self.do_random_crop(img)
elif self.crop_mode == 'additional':
return self.do_additional_crop(img)
elif self.crop_mode == 'original':
return img
def do_mixup(self, img, label, alpha=1.):
idx = np.random.randint(0, len(self.X))
img2 = np.load("{}.npy".format(self.X[idx][:-4]))
img2 = self.do_crop(img2)
label2 = self.y[idx].astype(np.float32)
rate = np.random.beta(alpha, alpha)
img = img * rate + img2 * (1 - rate)
label = label * rate + label2 * (1 - rate)
return img, label
def do_freqmask(self, img, max=32):
coord = np.random.randint(0, img.shape[0])
width = np.random.randint(8, max)
cut = np.array([coord - width, coord + width])
cut = np.clip(cut, 0, img.shape[0])
img[cut[0]:cut[1]] = 0
return img
def do_gain(self, img, max=0.1):
rate = 1 - max + np.random.random() * max * 2
return img * rate
def __getitem__(self, index):
img = np.load("{}.npy".format(self.X[index][:-4]))
img = self.do_crop(img)
label = self.y[index].astype(np.float32)
if self.mixup and np.random.random() < 0.5:
img, label = self.do_mixup(img, label)
if self.gain and np.random.random() < 0.5:
img = self.do_gain(img)
if self.freqmask and np.random.random() < 0.5:
img = self.do_freqmask(img)
img = librosa.power_to_db(img)
img = (img - img.mean()) / (img.std() + 1e-7)
img = img.reshape([1, img.shape[0], img.shape[1]])
return img, label
def __len__(self):
return len(self.X)
def compute_gain(sound, fs, min_db=-80.0, mode='RMSE'):
if fs == 16000:
n_fft = 2048
elif fs == 44100:
n_fft = 4096
else:
raise Exception('Invalid fs {}'.format(fs))
stride = n_fft // 2
gain = []
for i in range(0, len(sound) - n_fft + 1, stride):
if mode == 'RMSE':
g = np.mean(sound[i: i + n_fft] ** 2)
elif mode == 'A_weighting':
spec = np.fft.rfft(np.hanning(n_fft + 1)[:-1] * sound[i: i + n_fft])
power_spec = np.abs(spec) ** 2
a_weighted_spec = power_spec * np.power(10, a_weight(fs, n_fft) / 10)
g = np.sum(a_weighted_spec)
else:
raise Exception('Invalid mode {}'.format(mode))
gain.append(g)
gain = np.array(gain)
gain = np.maximum(gain, np.power(10, min_db / 10))
gain_db = 10 * np.log10(gain)
return gain_db
def mix(sound1, sound2, r, fs):
gain1 = np.max(compute_gain(sound1, fs)) # Decibel
gain2 = np.max(compute_gain(sound2, fs))
t = 1.0 / (1 + np.power(10, (gain1 - gain2) / 20.) * (1 - r) / r)
sound = ((sound1 * t + sound2 * (1 - t)) / np.sqrt(t ** 2 + (1 - t) ** 2))
sound = sound.astype(np.float32)
return sound
class WaveDataset(Dataset):
def __init__(self, X, y,
crop=-1, crop_mode='original', padding=0,
mixup=False, scaling=-1, gain=-1,
fs=44100,
):
self.X = X
self.y = y
self.crop = crop
self.crop_mode = crop_mode
self.padding = padding
self.mixup = mixup
self.scaling = scaling
self.gain = gain
self.fs = fs
def preprocess(self, sound):
for f in self.preprocess_funcs:
sound = f(sound)
return sound
def do_padding(self, snd):
snd_new = np.pad(snd, self.padding, 'constant')
return snd_new
def do_crop(self, snd):
if self.crop_mode=='random':
shift = np.random.randint(0, snd.shape[0] - self.crop)
snd_new = snd[shift:shift + self.crop]
else:
snd_new = snd
return snd_new
def do_gain(self, snd):
snd_new = snd * np.power(10, random.uniform(-self.gain, self.gain) / 20.0)
return snd_new
def do_scaling(self, snd, interpolate='Nearest'):
scale = np.power(self.scaling, random.uniform(-1, 1))
output_size = int(len(snd) * scale)
ref = np.arange(output_size) / scale
if interpolate == 'Linear':
ref1 = ref.astype(np.int32)
ref2 = np.minimum(ref1+1, len(snd)-1)
r = ref - ref1
snd_new = snd[ref1] * (1-r) + snd[ref2] * r
elif interpolate == 'Nearest':
snd_new = snd[ref.astype(np.int32)]
else:
raise Exception('Invalid interpolation mode {}'.format(interpolate))
return snd_new
def do_mixup(self, snd, label, alpha=1):
idx2 = np.random.randint(0, len(self.X))
_, snd2 = wavfile.read("{}".format(self.X[idx2]))
label2 = self.y[idx2].astype(np.float32)
if self.scaling!=-1:
snd2 = self.do_scaling(snd2)
snd2 = self.do_padding(snd2)
snd2 = self.do_crop(snd2)
rate = np.random.beta(alpha, alpha)
        snd_new = mix(snd, snd2, rate, self.fs)
label_new = label * rate + label2 * (1 - rate)
return snd_new, label_new
def __getitem__(self, index):
_, snd = wavfile.read("{}".format(self.X[index]))
label = self.y[index].astype(np.float32)
if self.scaling!=-1:
snd = self.do_scaling(snd)
snd = self.do_padding(snd)
snd = self.do_crop(snd)
if self.mixup:
snd, label = self.do_mixup(snd, label)
if self.gain!=-1:
snd = self.do_gain(snd)
snd = snd.reshape([1, 1, -1]).astype(np.float32) / 32768.0
return snd, label
def __len__(self):
return len(self.X)
def _one_sample_positive_class_precisions(scores, truth):
"""Calculate precisions for each true class for a single sample.
Args:
scores: np.array of (num_classes,) giving the individual classifier scores.
truth: np.array of (num_classes,) bools indicating which classes are true.
Returns:
pos_class_indices: np.array of indices of the true classes for this sample.
pos_class_precisions: np.array of precisions corresponding to each of those
classes.
"""
num_classes = scores.shape[0]
pos_class_indices = np.flatnonzero(truth > 0)
# Only calculate precisions if there are some true classes.
if not len(pos_class_indices):
return pos_class_indices, np.zeros(0)
# Retrieval list of classes for this sample.
retrieved_classes = np.argsort(scores)[::-1]
# class_rankings[top_scoring_class_index] == 0 etc.
    class_rankings = np.zeros(num_classes, dtype=int)
class_rankings[retrieved_classes] = range(num_classes)
# Which of these is a true label?
    retrieved_class_true = np.zeros(num_classes, dtype=bool)
retrieved_class_true[class_rankings[pos_class_indices]] = True
# Num hits for every truncated retrieval list.
retrieved_cumulative_hits = np.cumsum(retrieved_class_true)
# Precision of retrieval list truncated at each hit, in order of pos_labels.
precision_at_hits = (
retrieved_cumulative_hits[class_rankings[pos_class_indices]] /
        (1 + class_rankings[pos_class_indices].astype(float)))
return pos_class_indices, precision_at_hits
# All-in-one calculation of per-class lwlrap.
def calculate_per_class_lwlrap(truth, scores):
"""Calculate label-weighted label-ranking average precision.
Arguments:
truth: np.array of (num_samples, num_classes) giving boolean ground-truth
of presence of that class in that sample.
scores: np.array of (num_samples, num_classes) giving the classifier-under-
test's real-valued score for each class for each sample.
Returns:
per_class_lwlrap: np.array of (num_classes,) giving the lwlrap for each
class.
weight_per_class: np.array of (num_classes,) giving the prior of each
class within the truth labels. Then the overall unbalanced lwlrap is
simply np.sum(per_class_lwlrap * weight_per_class)
"""
assert truth.shape == scores.shape
num_samples, num_classes = scores.shape
# Space to store a distinct precision value for each class on each sample.
# Only the classes that are true for each sample will be filled in.
precisions_for_samples_by_classes = np.zeros((num_samples, num_classes))
for sample_num in range(num_samples):
pos_class_indices, precision_at_hits = (
_one_sample_positive_class_precisions(scores[sample_num, :],
truth[sample_num, :]))
precisions_for_samples_by_classes[sample_num, pos_class_indices] = (
precision_at_hits)
labels_per_class = np.sum(truth > 0, axis=0)
weight_per_class = labels_per_class / float(np.sum(labels_per_class))
# Form average of each column, i.e. all the precisions assigned to labels in
# a particular class.
per_class_lwlrap = (np.sum(precisions_for_samples_by_classes, axis=0) /
np.maximum(1, labels_per_class))
# overall_lwlrap = simple average of all the actual per-class, per-sample precisions
# = np.sum(precisions_for_samples_by_classes) / np.sum(precisions_for_samples_by_classes > 0)
# also = weighted mean of per-class lwlraps, weighted by class label prior across samples
# = np.sum(per_class_lwlrap * weight_per_class)
return per_class_lwlrap, weight_per_class
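# --- Illustrative check (not part of the original module) ---
# A tiny sanity check for calculate_per_class_lwlrap: when every sample's true
# class is also its top-scoring class, the per-class lwlrap is 1 for those
# classes, 0 for unused classes, and the class weights sum to 1.
if __name__ == "__main__":
    _truth = np.array([[1, 0, 0],
                       [0, 1, 0]])
    _scores = np.array([[0.9, 0.1, 0.0],
                        [0.2, 0.7, 0.1]])
    _per_class, _weights = calculate_per_class_lwlrap(_truth, _scores)
    print(_per_class)         # expected: [1. 1. 0.]
    print(np.sum(_weights))   # expected: 1.0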
| 2.484375
| 2
|
python/fleetx/dataset/ctr_data_generator.py
|
hutuxian/FleetX
| 170
|
12782026
|
<filename>python/fleetx/dataset/ctr_data_generator.py
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# There are 13 integer features and 26 categorical features
import os
import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
continous_features = range(1, 14)
categorial_features = range(14, 40)
continous_clip = [20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50]
def get_dataloader(inputs,
train_files_path,
sparse_feature_dim,
batch_size,
shuffle=True):
file_list = [
str(train_files_path) + "/%s" % x for x in os.listdir(train_files_path)
]
loader = fluid.io.DataLoader.from_generator(
feed_list=inputs, capacity=64, use_double_buffer=True, iterable=True)
train_generator = CriteoDataset(sparse_feature_dim)
reader = train_generator.train(file_list,
fleet.worker_num(), fleet.worker_index())
if shuffle:
reader = paddle.batch(
paddle.reader.shuffle(
reader, buf_size=batch_size * 100),
batch_size=batch_size)
else:
reader = paddle.batch(reader, batch_size=batch_size)
places = fluid.CPUPlace()
loader.set_sample_list_generator(reader, places)
return loader
class CriteoDataset(object):
def __init__(self, sparse_feature_dim):
self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.cont_max_ = [
20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.cont_diff_ = [
20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
]
self.hash_dim_ = sparse_feature_dim
# here, training data are lines with line_index < train_idx_
self.train_idx_ = 41256555
self.continuous_range_ = range(1, 14)
self.categorical_range_ = range(14, 40)
def _reader_creator(self, file_list, is_train, trainer_num, trainer_id):
def reader():
for file in file_list:
with open(file, 'r') as f:
line_idx = 0
for line in f:
line_idx += 1
features = line.rstrip('\n').split('\t')
dense_feature = []
sparse_feature = []
for idx in self.continuous_range_:
if features[idx] == '':
dense_feature.append(0.0)
else:
dense_feature.append(
(float(features[idx]) -
self.cont_min_[idx - 1]) /
self.cont_diff_[idx - 1])
for idx in self.categorical_range_:
sparse_feature.append([
hash(str(idx) + features[idx]) % self.hash_dim_
])
label = [int(features[0])]
yield [dense_feature] + sparse_feature + [label]
return reader
def train(self, file_list, trainer_num, trainer_id):
return self._reader_creator(file_list, True, trainer_num, trainer_id)
def test(self, file_list):
return self._reader_creator(file_list, False, 1, 0)
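# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the line format the reader expects: one label, 13 integer
# features and 26 categorical features, all tab-separated. The temporary file
# and feature values below are made up purely for illustration.
if __name__ == "__main__":
    import tempfile

    fields = ["1"] + [str(i) for i in range(13)] + ["cat%d" % i for i in range(26)]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("\t".join(fields) + "\n")
        sample_path = tmp.name

    dataset = CriteoDataset(sparse_feature_dim=1000)
    # test() returns a reader function; calling it yields the parsed samples.
    for sample in dataset.test([sample_path])():
        # sample = [dense_feature] + 26 sparse id lists + [label]
        print(len(sample), sample[-1])  # expected: 28 [1]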
| 2.46875
| 2
|
tests/helper_test_reload_data.py
|
rndr/aioimport
| 0
|
12782027
|
data: object = object()
| 1.1875
| 1
|
torchio/transforms/__init__.py
|
soumickmj/torchio
| 0
|
12782028
|
from .transform import Transform, TypeTransformInput
from .spatial_transform import SpatialTransform
from .intensity_transform import IntensityTransform
from .interpolation import Interpolation, get_sitk_interpolator
# Generic
from .lambda_transform import Lambda
# Augmentation
from .augmentation.composition import OneOf
from .augmentation.composition import Compose, compose_from_history
from .augmentation.spatial import RandomFlip
from .augmentation.spatial import RandomAffine
from .augmentation.spatial import RandomDownsample
from .augmentation.spatial import RandomElasticDeformation
from .augmentation.intensity import RandomSwap
from .augmentation.intensity import RandomBlur
from .augmentation.intensity import RandomNoise
from .augmentation.intensity import RandomSpike
from .augmentation.intensity import RandomGamma
from .augmentation.intensity import RandomMotion
from .augmentation.intensity import RandomGhosting
from .augmentation.intensity import RandomBiasField
from .augmentation.intensity import RandomLabelsToImage
# Preprocessing
from .preprocessing import Pad
from .preprocessing import Crop
from .preprocessing import Resample
from .preprocessing import ToCanonical
from .preprocessing import ZNormalization
from .preprocessing import HistogramStandardization
from .preprocessing import RescaleIntensity, Rescale
from .preprocessing import CropOrPad, CenterCropOrPad
from .preprocessing.intensity.histogram_standardization import train as train_histogram
__all__ = [
'Transform',
'TypeTransformInput',
'SpatialTransform',
'IntensityTransform',
'Interpolation',
'get_sitk_interpolator',
'Lambda',
'OneOf',
'Compose',
'compose_from_history',
'RandomFlip',
'RandomAffine',
'RandomDownsample',
'RandomElasticDeformation',
'RandomSwap',
'RandomBlur',
'RandomNoise',
'RandomSpike',
'RandomGamma',
'RandomMotion',
'RandomGhosting',
'RandomBiasField',
'RandomLabelsToImage',
'Pad',
'Crop',
'Resample',
'ToCanonical',
'ZNormalization',
'HistogramStandardization',
'Rescale',
'RescaleIntensity',
'CenterCropOrPad',
'CropOrPad',
'train_histogram',
]
| 1.53125
| 2
|
scripts/gen_noise.py
|
williamalu/ofdm_mimo_usrp
| 2
|
12782029
|
import numpy as np
import matplotlib.pyplot as plt
def make_pulses(data, T, pulse):
widen = np.zeros(len(data) * T, dtype=np.complex64)
for idx, val in enumerate(widen):
if idx % T == 0:
widen[idx] = data[ idx//T ]
return np.array(np.convolve(widen, pulse, 'full'), dtype=np.complex64)
def raised_cosine(size, T):
W = 1/T
pulse = np.zeros(size, dtype=np.complex64)
alpha = 0.5
for idx, t in enumerate(range(-size//T, size//T)):
val = np.sinc(2*W*t) * ( np.cos( 2*np.pi*alpha*W*t )/( 1 - 16 * (alpha**2) * (W**2) * (t**2)) )
pulse[idx] = t
plt.plot(pulse)
plt.show()
exit()
return pulse
if __name__ == "__main__":
data_path = '../data/'
# Gen noise
np.random.seed(45)
noise_size = 10000
noise1 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
noise2 = np.array(np.random.choice([0.5, -0.5], size=noise_size))
# Make noise into pulses
T = 10
pulse = np.ones(10)
noise1 = make_pulses(noise1, T, pulse)
noise2 = make_pulses(noise2, T, pulse)
# Save noise for cross correlation later
noise1.tofile(data_path + "noise_1.bin")
noise2.tofile(data_path + "noise_2.bin")
# Make filler so we can send everything at once
zeros_gap = np.zeros(10000)
zeros = np.zeros(len(noise1))
# Data for channel 1
channel1 = np.concatenate( [noise1, zeros_gap, zeros] )
channel2 = np.concatenate( [zeros, zeros_gap, noise2] )
channel1 = np.array( channel1, dtype=np.complex64 )
channel2 = np.array( channel2, dtype=np.complex64 )
# Save out data
channel1.tofile(data_path + "noise_1_transmit.bin")
channel2.tofile(data_path + "noise_2_transmit.bin")
# Plot for verification
plt.plot(channel1)
plt.plot(channel2)
plt.show()
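    # --- Illustrative check (not part of the original script) ---
    # make_pulses() places one symbol every T samples and widens it with the
    # rectangular pulse, so two symbols with T=3 become an 8-sample
    # piecewise-constant waveform (2*T + len(pulse) - 1 samples).
    demo = make_pulses(np.array([1.0, -1.0]), T=3, pulse=np.ones(3))
    print(demo.real)  # expected: [ 1.  1.  1. -1. -1. -1.  0.  0.]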
| 2.8125
| 3
|
src/geocat/f2py/triple_to_grid_wrapper.py
|
NCAR/geocat-f2py
| 4
|
12782030
|
<reponame>NCAR/geocat-f2py
import warnings
import numpy as np
import xarray as xr
from dask.array.core import map_blocks
from .errors import ChunkError, CoordinateError, DimensionError
from .fortran import grid2triple as grid2triple_fort
from .fortran import triple2grid1
from .missing_values import fort2py_msg, py2fort_msg
# Dask Wrappers or Internal Wrappers _<funcname>()
# These wrappers are executed within dask processes, and should do anything that
# can benefit from parallel execution.
def _grid_to_triple(x, y, z, msg_py):
# Transpose z before Fortran function call
z = np.transpose(z, axes=(1, 0))
# Handle Python2Fortran missing value conversion
z, msg_py, msg_fort = py2fort_msg(z, msg_py=msg_py)
# Fortran call
    # num_elem is the number of elements, counted from the beginning of each
    # column of the array, that are not missing values
out, num_elem = grid2triple_fort(x, y, z, msg_fort)
# Transpose output to correct dimension order before returning it to outer wrapper
# As well as get rid of indices corresponding to missing values
out = np.asarray(out)
out = np.transpose(out, axes=(1, 0))
out = out[:, :num_elem]
# Handle Fortran2Python missing value conversion back
fort2py_msg(z, msg_fort=msg_fort, msg_py=msg_py)
fort2py_msg(out, msg_fort=msg_fort, msg_py=msg_py)
return out
def _triple_to_grid(data,
x_in,
y_in,
x_out,
y_out,
shape,
method=None,
distmx=None,
domain=None,
msg_py=None):
# Handle Python2Fortran missing value conversion
data, msg_py, msg_fort = py2fort_msg(data, msg_py=msg_py)
# Fortran function call
grid = triple2grid1(x_in,
y_in,
data,
x_out,
y_out,
zmsg=msg_fort,
domain=domain,
method=method,
distmx=distmx)
# Reshape output to correct the dimensionality before returning it to the outer wrapper
grid = np.asarray(grid)
grid = grid.reshape(shape)
# Handle Fortran2Python missing value conversion back
fort2py_msg(data, msg_fort=msg_fort, msg_py=msg_py)
fort2py_msg(grid, msg_fort=msg_fort, msg_py=msg_py)
print(grid)
return grid
# TODO: Revisit for implementing this function after deprecating geocat.ncomp
def _triple_to_grid_2d(x_in, y_in, data, x_out, y_out, msg_py):
# ''' signature: grid = _triple2grid(x_in, y_in,data,x_out,y_out,msg_py)
pass
# Outer Wrappers <funcname>()
# These wrappers are executed in the __main__ python process, and should be
# used for any tasks which would not benefit from parallel execution.
def grid_to_triple(data, x_in=None, y_in=None, msg_py=None):
"""Converts a two-dimensional grid with one-dimensional coordinate
variables to an array where each grid value is associated with its
coordinates.
Parameters
----------
data : :class:`xarray.DataArray` or :class:`numpy.ndarray`:
Two-dimensional array of size ny x mx containing the data values.
Missing values may be present in `data`, but they are ignored.
x_in : :class:`xarray.DataArray` or :class:`numpy.ndarray`:
Coordinates associated with the right dimension of the variable `data`.
If `data` is of type :class:`xarray.DataArray` and `x_in` is unspecified,
then it comes as the associated coordinate of `data` (if `x_in` is explicitly
given, then it will be used for calculations). If `data` is of type
:class:`numpy.ndarray`, then it must be explicitly given as input and it
must have the same dimension (call it `mx`) as the right dimension of `data`.
y_in : :class:`xarray.DataArray` or :class:`numpy.ndarray`:
Coordinates associated with the left dimension of the variable `data`.
If `data` is of type :class:`xarray.DataArray` and `y_in` is unspecified,
then it comes as the associated coordinate of `data` (if `y_in` is explicitly
given, then it will be used for calculations). If `data` is of type
:class:`numpy.ndarray`, then it must be explicitly given as input and it
must have the same dimension (call it `ny`) as the left dimension of `data`.
msg_py : :obj:`numpy.number`:
A numpy scalar value that represent a missing value in `data`.
This argument allows a user to use a missing value scheme
other than NaN or masked arrays, similar to what NCL allows.
Returns
-------
out : :class:`xarray.DataArray`:
The maximum size of the returned array will be 3 x ld, where ld <= ny x mx.
If no missing values are encountered in `data`, then ld = ny x mx. If missing
values are encountered in `data`, they are not returned and hence ld will be
equal to ny x mx minus the number of missing values found in `data`.
The return array will be double if any of the input arrays are double, and float
otherwise.
Examples
--------
Example 1: Using grid_to_triple with :class:`xarray.DataArray` input
.. code-block:: python
import numpy as np
import xarray as xr
import geocat.comp
# Open a netCDF data file using xarray default engine and load the data stream
ds = xr.open_dataset("./NETCDF_FILE.nc")
# [INPUT] Grid & data info on the source curvilinear
data = ds.DIST_236_CBL[:]
x_in = ds.gridlat_236[:]
y_in = ds.gridlon_236[:]
output = geocat.comp.grid_to_triple(data, x_in, y_in)
"""
# TODO: Will need to be revisited after sanity_check work is finished
# ''' Start of boilerplate
if not isinstance(data, xr.DataArray):
if (x_in is None) | (y_in is None):
raise CoordinateError(
"ERROR grid_to_triple: Argument `x_in` and `y_in` must be provided explicitly "
"unless `data` is an xarray.DataArray.")
data = xr.DataArray(data)
data = xr.DataArray(
data.data,
coords={
data.dims[-1]: x_in,
data.dims[-2]: y_in,
},
dims=data.dims,
)
if (x_in is None):
x_in = data.coords[data.dims[-1]]
if (y_in is None):
y_in = data.coords[data.dims[-2]]
# Basic sanity checks
if data.ndim != 2:
raise DimensionError(
"ERROR grid_to_triple: `z` must have two dimensions !\n")
if x_in.ndim != 1:
raise DimensionError(
"ERROR grid_to_triple: `x_in` must have one dimension !\n")
elif x_in.shape[0] != data.shape[1]:
raise DimensionError(
"ERROR grid_to_triple: `x_in` must have the same size (call it `mx`) as the "
"right dimension of z. !\n")
if y_in.ndim != 1:
raise DimensionError(
"ERROR grid_to_triple: `y_in` must have one dimension !\n")
elif y_in.shape[0] != data.shape[0]:
raise DimensionError(
"ERROR grid_to_triple: `y_in` must have the same size (call it `ny`) as the left dimension of z. !\n"
)
# ''' end of boilerplate
out = _grid_to_triple(x_in.data, y_in.data, data.data, msg_py)
out = xr.DataArray(out, attrs=data.attrs)
return out
def triple_to_grid(data,
x_in,
y_in,
x_out,
y_out,
method=1,
domain=float(1.0),
distmx=None,
missing_value=np.nan,
meta=False):
"""Places unstructured (randomly-spaced) data onto the nearest locations of
a rectilinear grid.
Parameters
----------
data : :class:`xarray.DataArray`: or :class:`numpy.ndarray`:
A multi-dimensional array, whose rightmost dimension is the same
length as `x_in` and `y_in`, containing the values associated with
the "x" and "y" coordinates. Missing values may be present but
will be ignored.
x_in : :class:`xarray.DataArray`: or :class:`numpy.ndarray`:
One-dimensional arrays of the same length containing the coordinates
associated with the data values. For geophysical variables, "x"
correspond to longitude.
y_in : :class:`xarray.DataArray`: or :class:`numpy.ndarray`:
One-dimensional arrays of the same length containing the coordinates
associated with the data values. For geophysical variables, "y"
correspond to latitude.
x_out : :class:`xarray.DataArray`: or :class:`numpy.ndarray`:
A one-dimensional array of length M containing the "x" coordinates
associated with the returned two-grid. For geophysical variables,
these are longitudes. The coordinates' values must be
monotonically increasing.
y_out : :class:`xarray.DataArray`: or :class:`numpy.ndarray`:
A one-dimensional array of length N containing the "y" coordinates
        associated with the returned grid. For geophysical variables,
these are latitudes. The coordinates' values must be
monotonically increasing.
Optional Parameters
-------------------
method :
An integer value that can be 0 or 1. The default value is 1.
A value of 1 means to use the great circle distance formula
for distance calculations.
        Warning: `method` = 0, together with `domain` = 1.0, could
        result in many of the target grid points being set to the
        missing value if the number of grid points is large (i.e., a
        high-resolution grid) and the number of observations is
        relatively small.
domain :
A float value that should be set to a value >= 0. The
default value is 1.0. If present, the larger this factor,
the wider the spatial domain allowed to influence grid boundary
points. Typically, `domain` is 1.0 or 2.0. If `domain` <= 0.0,
then values located outside the grid domain specified by
`x_out` and `y_out` arguments will not be used.
distmx :
Setting `distmx` allows the user to specify a search
radius (km) beyond which observations are not considered
for nearest neighbor. Only applicable when `method` = 1.
The default `distmx`=1e20 (km) means that every grid point
will have a nearest neighbor. It is suggested that users
specify a reasonable value for `distmx`.
missing_value : :obj:`numpy.number`:
        A numpy scalar value that represents
a missing value in `data`. The default value is `np.nan`.
If specified explicitly, this argument allows the user to
use a missing value scheme other than NaN or masked arrays.
meta : :obj:`bool`:
If set to True and the input array is an Xarray,
the metadata from the input array will be copied to the
output array; default is False.
Warning: This option is not yet supported for this function.
Returns
-------
grid : :class:`xarray.DataArray`:
        The returned array will be K x N x M, where K
        represents the leftmost dimensions of `data`, N represents the size of `y_out`,
        and M represents the size of `x_out` coordinate vectors. It will be of type
        double if any of the inputs is double, and float otherwise.
Description
-----------
    This function puts unstructured (randomly-spaced) data onto the nearest
    locations of a rectilinear grid. The default value of the `domain` option
    is now 1.0 instead of 0.0.
This function does not perform interpolation; rather, each individual
data point is assigned to the nearest grid point. It is possible that
upon return, grid will contain grid points set to missing value if
no `x_in(n)`, `y_in(n)` are nearby.
Examples
--------
Example 1: Using triple_to_grid with :class:`xarray.DataArray` input
.. code-block:: python
import numpy as np
import xarray as xr
import geocat.comp
# Open a netCDF data file using xarray default engine and load the data stream
ds = xr.open_dataset("./ruc.nc")
# [INPUT] Grid & data info on the source curvilinear
data = ds.DIST_236_CBL[:]
x_in = ds.gridlat_236[:]
y_in = ds.gridlon_236[:]
        # [OUTPUT] Define the destination rectilinear grid (or read the 1D
        # lat and lon from another .nc file).
        x_out = np.linspace(x_in.min(), x_in.max(), 100)
        y_out = np.linspace(y_in.min(), y_in.max(), 100)
        output = geocat.comp.triple_to_grid(data, x_in, y_in, x_out, y_out)
"""
# TODO: May need to be revisited after sanity_check work is finished
if (x_in is None) | (y_in is None):
raise CoordinateError(
"ERROR triple_to_grid: Arguments x_in and y_in must always be explicitly provided"
)
# ''' Start of boilerplate
# If a Numpy input is given, convert it to Xarray and chunk it just with its dims
if not isinstance(data, xr.DataArray):
data = xr.DataArray(data)
data_chunk = dict([
(k, v) for (k, v) in zip(list(data.dims), list(data.shape))
])
data = xr.DataArray(
data.data,
# coords={
# data.dims[-1]: x_in,
# data.dims[-2]: y_in,
# },
            dims=data.dims,
).chunk(data_chunk)
else:
# If an unchunked Xarray input is given, chunk it just with its dims
if (data.chunks is None):
data_chunk = dict([
(k, v) for (k, v) in zip(list(data.dims), list(data.shape))
])
data = data.chunk(data_chunk)
# Ensure the rightmost dimension of input is not chunked
elif list(data.chunks)[-1:] != [x_in.shape]:
            raise ChunkError(
                "ERROR triple_to_grid: `data` must be unchunked along the rightmost dimension !"
            )
# x_in = data.coords[data.dims[-1]]
# y_in = data.coords[data.dims[-2]]
# Basic sanity checks
if x_in.shape[0] != y_in.shape[0] or x_in.shape[0] != data.shape[data.ndim -
1]:
raise DimensionError(
"ERROR triple_to_grid: The length of `x_in` and `y_in` must be the same "
"as the rightmost dimension of `data` !")
if x_in.ndim > 1 or y_in.ndim > 1:
raise DimensionError(
"ERROR triple_to_grid: `x_in` and `y_in` arguments must be one-dimensional arrays !\n"
)
if x_out.ndim > 1 or y_out.ndim > 1:
        raise DimensionError(
            "ERROR triple_to_grid: `x_out` and `y_out` arguments must be one-dimensional arrays !\n"
        )
if not isinstance(method, int):
raise TypeError(
'ERROR triple_to_grid: `method` arg must be an integer. Set it to either 1 or 0.'
)
if (method != 0) and (method != 1):
raise TypeError(
'ERROR triple_to_grid: `method` arg accepts either 0 or 1.')
# `distmx` is only applicable when `method`==1
if method:
if np.asarray(distmx).size != 1:
raise ValueError(
"ERROR triple_to_grid: Provide a scalar value for `distmx` !")
else:
if distmx is not None:
raise ValueError(
"ERROR triple_to_grid: `distmx` is only applicable when `method`==1 !"
)
if np.asarray(domain).size != 1:
raise ValueError(
"ERROR triple_to_grid: Provide a scalar value for `domain` !")
# `data` data structure elements and autochunking
data_chunks = list(data.dims)
data_chunks[:-1] = [
(k, 1) for (k, v) in zip(list(data.dims)[:-1],
list(data.chunks)[:-1])
]
data_chunks[-1:] = [
(k, v[0])
for (k, v) in zip(list(data.dims)[-1:],
list(data.chunks)[-1:])
]
data_chunks = dict(data_chunks)
data = data.chunk(data_chunks)
# grid datastructure elements
grid_chunks = list(data.chunks)
grid_chunks[-1] = (y_out.shape[0] * x_out.shape[0],)
grid_chunks = tuple(grid_chunks)
dask_grid_shape = tuple(a[0] for a in list(grid_chunks))
grid_coords = {k: v for (k, v) in data.coords.items()}
grid_coords[data.dims[-1]] = x_out
grid_coords[data.dims[-2]] = y_out
# ''' end of boilerplate
grid = map_blocks(
_triple_to_grid,
data.data,
x_in,
y_in,
x_out,
y_out,
dask_grid_shape,
method=method,
distmx=distmx,
domain=domain,
msg_py=missing_value,
chunks=grid_chunks,
dtype=data.dtype,
drop_axis=[data.ndim - 1],
new_axis=[data.ndim - 1],
)
# Reshape grid to its final shape
grid_shape = (data.shape[:-1] + (y_out.shape[0],) + (x_out.shape[0],))
grid = grid.reshape(grid_shape)
if meta:
# grid = xr.DataArray(grid.compute(), attrs=data.attrs, dims=data.dims, coords=grid_coords)
import warnings
warnings.warn(
"WARNING triple_to_grid: Retention of metadata is not yet supported; "
"it will thus be ignored in the output!")
# else:
# grid = xr.DataArray(grid.compute(), coords=grid_coords)
grid = xr.DataArray(grid.compute())
return grid
# TODO: Revisit for implementing this function after deprecating geocat.ncomp
def triple_to_grid_2d(x_in, y_in, data, x_out, y_out, msg_py):
# ''' signature: grid = triple2grid2d(x,y,data,x_out,y_out,msg_py)
pass
# Transparent wrappers for geocat.ncomp backwards compatibility
def grid2triple(x_in, y_in, data, msg_py):
warnings.warn(
"grid2triple function name and signature will be deprecated soon "
"in a future version. Use `grid_to_triple` instead!",
PendingDeprecationWarning)
return grid_to_triple(data, x_in, y_in, msg_py)
def triple2grid(x_in, y_in, data, x_out, y_out, **kwargs):
warnings.warn(
"triple2grid function name and signature will be deprecated soon "
"in a future version. Use `triple_to_grid` instead!",
PendingDeprecationWarning)
return triple_to_grid(data, x_in, y_in, x_out, y_out, **kwargs)
| 2.265625
| 2
|
sge/utils.py
|
wookayin/subtask-graph-execution-light
| 3
|
12782031
|
<filename>sge/utils.py
from enum import Enum
import numpy as np
class KEY(Enum):
UP = 0,
DOWN = 1,
LEFT = 2,
RIGHT = 3,
PICKUP = 4,
TRANSFORM = 5,
USE_1 = 5,
USE_2 = 6,
USE_3 = 7,
USE_4 = 8,
USE_5 = 9,
QUIT = 'q'
WHITE = (255, 255, 255)
LIGHT = (196, 196, 196)
GREEN = (80, 160, 0)
DARK = (128, 128, 128)
DARK_RED = (139, 0, 0)
BLACK = (0, 0, 0)
MOVE_ACTS = {KEY.UP, KEY.DOWN, KEY.LEFT, KEY.RIGHT}
EMPTY = -1
AGENT = 0
BLOCK = 1
WATER = 2
OBJ_BIAS = 3
TYPE_PICKUP = 0
TYPE_TRANSFORM = 1
def get_id_from_ind_multihot(indexed_tensor, mapping, max_dim):
if type(mapping) == dict:
        mapping_ = np.zeros(max(mapping.keys())+1, dtype=np.int64)
for k, v in mapping.items():
mapping_[k] = v
mapping = mapping_
if indexed_tensor.ndim == 2:
nbatch = indexed_tensor.shape[0]
        out = np.zeros((nbatch, max_dim)).astype(np.byte)
np.add.at(out, mapping.ravel(), indexed_tensor.ravel())
else:
out = np.zeros(max_dim).astype(np.byte)
np.add.at(out, mapping.ravel(), indexed_tensor.ravel())
return out
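# Hedged usage sketch (illustrative only, not part of the original module):
# `mapping` maps each index of `indexed_tensor` to an output id, and values that
# share an id are accumulated into a multi-hot count vector of length `max_dim`.
#   ind = np.array([1, 0, 1])        # one entry per input slot
#   mapping = {0: 2, 1: 0, 2: 2}     # slot -> id (ids may repeat)
#   get_id_from_ind_multihot(ind, mapping, max_dim=3)
#   # -> array([0, 0, 2], dtype=int8)   # id 2 collects the counts from slots 0 and 2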
| 2.359375
| 2
|
tethysapp/parleys_creek_management/controllers/results.py
|
CI-WATER/tethysapp-parleys_creek_management
| 0
|
12782032
|
from django.shortcuts import render
from ..model import SessionMaker, ManagementScenario
from ..model import (LITTLE_DELL_VOLUME,
LITTLE_DELL_RELEASE,
LITTLE_DELL_SPILL,
MOUNTAIN_DELL_VOLUME,
MOUNTAIN_DELL_RELEASE,
MOUNTAIN_DELL_SPILL,
DELL_CREEK_INFLOW,
LAMBS_CREEK_INFLOW,
RELIABILITY)
def view(request, scenario_id, plot_name):
"""
Default action for results controller
"""
session = SessionMaker()
scenario = session.query(ManagementScenario.name,
ManagementScenario.results_link,
ManagementScenario.reliability). \
filter(ManagementScenario.id == scenario_id). \
one()
# Access Plot data
data_source = ManagementScenario.get_results_dataset(scenario_id, plot_name)
# Other data for template
scenario_name = scenario.name
results_link = scenario.results_link
reliability = scenario.reliability
# Plot vars
plot_title = data_source['title']
plot_subtitle = data_source['subtitle']
y_axis_title = data_source['y_axis_title']
y_axis_units = data_source['y_axis_units']
series_data = data_source['series']
# Setup plot
highcharts_object = {
'chart': {
'type': 'line',
'zoomType': 'x'
},
'title': {
'text': plot_title
},
'subtitle': {
'text': plot_subtitle
},
'legend': {
'enabled': False,
'layout': 'vertical',
'align': 'right',
'verticalAlign': 'middle',
'borderWidth': 0
},
'xAxis': {
'title': {
'enabled': False
},
'type': 'datetime',
'maxZoom': 30 * 24 * 3600000, # 30 days in milliseconds
},
'yAxis': {
'title': {
'text': y_axis_title + ' (' + y_axis_units + ')'
}
},
'tooltip': {
'pointFormat': '{point.y} ' + y_axis_units
},
'series': [{'color': '#0066ff',
'marker': {'enabled': False},
'data': series_data}
]
}
timeseries = {'highcharts_object': highcharts_object,
'width': '100%',
'height': '500px'}
# Template context
context = {'scenario_id': str(scenario_id),
'plot_name': plot_name,
'scenario_name': scenario_name,
'results_link': results_link,
'reliability': round(reliability, 2),
'LITTLE_DELL_VOLUME': LITTLE_DELL_VOLUME,
'LITTLE_DELL_RELEASE': LITTLE_DELL_RELEASE,
'LITTLE_DELL_SPILL': LITTLE_DELL_SPILL,
'MOUNTAIN_DELL_VOLUME': MOUNTAIN_DELL_VOLUME,
'MOUNTAIN_DELL_RELEASE': MOUNTAIN_DELL_RELEASE,
'MOUNTAIN_DELL_SPILL': MOUNTAIN_DELL_SPILL,
'DELL_CREEK_INFLOW': DELL_CREEK_INFLOW,
'LAMBS_CREEK_INFLOW': LAMBS_CREEK_INFLOW,
'RELIABILITY': RELIABILITY,
'timeseries': timeseries}
session.close()
return render(request, 'parleys_creek_management/results/results_viewer.html', context)
| 1.960938
| 2
|
theory/Lesson6-Dictionaries/code/dictionary_final.py
|
dgrafov/redi-python-intro
| 8
|
12782033
|
<gh_stars>1-10
# Lesson6: Dictionaries
# source: code/dictionary_modify.py
grades = {}
finish = 'no'
while finish != 'yes':
action = int(input('Please type "1" for reading the data and "2" for modifying:\n'))
if action == 1:
subject = input('Please enter the name of a subject:\n')
if subject in grades:
print('The grade is:', grades[subject])
else:
print('No such subject')
elif action == 2:
subject = input('Please enter the name of a subject:\n')
grade = int(input('Please enter the grade:\n'))
grades[subject] = grade
for key in grades:
print(key, ':', grades[key])
else:
print('Wrong input')
finish = input('Do you want to quit the program? (type "yes" or "no"):\n').lower()
| 4.15625
| 4
|
solutions/problem_108.py
|
ksvr444/daily-coding-problem
| 1,921
|
12782034
|
def can_shift(target, string):
return \
target and string and \
len(target) == len(string) and \
string in target * 2
assert can_shift("abcde", "cdeab")
assert not can_shift("abc", "acb")
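# Why this works (informal note): every rotation of `target` appears as a
# contiguous substring of `target + target`, so once the lengths match the
# membership test `string in target * 2` detects exactly the rotations.
# e.g. "abcde" * 2 == "abcdeabcde" contains "cdeab" starting at index 2.
assert can_shift("ab", "ba")  # extra illustrative check: a 2-char swap is a rotation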
| 2.84375
| 3
|
foo_003_fews_new_food_insecurity/contents/src/__init__.py
|
bapludow/nrt-scripts
| 0
|
12782035
|
<filename>foo_003_fews_new_food_insecurity/contents/src/__init__.py
from __future__ import unicode_literals
import os
import logging
import sys
import urllib
import zipfile
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import requests as req
import fiona
from collections import OrderedDict
from shapely.geometry import mapping, Polygon, MultiPolygon
import cartosql
from src.update_layers import update_layers
# Constants
DATA_DIR = 'data'
SOURCE_URL = 'http://shapefiles.fews.net.s3.amazonaws.com/HFIC/{region}/{target_file}'
REGIONS = {'WA':'west-africa{date}.zip',
'CA':'central-asia{date}.zip',
'EA':'east-africa{date}.zip',
'LAC':'caribbean-central-america{date}.zip',
'SA':'southern-africa{date}.zip'}
TIMESTEP = {'days': 30}
DATE_FORMAT = '%Y%m'
DATETIME_FORMAT = '%Y%m%dT00:00:00Z'
CLEAR_TABLE_FIRST = False
SIMPLIFICATION_TOLERANCE = .04
PRESERVE_TOPOLOGY = True
# asserting table structure rather than reading from input
CARTO_TABLE = 'foo_003_fews_net_food_insecurity'
CARTO_SCHEMA = OrderedDict([
('the_geom', 'geometry'),
('_uid', 'text'),
('start_date', 'timestamp'),
('end_date', 'timestamp'),
('ifc_type', 'text'),
('ifc', 'numeric')
])
TIME_FIELD = 'start_date'
UID_FIELD = '_uid'
LOG_LEVEL = logging.INFO
MAXROWS = 10000
MAXAGE = datetime.today() - timedelta(days=365*3)
# Generate UID
def genUID(date, region, ifc_type, pos_in_shp):
'''ifc_type can be:
CS = "current status",
ML1 = "most likely status in next four months"
ML2 = "most likely status in following four months"
'''
return str('{}_{}_{}_{}'.format(date,region,ifc_type,pos_in_shp))
def getDate(uid):
'''first component of ID'''
return uid.split('_')[0]
def formatStartAndEndDates(date, plus=0):
dt = datetime.strptime(date, DATE_FORMAT) + relativedelta(months=plus)
return(dt.strftime(DATETIME_FORMAT))
def findShps(zfile):
files = {}
with zipfile.ZipFile(zfile) as z:
for f in z.namelist():
if os.path.splitext(f)[1] == '.shp':
if 'CS' in f:
files['CS'] = f
elif 'ML1' in f:
files['ML1'] = f
elif 'ML2' in f:
files['ML2'] = f
if len(files)!=3:
logging.error('There should be 3 shapefiles: CS, ML1, ML2')
return files
def potentialNewDates(exclude_dates):
'''Get new dates excluding existing'''
new_dates = []
date = datetime.today()
while date > MAXAGE:
datestr = date.strftime(DATE_FORMAT)
if datestr not in exclude_dates:
logging.debug('Will fetch data for {}'.format(datestr))
new_dates.append(datestr)
else:
logging.debug('Data for {} already in table'.format(datestr))
date -= timedelta(**TIMESTEP)
return new_dates
def simpleGeom(geom):
# Simplify complex polygons
# https://gis.stackexchange.com/questions/83084/shapely-multipolygon-construction-will-not-accept-the-entire-set-of-polygons
if geom['type'] == 'MultiPolygon':
multi = []
for polycoords in geom['coordinates']:
multi.append(Polygon(polycoords[0]))
geo = MultiPolygon(multi)
else:
geo = Polygon(geom['coordinates'][0])
logging.debug('Length orig WKT: {}'.format(len(geo.wkt)))
simple_geo = geo.simplify(SIMPLIFICATION_TOLERANCE, PRESERVE_TOPOLOGY)
logging.debug('Length simple WKT: {}'.format(len(simple_geo.wkt)))
geojson = mapping(simple_geo)
return geojson
def processNewData(exclude_dates):
new_ids = []
# get non-existing dates
new_dates = potentialNewDates(exclude_dates)
for date in new_dates:
# 1. Fetch data from source
for region, filename in REGIONS.items():
_file = filename.format(date=date)
url = SOURCE_URL.format(region=region,target_file=_file)
tmpfile = os.path.join(DATA_DIR,_file)
logging.info('Fetching data for {} in {}'.format(region,date))
try:
urllib.request.urlretrieve(url, tmpfile)
except Exception as e:
logging.warning('Could not retrieve {}'.format(url))
logging.error(e)
continue
# 2. Parse fetched data and generate unique ids
logging.info('Parsing data')
shpfiles = findShps(tmpfile)
for ifc_type, shpfile in shpfiles.items():
shpfile = '/{}'.format(shpfile)
zfile = 'zip://{}'.format(tmpfile)
rows = []
if ifc_type == 'CS':
start_date = formatStartAndEndDates(date)
end_date = formatStartAndEndDates(date)
elif ifc_type == 'ML1':
start_date = formatStartAndEndDates(date)
end_date = formatStartAndEndDates(date,plus=4)
elif ifc_type == 'ML2':
start_date = formatStartAndEndDates(date,plus=4)
end_date = formatStartAndEndDates(date,plus=8)
with fiona.open(shpfile, 'r', vfs=zfile) as shp:
logging.debug('Schema: {}'.format(shp.schema))
pos_in_shp = 0
for obs in shp:
uid = genUID(date, region, ifc_type, pos_in_shp)
### Received an error due to attempting to load same UID twice.
# If happens again, to reproduce, set CLEAR_TABLE_FIRST=True and run again.
new_ids.append(uid)
row = []
for field in CARTO_SCHEMA.keys():
if field == 'the_geom':
row.append(simpleGeom(obs['geometry']))
elif field == UID_FIELD:
row.append(uid)
elif field == 'ifc_type':
row.append(ifc_type)
elif field == 'ifc':
row.append(obs['properties'][ifc_type])
elif field == 'start_date':
row.append(start_date)
elif field == 'end_date':
row.append(end_date)
rows.append(row)
pos_in_shp += 1
# 3. Insert new observations
new_count = len(rows)
if new_count:
logging.info('Pushing {} new rows: {} for {} in {}'.format(new_count,ifc_type,region,date))
cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(),
CARTO_SCHEMA.values(), rows)
# 4. Delete local files
os.remove(tmpfile)
num_new = len(new_ids)
return num_new
##############################################################
# General logic for Carto
# should be the same for most tabular datasets
##############################################################
def createTableWithIndices(table, schema, idField, timeField):
'''Get existing ids or create table'''
cartosql.createTable(table, schema)
cartosql.createIndex(table, idField, unique=True)
if timeField != idField:
cartosql.createIndex(table, timeField, unique=False)
def getFieldAsList(table, field, orderBy=''):
assert isinstance(field, str), 'Field must be a single string'
r = cartosql.getFields(field, table, order='{}'.format(orderBy),
f='csv')
return(r.text.split('\r\n')[1:-1])
def deleteExcessRows(table, max_rows, time_field, max_age=''):
'''Delete excess rows by age or count'''
num_dropped = 0
if isinstance(max_age, datetime):
max_age = max_age.isoformat()
# 1. delete by age
if max_age:
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
num_dropped = r.json()['total_rows']
# 2. get sorted ids (old->new)
    ids = getFieldAsList(table, 'cartodb_id', orderBy='{}'.format(time_field))
# 3. delete excess
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
def main():
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logging.info('STARTING')
if CLEAR_TABLE_FIRST:
if cartosql.tableExists(CARTO_TABLE):
logging.info("Clearing table")
cartosql.dropTable(CARTO_TABLE)
# 1. Check if table exists and create table
existing_ids = []
if cartosql.tableExists(CARTO_TABLE):
existing_ids = getFieldAsList(CARTO_TABLE, UID_FIELD)
else:
createTableWithIndices(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
    # 2. Iteratively fetch, parse and post new data
existing_dates = [getDate(_id) for _id in existing_ids]
num_new = processNewData(existing_dates)
existing_count = num_new + len(existing_dates)
logging.info('Total rows: {}, New: {}, Max: {}'.format(
existing_count, num_new, MAXROWS))
# 3. Remove old observations
deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, MAXAGE)
# 4. Update layer definitions
update_layers()
###
logging.info('SUCCESS')
| 2.0625
| 2
|
Diena_6_lists/lists_g1.py
|
ValdisG31/Python_RTU_08_20
| 1
|
12782036
|
<reponame>ValdisG31/Python_RTU_08_20
# # What is a list after all?
# # * ordered
# # * collection of arbitrary objects (anything goes in)
# # * nested (onion principle, Matryoshka)
# # * mutable - values can be changed
# # * dynamic - size can change
my_list = [5, 6, "Valdis", True, 3.65, "alus"] # most common way of creating a list using [el1, el2]
print(my_list)
print(my_list[0])
my_list[1] = "Mr. 50" # lists are mutable (unlike strings)
print(my_list[:3])
print(my_list[-2:]) # last two
print(my_list[1:4], my_list[1:-1])
print(my_list[::2])
print(my_list[1::2])
print(my_list[-1])
# how to check for existence in list
print(3.65 in my_list)
print(66 in my_list)
print("Valdis" in my_list)
print("al" in my_list) # this is false
# # # iterate over items
needle = "al" # what we want to find in our list
for item in my_list:
print("Checking ", item)
if type(item) == str and needle in item:
print(f"Found {needle=} in {item=}") # python 3.8 and up
print(f"Found {needle} in {item}") # for python 3.7
# # #
# my_list.append()
my_list.append("<NAME>")
my_list.append("<NAME>") # IN PLACE methods, means we modify the list
print(my_list)
# example how to filter something
find_list = [] # so we have an empty list in beginning
needle = "al"
for item in my_list:
# if needle in item: will not work because we have non strings in list
if type(item) == str and needle in item:
print(f"Found {needle=} in {item=}")
find_list.append(item)
print(f"{needle=} found in {find_list=}")
# # ps the above could be done simpler with list comprehension
# # # out of place meaning find_list stays the same
new_list = my_list + ["Kalējs", "Audējs"] # out of place addition, my_list is not modified
print(len(new_list), len(my_list))
# print(f"{str(my_list)}")
# how to convert all values to str
str_list = []
for item in my_list:
str_list.append(str(item)) # so if item is already string nothing will happen
print(str_list)
# # list comprehensions make it even shorter
str_list_2 = [str(item) for item in my_list]
print(str_list_2)
print(str_list == str_list_2) # check if lists contain equal values
print(str_list is str_list_2) # check if our variables reference the same list
str_list_3 = str_list # so str_list_3 is a shortcut to the same values as str_list
print(str_list == str_list_3, str_list is str_list_3)
# # need needle of course
beer_list = [item for item in str_list if needle in item]
print(beer_list)
beer_list = beer_list[1:] #get rid of Valdis in my beer list
print(beer_list)
beer_list += ["Užavas alus"] # we create a list on demand - list literal beer_list = beer_list + ["Užavas alus"]
# similar to beer_list.append("Užavas alus")
print(beer_list)
# # new_list += ["Malējs"] # same as new_list = new_list + ["Malējs"]
# # new_list
print(beer_list[-1])
print(beer_list)
last_beer = beer_list.pop() # also in place meaning i destroyed the last value
print(f"We took out {last_beer}")
print(beer_list)
beer_list.append(last_beer)
print(beer_list)
print(beer_list.count("alus")) # only exact matches
print(beer_list.index("alus"), beer_list.index("Užavas alus"))
beer_list.extend(["Labietis", "Mālpils alus"]) # again in place
print(beer_list)
has_alus = len([el for el in beer_list if "ž" in el])
print(has_alus)
has_alus = len([el for el in beer_list if "alus" in el])
print(has_alus)
beer_list.insert(2, "Cēsu sula") # so it will insert BEFORE index 2 (meaning before 3rd element)
beer_list.insert(5, "Cēsu sula")
print(beer_list)
while "Cēsu sula" in beer_list:
print("found Cēsu sula")
beer_list.remove("Cēsu sula")
print(beer_list)
# beer_list.remove("Cēsu sula") # again in place first match
# print(beer_list)
# beer_list.remove("alus")
# print(beer_list)
# beer_list.remove("alus")
# print(beer_list)
beer_list.reverse() # in place reversal
print(beer_list)
new_beer_list = beer_list[::-1] # so i save the reversed list but keep the original
print(new_beer_list)
# # so if we have comparable data types inside (so same types)
new_beer_list.sort() # in place sort, modifies existing
print(new_beer_list)
sorted_by_len = sorted(new_beer_list, key=len) # out of place meaning returns new list
print(sorted_by_len)
sorted_by_len_rev = sorted(new_beer_list, key=len, reverse=True) # out of place meaning returns new list
print(sorted_by_len_rev)
print(max(beer_list), min(beer_list)) # by alphabet
numbers = [1, 4, -5, 3.16, 10, 9000, 5]
saved_sort_asc = sorted(numbers) # out of place does not modify numbers
print(saved_sort_asc)
# # sorted(numbers, reverse=True) # out of place does not modify numbers
# # numbers
# # numbers.sort() # in place meaning it modifies the numbers
# # numbers
# # numbers.remove(9000) # will remove in place first 9000 found in the list
# # numbers
# # min(numbers), max(numbers)
# print(sum(numbers))
# # our own sum
# total = 0
# for n in numbers:
# total += n
# print(total)
sentence = "Quick brown fox jumped over a sleeping dog"
words = sentence.split() # default split is by whitespace convert into a list of words
print(words)
words[2] = "bear"
print(words)
# # so str(words) will not work exactly so we need something else
new_sentence = " ".join(words) # we will lose any double or triple whitespace
print(new_sentence)
funky_sentence = "*:*".join(words) # we will lose any double or triple whitespace
print(funky_sentence)
# # we can create a list of letters
# food = "kartupelis"
# letters = list(food) # list with all letters
# print(letters)
# letters[5] = "m"
# new_word = "".join(letters) # we join by nothing so no spaces in the new word
# print(new_word)
# new_list = []
# for word in words:
# new_list.append(word.capitalize())
# print(new_list)
# # # list comprehension same as above
# new_list_2 = [w.capitalize() for w in words]
# print(new_list_2)
# filtered_list = [w for w in words if w.startswith("b")]
# print(filtered_list)
# filtered_list = [w.upper() for w in words if w.startswith("b")]
# print(filtered_list)
# # filtered_list_2 = [w for w in words if w[0] == "b"]
# # filtered_list_2
# # filtered_list_3 = [w.upper() for w in words if w[0] == "b"]
# # filtered_list_3
# numbers = list(range(10)) # we cast to list our range object
# print(numbers)
# squares = []
# for n in numbers: # could also use range(10)
# squares.append(n*n)
# print(squares)
# squares_2 = [n*n for n in range(10)] # list comprehension of the above
# print(squares_2)
# even_squares = [n*n for n in range(10) if n % 2 == 0]
# print(even_squares)
# print("Whew we need a beer now don't we ?")
# # food
# # char_codes = [ord(c) for c in food]
# # char_codes
# # char_codes_list = [[f"Char:{c}", ord(c)] for c in food]
# # char_codes_list
# # print(char_codes_list[0])
# # print(char_codes_list[0][0])
# # print(char_codes_list[-1])
# # print(char_codes_list[-1][-1])
| 4.5625
| 5
|
2_Bim/aula.py
|
eucgabriel/Faculdade-2--Semestre
| 1
|
12782037
|
<filename>2_Bim/aula.py
class Animais:
def __init__(self, nome=None, cobertura=None, raca=None):
self.nome = nome
self.cobertura = cobertura
self.raca = raca
def dar_nome(self, nome=None):
self.nome = nome
class Cachorros(Animais):
def trocar(self, cobertura=None, raca=None):
self.cobertura = "Crespo"
self.raca = "Pitbull"
animais = Animais()
cachorros = Cachorros(cobertura="Lisa", raca="Pintcher")
animais.dar_nome("Rex")
print("Nome: ", animais.nome)
print("Cachorro: ", cachorros.cobertura)
print("Raça: ", cachorros.raca)
cachorros.trocar()
print("Cachorro: ", cachorros.cobertura)
print("Raça: ", cachorros.raca)
| 2.875
| 3
|
classifier/vec/doc2vec_train.py
|
banboooo044/natural-language-sentiment-anaysis
| 0
|
12782038
|
<reponame>banboooo044/natural-language-sentiment-anaysis<filename>classifier/vec/doc2vec_train.py<gh_stars>0
# train Doc2vec and save model
import pandas as pd
import numpy as np
from gensim.models.doc2vec import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
# PV-DBOW
def train_DBOW(df):
print("train model ...")
sentences = [ TaggedDocument(words=words.split(","), tags=[i]) for i, (words, _, _) in df.iterrows() ]
model = Doc2Vec(documents=sentences, vector_size=300, alpha=0.025, min_alpha=0.000001, \
window=15, iter=20, min_count=1, dm=0, workers=4, seed=71)
print('train start')
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
return model
# PV-DM
def train_DM(df):
print("train model ...")
sentences = [ TaggedDocument(words=words.split(","), tags=[i]) for i, (words, _, _) in df.iterrows() ]
model = Doc2Vec(documents=sentences, vector_size=300, alpha=0.05, min_alpha=0.000001, \
window=10, iter=20, min_count=1, dm=1, workers=4, seed=71)
print('train start')
model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
return model
def save_model(model, filename):
print("save model ...")
model.save(f"./{filename}.model")
if __name__ == "__main__":
PATH = "../data/courpus-wakati-juman.tsv"
df = pd.read_table(PATH, index_col=0)
df = df[~pd.isnull(df["text"])]
model = train_DM(df)
    # Save the model
OUTPUT_FILENAME = "Doc2vec"
save_model(model, OUTPUT_FILENAME)
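    # Hedged follow-up sketch: the saved model can later be reloaded and used to
    # infer a vector for a new token list (standard gensim Doc2Vec API), e.g.:
    #   model = Doc2Vec.load("Doc2vec.model")
    #   vec = model.infer_vector(["token_a", "token_b"])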
| 2.640625
| 3
|
old_django_malliva/settingsManager/migrations/0001_initial.py
|
olubiyiontheweb/malliva
| 0
|
12782039
|
<gh_stars>0
# Generated by Django 3.2 on 2021-06-22 12:50
from django.db import migrations, models
import django.db.models.deletion
import settingsManager.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('paymentGateways', '0001_initial'),
('customCodes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SocialMediaPage',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('facebook_page', models.URLField()),
('twitter_page', models.URLField()),
('instagram_page', models.URLField()),
('github_page', models.URLField()),
('linkedin_page', models.URLField()),
('tiktok_page', models.URLField()),
],
),
migrations.CreateModel(
name='TemplateStyling',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('favicon', models.ImageField(blank=True, upload_to=settingsManager.models.image_directory_path)),
('logo', models.ImageField(blank=True, upload_to=settingsManager.models.image_directory_path)),
('wide_logo', models.ImageField(blank=True, upload_to=settingsManager.models.image_directory_path)),
('cover_photo', models.ImageField(blank=True, upload_to=settingsManager.models.image_directory_path)),
('small_cover_photo', models.ImageField(blank=True, help_text='cover photo for mobile', upload_to=settingsManager.models.image_directory_path)),
('theme_color', models.CharField(max_length=200)),
('landing_page_settings', models.CharField(max_length=200)),
('footer_background', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('custom_codes', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='customCodes.customcode')),
],
),
migrations.CreateModel(
name='Configuration',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('slogan', models.CharField(max_length=200)),
('description', models.CharField(max_length=500)),
('language', models.CharField(default='en', max_length=200)),
('country', models.CharField(max_length=200)),
('currency', models.CharField(max_length=200)),
('malliva_terms_consent', models.BooleanField(default=False, help_text="track clients agreement of Malliva's latest terms of service")),
('transaction_agreement_in_use', models.BooleanField(default=False)),
('notify_admins_about_new_members', models.BooleanField(default=False)),
('notify_admins_about_new_transactions', models.BooleanField(default=False)),
('require_verification_to_post_listings', models.BooleanField(default=False)),
('private_marketplace', models.BooleanField(default=True)),
('automatic_newsletters', models.BooleanField(default=False)),
('invite_only_marketplace', models.BooleanField(default=False)),
('send_emails_from', models.EmailField(max_length=200)),
('preapprove_listing', models.BooleanField(default=False, help_text='listings are public by default in the marketplace')),
('show_date_in_listing_view', models.BooleanField(default=False)),
('transaction_flow', models.CharField(max_length=200)),
('show_location', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('active_payment_processor', models.ForeignKey(default='1', on_delete=django.db.models.deletion.SET_DEFAULT, to='paymentGateways.paymentgateway')),
('templatestyle', models.OneToOneField(default='1', null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to='settingsManager.templatestyling')),
],
options={
'abstract': False,
},
),
]
| 1.65625
| 2
|
cpu_demo/parse_target_repo.py
|
nicktendo64/hash-fixpoints
| 1
|
12782040
|
#! /usr/bin/python3
# run this from the root of a git repository with the command-line arguments
# described in the usage statement below
import sys
import subprocess
import os
AUTHOR = "<NAME> <<EMAIL>>"
TIMEZONE = "-0700"
DESIRED_COMMIT_MESSAGE = "added self-referential commit hash using magic"
DESIRED_COMMIT_TIMESTAMP = "1591753853"
# timestamp (formatted as seconds since UNIX epoch)
# to get git to make a commit at the right time, run the following before
# git commit:
#
# export GIT_COMMITTER_DATE='<timestamp>'
# export GIT_AUTHOR_DATE='<timestamp>'
# values for SHA-1
DIGEST_LEN = 20
HEXDIGEST_LEN = 40
PREFIX_LEN = 6
if len(sys.argv) != 4:
print("usage: parse_target_repo.py (path to target file) (text to replace with hash) (output directory)")
sys.exit(1)
target_file = sys.argv[1]
to_replace = bytes(sys.argv[2], encoding="utf-8")
out_dir = sys.argv[3]
dir_layers = [None] + \
[ bytes(l, encoding="utf-8") for l in target_file.split("/") ]
print("reading relevant hashes from git...")
hashes = [ subprocess.check_output(["git", "rev-parse", "HEAD"])[:-1] ]
for i, layer in enumerate(dir_layers):
curr_tree = subprocess.check_output(["git", "cat-file", "-p", hashes[-1]])
if i == 0:
hash = curr_tree[5: 5 + HEXDIGEST_LEN]
else:
hash_end = curr_tree.find(b"\t%s\n" % layer)
hash_start = hash_end - HEXDIGEST_LEN
hash = curr_tree[hash_start:hash_end]
hashes.append(hash)
print("reading relevant objects from .git/objects...")
hashes = hashes[::-1]
# reverse order of hashes so the blob we are writing to is first in the output
merkle_layer_prefixes = []
merkle_layer_suffixes = []
# Git stores the file tree in a Merkle Tree (the root of a tree where each
# parent is the SHA-1 hash of its children's hashes in a certain format)
digest_types = [False for _ in range(len(hashes) - 2)] + [True, False]
# depending on the point, Git either feeds the bytes in direcly or
# (for the commit) feeds in a hexadecimal string
# True = hexdigest
# False = digest
def tree_unprettify(tree_pretty_printed):
out = b""
for line in tree_pretty_printed.splitlines():
toks = line.split()
out += toks[0] + b" " # mode
out += toks[3] + b"\0" # filename (no spaces in any fname assumed)
out += bytes.fromhex(toks[2].decode())
return out
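# Hedged illustration of tree_unprettify: `git cat-file -p <tree>` prints entries like
#   b"100644 blob <40-hex sha1>\tfile.txt"
# while the raw tree object stores the same entry as
#   b"100644 file.txt\x00" + the 20 raw SHA-1 digest bytes,
# which is the byte layout the function reconstructs before re-hashing.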
for i in range(len(hashes) - 1):
hash = hashes[i].decode()
git_obj_type = subprocess.check_output(["git", "cat-file", "-t", hash])[:-1]
git_obj_size = subprocess.check_output(["git", "cat-file", "-s", hash])[:-1]
git_obj_body = subprocess.check_output(["git", "cat-file", "-p", hash])
if i > 0:
git_obj_body = tree_unprettify(git_obj_body)
git_obj_contents = b"%s %s\0%s" % (git_obj_type, git_obj_size, git_obj_body)
if i == 0:
prefix_end = git_obj_contents.find(to_replace)
suffix_begin = prefix_end + len(to_replace)
else:
if digest_types[i - 1]:
prev_hash = bytes(prev_hash, encoding='utf-8')
else:
prev_hash = bytes.fromhex(prev_hash)
prefix_end = git_obj_contents.find(prev_hash)
suffix_begin = prefix_end + \
(HEXDIGEST_LEN if digest_types[i - 1] else DIGEST_LEN)
merkle_layer_prefixes.append(git_obj_contents[:prefix_end])
merkle_layer_suffixes.append(git_obj_contents[suffix_begin:])
prev_hash = hash
commit_suffix = bytes("""
parent {parent_commit}
author {author_str} {timestamp} {timezone}
committer {author_str} {timestamp} {timezone}
{commit_message}
""".format(parent_commit=hashes[-1].decode(), author_str=AUTHOR,
timestamp=DESIRED_COMMIT_TIMESTAMP, timezone=TIMEZONE,
commit_message=DESIRED_COMMIT_MESSAGE), encoding="utf-8")
commit_prefix = bytes("commit {}\0tree ".format(
len(commit_suffix) + 5 + HEXDIGEST_LEN), encoding="utf-8")
# total size is suffix + tree hash + len("tree ")
merkle_layer_prefixes.append(commit_prefix)
merkle_layer_suffixes.append(commit_suffix)
# ensure blob header is accurate with prefix length
merkle_layer_prefixes[0] = merkle_layer_prefixes[0][ merkle_layer_prefixes[0].find(b"\0") + 1:]
actual_size = len(merkle_layer_prefixes[0]) + len(merkle_layer_suffixes[0]) + PREFIX_LEN
merkle_layer_prefixes[0] = (b"blob %d\0" % actual_size) + merkle_layer_prefixes[0]
print("saving bytes to directory...")
os.makedirs(out_dir + "/prefixes")
os.makedirs(out_dir + "/suffixes")
i = 0
for prefix, suffix, digest_type in zip(merkle_layer_prefixes, merkle_layer_suffixes, digest_types):
with open("{}/prefixes/{}.txt".format(out_dir, i), "wb") as f:
f.write(prefix)
with open("{}/suffixes/{}.txt".format(out_dir, i), "wb") as f:
f.write(suffix)
i += 1
with open(out_dir + "/digest_bits.txt", "a") as f:
f.write(" ".join(map(lambda b: str(int(b)), digest_types)))
| 2.59375
| 3
|
gen-template.py
|
claytantor/troposphere-iac
| 0
|
12782041
|
#!/usr/bin/env python
# coding: utf-8
import logging
import argparse
import importlib
from tropiac.utils import make_cloudformation_client, load_config, get_log_level
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, required=True,
help='the name of the stack to create.')
parser.add_argument('--config', type=str, required=True,
help='the name of the configuration section to use.')
parser.add_argument('--log', type=str, default="INFO", required=False,
help='which log level. DEBUG, INFO, WARNING, CRITICAL')
args = parser.parse_args()
# init LOGGER
stack = importlib.import_module('tropiac.stacks.{0}'.format(args.name))
cfg = stack.get_config()
template = stack.make_template(cfg[args.config])
print(template.to_json())
if __name__ == '__main__':
main()
| 2.453125
| 2
|
PolTools/main_programs/tsrFinder.py
|
GeoffSCollins/PolTools
| 0
|
12782042
|
"""
This program is the interface and driver for tsrFinder
"""
import os
import sys
import argparse
import multiprocessing
from collections import defaultdict
from multiprocessing import Pool
from PolTools.utils.constants import tsr_finder_location
from PolTools.utils.tsr_finder_step_four_from_rocky import run_step_four
from PolTools.utils.remove_files import remove_files
def positive_int(num):
try:
val = int(num)
if val <= 0:
raise Exception("Go to the except")
except:
raise argparse.ArgumentTypeError(num + " must be positive")
return val
parser = argparse.ArgumentParser(prog='PolTools tsrFinder',
description='Find transcription start regions\n' +
"More information can be found at " +
"https://geoffscollins.github.io/PolTools/tsrFinder.html")
parser.add_argument('seq_file', metavar='seq_file', type=str, help='Bed formatted sequencing file to find the TSRs')
parser.add_argument('window_size', metavar='window_size', type=positive_int, help='Base pair size of the sliding window')
parser.add_argument('min_seq_depth', metavar='min_seq_depth', type=positive_int, help="Minimum number of 5' ends to be considered a TSR")
parser.add_argument('min_avg_transcript_length', metavar='min_avg_transcript_length', type=positive_int, help="Minimum average transcript length to be considered a TSR")
parser.add_argument('max_fragment_size', metavar='max_fragment_size', type=positive_int, help="Maximum fragment size for a read to be counted in tsrFinder")
parser.add_argument('chrom_size_file', metavar='chrom_size_file', type=str, help="Chromosome sizes file")
parser.add_argument('-t', '--threads', dest='threads', metavar='threads', type=positive_int, nargs='?', default=multiprocessing.cpu_count())
args = parser.parse_args(sys.argv[1:])
bed_file = args.seq_file
window_size = args.window_size
min_seq_depth = args.min_seq_depth
min_avg_transcript_length = args.min_avg_transcript_length
max_fragment_size = args.max_fragment_size
chrom_size_file = args.chrom_size_file
max_threads = args.threads
# Make sure bed_file and chrom_size_file exist
if not os.path.isfile(bed_file):
sys.stderr.write(bed_file + " was not found. Exiting ...\n")
sys.exit(1)
if not os.path.isfile(chrom_size_file):
sys.stderr.write(chrom_size_file + " was not found. Exiting ...\n")
sys.exit(1)
if not bed_file.endswith(".bed"):
sys.stderr.write("The sequencing file must end in .bed. Exiting ...\n")
sys.exit(1)
chromosome_sizes = defaultdict(int)
with open(chrom_size_file) as file:
for line in file:
chromosome, size = line.split()
chromosome_sizes[chromosome] = int(size)
# Step 1. Split the bed file into files by chromosome and strands
fw_filename = bed_file.replace(".bed", "-FW.bed")
rv_filename = bed_file.replace(".bed", "-RV.bed")
parameters_string = "_".join([str(window_size), str(min_seq_depth), str(min_avg_transcript_length), str(max_fragment_size)])
output_filename = bed_file.replace(".bed", "_" + parameters_string + "-TSR.tab")
chromosome_file_writers = defaultdict(lambda : {"+": None, "-": None})
chromosome_files = []
tsr_finder_step_files = []
output_files = []
with open(bed_file) as file:
for line in file:
chromosome, left, right, name, score, strand = line.split()
if chromosome in chromosome_sizes:
if chromosome not in chromosome_file_writers:
fw_filename = bed_file.replace(".bed", "-" + chromosome + "-FW.bed")
rv_filename = bed_file.replace(".bed", "-" + chromosome + "-RV.bed")
chromosome_file_writers[chromosome]["+"] = open(fw_filename, 'w')
chromosome_file_writers[chromosome]["-"] = open(rv_filename, 'w')
chromosome_files.extend([fw_filename, rv_filename])
for i in range(2, 5):
tsr_finder_step_files.append(fw_filename.replace(".bed", "-" + str(i) + "-output.txt"))
tsr_finder_step_files.append(rv_filename.replace(".bed", "-" + str(i) + "-output.txt"))
output_files.append(fw_filename.replace(".bed", "-4-output.txt"))
output_files.append(rv_filename.replace(".bed", "-4-output.txt"))
chromosome_file_writers[chromosome][strand].write(line)
# Need to close all the writers
for chromosome in chromosome_file_writers:
chromosome_file_writers[chromosome]["+"].close()
chromosome_file_writers[chromosome]["-"].close()
# Step 2: Run tsrFinder on both files concurrently
def run_tsrFinderGC(filename):
os.system(tsr_finder_location + " " + filename + " " +
" ".join([str(window_size), str(min_seq_depth), str(min_avg_transcript_length),
str(max_fragment_size), chrom_size_file]))
step_three_filename = filename.replace(".bed", "-3-output.txt")
step_four_filename = filename.replace(".bed", "-4-output.txt")
run_step_four(step_three_filename, window_size, chromosome_sizes, step_four_filename)
with Pool(max_threads) as pool:
pool.map(run_tsrFinderGC, (filename for filename in chromosome_files))
# # Step 3: Combine the output files and delete intermediate files
os.system("cat " + " ".join(output_files) + " > " + output_filename)
remove_files(tsr_finder_step_files)
remove_files(chromosome_files)
| 2.6875
| 3
|
src/morphforge/simulation/neuron/objects/obj_cclamp.py
|
mikehulluk/morphforge
| 1
|
12782043
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge import units
from morphforge.simulation.base import CurrentClamp
from morphforge.simulation.neuron.objects.neuronobject import NEURONObject
from morphforge.constants.standardtags import StandardTags
from morphforge.simulation.neuron.simulationdatacontainers import MHocFileData
#from morphforge.units import qty
from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils
from morphforge.simulation.neuron.hocmodbuilders import HocBuilder
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordable
from morphforge.simulation.base.stimulation import CurrentClampStepChange
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
class CurrentClampCurrentRecord(NEURONRecordable):
def __init__(self, cclamp, **kwargs):
super(CurrentClampCurrentRecord, self).__init__(**kwargs)
self.cclamp = cclamp
def get_unit(self):
return units.nA
def get_std_tags(self):
return [StandardTags.Current]
def build_hoc(self, hocfile_obj):
name_hoc = hocfile_obj[MHocFileData.CurrentClamps][self.cclamp]['stimname']
HocModUtils.create_record_from_object(
hocfile_obj=hocfile_obj,
vecname='RecVec%s' % self.name,
objname=name_hoc,
objvar='i',
recordobj=self)
def build_mod(self, modfile_set):
pass
def get_description(self):
return 'Step CurrentClamp Injection: %s' % self.cclamp.name
class NEURONCurrentClampStepChange(CurrentClampStepChange, NEURONObject):
def __init__(self, **kwargs):
super(NEURONCurrentClampStepChange, self).__init__(**kwargs)
def build_hoc(self, hocfile_obj):
HocBuilder.CurrentClamp(hocfile_obj=hocfile_obj,
currentclamp=self)
def build_mod(self, modfile_set):
pass
def get_recordable(self, what, name=None, **kwargs):
recorders = {
CurrentClamp.Recordables.Current: CurrentClampCurrentRecord
}
return recorders[what](cclamp=self, name=name, **kwargs)
NEURONEnvironment.currentclamps.register_plugin(CurrentClampStepChange, NEURONCurrentClampStepChange)
| 1.195313
| 1
|
movierama/movierama/movies/views.py
|
abourazanis/movierama
| 0
|
12782044
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic import CreateView, ListView
from django.views.generic.edit import FormMixin, FormView, ProcessFormView
from movierama.movies.forms import MovieForm
from movierama.movies.models import Movie
class MovieCreateView(CreateView):
form_class = MovieForm
template_name = "movie_create.html"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_success_url(self):
return reverse("homepage")
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = self.request.user
self.object.save()
messages.add_message(self.request, messages.SUCCESS,
'Movie "{}" successfully added.'.format(self.object),
fail_silently=True)
return HttpResponseRedirect(self.get_success_url())
create_movie = MovieCreateView.as_view()
class MovieListView(ListView):
model = Movie
template_name = "pages/home.html"
context_object_name = "movies"
def get_ordering(self):
return self.request.GET.get('order_by', None)
def get_queryset(self):
queryset = self.model.objects.all()
if self.request.user.is_authenticated:
queryset = self.model.as_user(self.request.user.id).all()
username = self.kwargs.get("username", None)
if username:
queryset = queryset.filter(user__username=username)
ordering = self.get_ordering()
if ordering:
if ordering == "date":
queryset = queryset.order_by("-date_created")
if ordering == "likes":
queryset = queryset.order_by_likes()
if ordering == "hates":
queryset = queryset.order_by_hates()
return queryset
movieslist = MovieListView.as_view()
class VoteMovieView(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
try:
movie = Movie.objects.get(id=kwargs.get('movie_id', None))
except ObjectDoesNotExist as e:
return HttpResponseRedirect(reverse("homepage"))
vote = request.POST.get('vote', None)
if vote is not None:
movie.vote(self.request.user, vote)
return HttpResponseRedirect(reverse("homepage"))
vote_movie = VoteMovieView.as_view()
class UnVoteMovieView(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
try:
movie = Movie.objects.get(id=kwargs.get('movie_id', None))
except ObjectDoesNotExist as e:
return HttpResponseRedirect(reverse("homepage"))
movie.remove_vote(self.request.user)
return HttpResponseRedirect(reverse("homepage"))
unvote_movie = UnVoteMovieView.as_view()
| 2.1875
| 2
|
homeworks/yan_romanovich/hw05/level05.py
|
tgrx/Z22
| 0
|
12782045
|
<gh_stars>0
def unique(collection):
return len(collection) == len(set(collection))
| 2.453125
| 2
|
tests/test_q0202.py
|
mirzadm/ctci-5th-py
| 0
|
12782046
|
"""Unit tests for q0202.py."""
import unittest
from src.utils.linkedlist import LinkedList
from src.q0202 import kth_element_to_last
class TestKthElementToLast(unittest.TestCase):
"""Tests for kth element to last."""
def test_kth_element_to_last(self):
linked_list = LinkedList()
self.assertEqual(kth_element_to_last(None, 1), None)
self.assertEqual(kth_element_to_last(linked_list, 1), None)
linked_list.insert_at_head(3)
self.assertEqual(kth_element_to_last(linked_list, 0), None)
self.assertEqual(kth_element_to_last(linked_list, 1), 3)
self.assertEqual(kth_element_to_last(linked_list, 2), None)
linked_list.insert_at_head(2)
self.assertEqual(kth_element_to_last(linked_list, 0), None)
self.assertEqual(kth_element_to_last(linked_list, 1), 3)
self.assertEqual(kth_element_to_last(linked_list, 2), 2)
self.assertEqual(kth_element_to_last(linked_list, 3), None)
if __name__ == '__main__':
unittest.main()
| 3.5625
| 4
|
rushapi/blueprints/url_shortener.py
|
Kyuunex/rush-api
| 0
|
12782047
|
"""
This file provides endpoints for everything URL shortener related
"""
from flask import Blueprint, request, make_response, redirect, json
import time
import validators
from rushapi.reusables.context import db_cursor
from rushapi.reusables.context import db_connection
from rushapi.reusables.rng import get_random_string
from rushapi.reusables.user_validation import get_user_context
url_shortener = Blueprint("url_shortener", __name__)
@url_shortener.route('/create_redirect', methods=['POST'])
@url_shortener.route('/create_redirect/<desired_id>', methods=['POST', 'PUT'])
def create_redirect(desired_id=None):
"""
This endpoint handles the POST data submitted by the client.
It will process this information and create a shortened a URL.
:return: A newly created shortened URL.
"""
if desired_id:
user_context = get_user_context()
if not (user_context and user_context.premium):
return json.dumps({
"error": "Creating a custom redirect requires a premium account. "
"If you already have one, put your token in the headers.",
}), 401
premium = 1
url_id = desired_id
author_id = user_context.id
else:
premium = 0
url_id = get_random_string(7)
author_id = None
if request.method == 'POST':
url = request.form['url']
delete_after = request.form['delete_after']
if len(delete_after) > 0:
try:
delete_after = int(delete_after)
except ValueError:
delete_after = 0
else:
delete_after = int(time.time()) + 2.592e+6
if not len(url) < 250:
return json.dumps({
"error": "URL length must be below 250 characters.",
}), 403
if not validators.url(url):
return json.dumps({
"error": "URL is not valid",
}), 403
domain_blacklist = tuple(db_cursor.execute("SELECT domain FROM domain_blacklist"))
for blacklisted_domain in domain_blacklist:
if blacklisted_domain[0] in url:
return json.dumps({
"error": "This domain is blacklisted.",
}), 403
db_cursor.execute("INSERT INTO urls "
"(id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
[url_id, author_id, url, int(time.time()), premium, 0, delete_after, int(time.time())])
db_connection.commit()
return json.dumps({
"shortened_url": f"https://{request.host}/u/{url_id}"
}), 200
@url_shortener.route('/u/<url_id>')
def redirect_url(url_id):
"""
This endpoint looks up the unique identifier of the URL and redirects the user there.
Along the way it takes note of the time visited and increase the visit count.
:param url_id: URL's unique identifier
:return: A redirect to that URL
"""
post_url_lookup = tuple(db_cursor.execute("SELECT url, visits, delete_after FROM urls WHERE id = ?", [url_id]))
if not post_url_lookup:
return make_response(redirect("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))
visits = int(post_url_lookup[0][1])
    db_cursor.execute("UPDATE urls SET visits = ?, last_visit = ? WHERE id = ?",
[(visits+1), int(time.time()), url_id])
db_connection.commit()
return make_response(redirect(post_url_lookup[0][0]))
@url_shortener.route('/my_urls')
def my_urls():
"""
:return: This endpoint returns all the URLs the user has created.
"""
user_context = get_user_context()
if not user_context:
return json.dumps({
"error": "This endpoint requires an account. "
"If you already have one, put your token in the headers.",
}), 401
urls = db_cursor.execute("SELECT id, author_id, url, creation_timestamp, premium, visits, delete_after, last_visit "
"FROM urls WHERE author_id = ?", [user_context.id])
buffer = []
for url in urls:
buffer.append({
"id": url[0],
"author_id": url[1],
"url": url[2],
"creation_timestamp": url[3],
"premium": url[4],
"visits": url[5],
"delete_after": url[6],
"last_visit": url[7],
})
return json.dumps(buffer)
| 3.265625
| 3
|
profetorch/blocks/squasher.py
|
sachinruk/ProFeTorch1
| 19
|
12782048
|
<filename>profetorch/blocks/squasher.py<gh_stars>10-100
# AUTOGENERATED! DO NOT EDIT! File to edit: Squasher.ipynb (unless otherwise specified).
__all__ = ['Squasher']
# Cell
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.Tensor.ndim = property(lambda x: x.dim())
# Cell
# export
class Squasher(nn.Module):
"""
    Squashes output to lie between `low` and `high`.
"""
def __init__(self, low=None, high=None, mean=0, sd=1, alpha=0.01):
super().__init__()
if low is not None:
low = (low - mean) / sd
if high is not None:
high = (high - mean) / sd
self.L, self.H, self.alpha = low, high, alpha
def forward(self, t):
if self.L is not None:
t[t < self.L] = self.alpha * (t[t < self.L] - self.L) + self.L
if self.H is not None:
t[t > self.H] = self.alpha * (t[t > self.H] - self.H) + self.H
return t
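# Hedged usage sketch (illustrative, mirrors the defaults above):
#   squash = Squasher(low=0.0, high=1.0)
#   y = squash(torch.tensor([-0.5, 0.5, 1.5]))
#   # values below `low` / above `high` are pulled back with the leaky slope alpha=0.01,
#   # giving approximately [-0.005, 0.5, 1.005]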
| 2.546875
| 3
|
aux_routines/PerfStats.py
|
jshtok/StarNet
| 3
|
12782049
|
<reponame>jshtok/StarNet<gh_stars>1-10
import numpy as np
import os
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
from aux_routines.data_structures import get_GT_IoUs,get_GT_IoPs,get_GT_IoGs
import copy
def voc_ap(rec, prec, score, sc_part, use_07_metric=False):
"""
average precision calculations
[precision integrated to recall]
:param rec: recall
:param prec: precision
:param use_07_metric: 2007 metric is 11-recall-point based AP
:return: average precision
"""
if use_07_metric:
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
else:
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
mscore = np.concatenate(([0.], score, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
pr_graph = {'mrec': mrec[i + 1], 'mpre': mpre[i + 1], 'mscore': mscore[i + 1]}
return ap, pr_graph
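# Hedged usage sketch for voc_ap (assumes rec/prec/score are aligned, cumulatively
# computed arrays, as produced by the detection statistics below):
#   ap, pr_graph = voc_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]),
#                         np.array([0.9, 0.4]), sc_part=None)
#   # ap integrates precision over recall; pr_graph holds the sampled PR curve points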
from aux_routines.stats import img_dets_CSV_2_A,detName_2_imgName
class PerfStats():
def __init__(self, Nclasses=1, Nslots=10000, is_valid=True, vet_score_thresh=0.55):
self.valid = is_valid
self.maxFpw = 0 # max FPW on some image in the query set
self.nGT = 0 # number of GT objects so far
self.d = 0 # current entry
self.sc = np.zeros(Nslots) # scores
self.tp = np.zeros(Nslots) # True Positives
self.fp = np.zeros(Nslots) # False Positives
self.fpw = np.zeros(Nslots) # False Positives - wrong class
self.fpb = np.zeros(Nslots) # False Positives - background box
#self.require_difficult = difficult # require that objects marked 'difficult' will be detected. Accept their detections anyway.
self.vet_score_thresh = vet_score_thresh
self.CM_thresh = np.linspace(0.1, 0.9, 9).tolist()
self.CM = np.zeros([Nclasses, Nclasses,len(self.CM_thresh)],dtype=int)
self.IoG=[]
def add_base_stats(self,dets_folder, cat_ords, gt_entry, ovthresh,fg_measure=0):
        '''
        Accumulate base statistics from per-image detection files.
        :param dets_folder: folder containing per-image detection CSV files (dets_*.txt)
        :param cat_ords: ordinals of the categories participating in the episode
        :param gt_entry: ground-truth record with 'boxes' and 'gt_classes' per image
        :param ovthresh: overlap threshold for counting a detection as foreground
        :param fg_measure: 0 - IoU, 1 - IoP (intersection over prediction)
        :return:
        '''
# show_dets_gt_boxes(im, dets, classes, gt_boxes, gt_classes, scale=1.0, FS=22, LW=3.5, save_file_path='temp_det_gt.png'):
# show_detsB_gt_boxes(im, dets_B, gt_boxes, gt_classes, scale = 1.0, save_file_path='temp_det_gt.png'):
for detsName in os.listdir(dets_folder):
if detsName.startswith('dets') and detsName.endswith('.txt'):
q_dets, cat_names = img_dets_CSV_2_A(os.path.join(dets_folder, detsName), cat_ords)
imgName = detName_2_imgName(detsName)
# gt_entry = roidb_ni[imgName]
# then run
q = int(detsName.split('_')[2].split('.')[0])
gt_boxes_test = gt_entry['boxes'][q]
gt_boxes_test = gt_boxes_test[np.where(np.max(gt_boxes_test, axis=1) > 0)]
gt_classes_test = gt_entry['gt_classes'][q]
gt_classes_test = np.asarray([gt_classes_test for _ in gt_boxes_test])
# gt_boxes_test = []
# gt_classes_test = []
# for gt_box, gt_class in zip( gt_entry['boxes'], gt_entry['gt_classes']):
# if gt_class in cat_ords:
# gt_boxes_test += [gt_box]
# gt_classes_test += [gt_class]
# gt_classes_test = np.asarray(gt_classes_test)
# gt_boxes_test = np.asarray(gt_boxes_test)
self.comp_epi_stats_m(q_dets, gt_boxes_test, gt_classes_test, cat_ords, ovthresh,fg_measure=fg_measure)
# perf_str_gl = self.print_perf( prefix='stats_global: ')
def assert_space(self):
btch = 100000
if self.sc.shape[0] < self.d + btch:
self.sc = np.concatenate((self.sc, np.zeros(btch, )))
self.tp = np.concatenate((self.tp, np.zeros(btch, )))
self.fp = np.concatenate((self.fp, np.zeros(btch, )))
self.fpw = np.concatenate((self.fpw, np.zeros(btch, )))
self.fpb = np.concatenate((self.fpb, np.zeros(btch, )))
def comp_epi_CM(self,scores_all,boxes_all,gt_boxes, gt_classes,epi_cats, ovthresh):
all_cats = [0]+epi_cats
Ndets = scores_all.shape[0]
Nclasses = scores_all.shape[1]
# for i_DetCat, DetCat in enumerate(epi_cats):
# cat_dets = np.hstack((boxes_all,scores_all[:,i_DetCat+1]))
N_gt = gt_boxes.shape[0]
gt_IoU_map = np.zeros((Ndets, N_gt))
for i_det, box_det in enumerate(boxes_all):
gt_IoU_map[i_det, :] = get_GT_IoUs(box_det, gt_boxes)
true_class_indices = np.argmax(gt_IoU_map,axis=1)
FG_idx = np.where(np.max(gt_IoU_map,axis=1) >= ovthresh)
BG_idx = np.where(np.max(gt_IoU_map, axis=1) < ovthresh)
true_class_indices_FG = true_class_indices[FG_idx]
scores_FG = scores_all[FG_idx]
scores_BG = scores_all[BG_idx]
for i_thresh, thresh in enumerate(self.CM_thresh):
FG_high_score_idx = np.where(np.max(scores_FG, axis=1) > thresh)
BG_high_score_idx = np.where(np.max(scores_BG, axis=1) > thresh)
true_class_indices_FG_high = true_class_indices_FG[FG_high_score_idx]
scores_FG_high = scores_FG[FG_high_score_idx]
scores_BG_high = scores_BG[BG_high_score_idx]
hit_indices = np.argmax(scores_FG_high, axis=1)
miss_indices = np.argmax(scores_BG_high, axis=1)
# confusion matrix: for each detection roi: if its FG, the true class is its category; if its BG, the true class is 0 (background).
# add 1 to row <true class>, column <argMax(scores)> for this detection
for det_idx in miss_indices:
self.CM[0, all_cats[det_idx],i_thresh ] += 1
#print('object at cat 0 detected as cat {0}'.format(all_cats[det_idx]))
for true_cls_idx, det_idx in zip(true_class_indices_FG_high,hit_indices):
self.CM[gt_classes[true_cls_idx], all_cats[det_idx],i_thresh] += 1
#print('object at cat {0} detected as cat {1}'.format(gt_classes[true_cls_idx],all_cats[det_idx]))
def comp_epi_stats_m(self, q_dets, gt_boxes, gt_classes, epi_cats, ovthresh, err_score_thres=0,miss_count={},false_count={},true_count={},fg_measure=0):
        '''
        Compare one image's detections against ground truth and accumulate TP/FP statistics.
        :param q_dets: per-category detection arrays (one per entry of epi_cats; last column is the score)
        :param gt_boxes: ground-truth boxes for the image
        :param gt_classes: ground-truth class ordinal for each box
        :param epi_cats: category ordinals of the episode
        :param ovthresh: overlap threshold for counting a detection as foreground
        :param err_score_thres: score threshold above which mistakes flag the image as an error case (for plotting)
        :param miss_count: per-category running count of missed GT objects (updated in place)
        :param false_count: per-category running count of false positives (updated in place)
        :param true_count: per-category running count of true positives (updated in place)
        :param fg_measure: 0 - IoU, 1 - IoP (intersection over prediction)
        :return: error_case flag and the updated miss/true/false counters
        '''
        # err_score_thres is the threshold for plotting the boxes as an error case
        self.assert_space()
        error_case = False  # flag for plotting the boxes as an error case
N_gt = gt_boxes.shape[0]
if N_gt==0: # no Ground Truth. mark all as False Positive
for cat_dets, DetCat in zip(q_dets, epi_cats): # for each class -------------------------------
Ndets = cat_dets.shape[0]
if Ndets == 0:
continue
for i_det, det in enumerate(cat_dets):
score = det[-1]
if score > 0:
self.set_score(score)
self.mark_FP() # fp[d] = 1.
self.mark_FPB() # fpb[d] = 1.
self.d += 1
return error_case, miss_count, true_count, false_count
nTP = 0
for cat_dets, DetCat in zip(q_dets,epi_cats): # for each class -------------------------------
# for i_DetCat, DetCat in enumerate(epi_cats): # for each class -------------------------------
# cat_dets = q_dets[i_DetCat]
self.nGT += sum(gt_classes == DetCat) # number of instances of this object
Ndets = cat_dets.shape[0]
if Ndets == 0:
continue
if not DetCat in miss_count:
miss_count[DetCat]=0
false_count[DetCat]=0
true_count[DetCat]=0
gt_FG_map, score_map, hit_map,det_portion_map = [np.zeros((Ndets, N_gt)) for _ in range(4)]
for i_det, det in enumerate(cat_dets):
if fg_measure==0:
gt_FG_map[i_det, :] = get_GT_IoUs(det, gt_boxes)
elif fg_measure==1:
gt_FG_map[i_det, :] = get_GT_IoPs(det, gt_boxes)
else:
                    raise ValueError('comp_epi_stats_m: fg_measure not recognized')
score_map[i_det, :] = det[-1]
for col, gt_c in enumerate(gt_classes):
hit_map[:, col] = gt_c == DetCat
score_FG_map = np.multiply(score_map, gt_FG_map >= ovthresh)
score_BG_map = np.multiply(score_map, gt_FG_map < ovthresh)
hit_score_FG_map = np.multiply(score_FG_map, hit_map)
#det_portion_map[i_det, :] = get_GT_IoGs(det, gt_boxes)
best_hit_scores = np.max(hit_score_FG_map, axis=0)
miss_score_FG_map = np.multiply(score_FG_map, 1 - hit_map)
good_dets = np.max(hit_score_FG_map, axis=1)
miss_score_FG_map[np.where(good_dets > 0)] = 0
best_miss_scores = np.max(miss_score_FG_map, axis=0)
score_BG_list = np.min(score_BG_map, axis=1)
# TP
for idx, score in enumerate(best_hit_scores): #dims = [1, Ngt]
# for every GT box, take the best matching FG detection roi with the correct class, and set a True Positive with its score
if score > 0:
self.set_score(score)
self.mark_TP()
self.d += 1
true_count[DetCat] += 1
                    det = cat_dets[np.argmax(hit_score_FG_map[:, idx])]  # detection with the best hit score for this GT box
gt_box = gt_boxes[idx]
self.set_IoG(get_GT_IoGs(det, gt_box))
# FPW
for score in best_miss_scores: #dims = [1, Ngt].
# for every GT box, take the best matching FG detection roi (gt_IoU_map >= ovthresh) and set a False Positive with its score
if score > 0:
self.set_score(score)
self.mark_FP() # fp[d] = 1.
self.mark_FPW() # fpw[d] = 1.
self.d += 1
false_count[DetCat] +=1
# FPB
for score in score_BG_list: # dims = [Ndets, 1]
# for every detection roi, that was decided to be on background (gt_IoU_map < ovthresh), set a False Positive with its score
if score > 0:
self.set_score(score)
self.mark_FP() # fp[d] = 1.
self.mark_FPB() # fpb[d] = 1.
self.d += 1
false_count[DetCat] +=1
            # flag for plotting the boxes as an error case
            if np.any(best_miss_scores > err_score_thres):
                error_case = True  # incorrect class
            if np.any(score_BG_list > err_score_thres):
                error_case = True  # high-score outlier on background
nTP += len(np.where(best_hit_scores>err_score_thres)[0])
miss_count[DetCat] += np.sum(hit_map)-len(np.where(best_hit_scores>err_score_thres)[0])
if nTP<N_gt:
error_case = True # not all GT were detected
# self.set_img_recAtK(d0, nGT_prev)
return error_case, miss_count, true_count, false_count
def set_score(self, sc):
self.sc[self.d] = sc
def set_IoG(self,val):
self.IoG.append(val)
def get_score(self):
return self.sc[0:self.d]
def mark_TP(self):
self.tp[self.d] = 1
def get_TP(self):
return self.tp[0:self.d]
def mark_FP(self):
self.fp[self.d] = 1
def get_FP(self):
return self.fp[0:self.d]
def mark_FPW(self):
self.fpw[self.d] = 1
def get_FPW(self):
return self.fpw[0:self.d]
def mark_FPB(self):
self.fpb[self.d] = 1
def get_FPB(self):
return self.fpb[0:self.d]
def compute_stats(self, start_idx=0, use_nGT=-1):
if start_idx >= self.d:
return [None for _ in range(6)]
        sorted_inds = np.argsort(-self.sc[start_idx:self.d]) + start_idx  # indices of scores in descending order
tp_part = self.tp[sorted_inds]
fp_part = self.fp[sorted_inds]
sc_part = self.sc[sorted_inds]
fp_wrong_part = self.fpw[sorted_inds]
fp_bkgnd_part = self.fpb[sorted_inds]
tp_acc = np.cumsum(tp_part)
fp_acc = np.cumsum(fp_part)
fp_wrong_acc = np.cumsum(fp_wrong_part)
fp_bkgnd_acc = np.cumsum(fp_bkgnd_part)
if use_nGT >= 0:
nGT = use_nGT
else:
nGT = self.nGT
        rec_part = tp_acc / float(nGT)
        rec_class_agn_part = (tp_acc + fp_wrong_acc) / float(nGT)
prec_part = tp_acc / np.maximum(tp_acc + fp_acc, np.finfo(np.float64).eps) # avoid division by zero
prec_w_part = tp_acc / np.maximum(rec_class_agn_part + fp_wrong_acc, np.finfo(np.float64).eps) # avoid division by zero
score_part = sc_part
use_07_metric = False
ap, pr_graph = voc_ap(rec_part, prec_part,sc_part, use_07_metric)
ap_w, pr_w_graph = voc_ap(rec_class_agn_part, prec_w_part, sc_part, use_07_metric)
tot_tp = tp_acc[-1]
tot_fp = fp_acc[-1]
tot_fp_wrong = fp_wrong_acc[-1]
tot_fp_bkgnd = fp_bkgnd_acc[-1]
if nGT > 0:
recall = rec_part[-1] # tot_tp / self.nGT # recall @ score thresh
else:
recall = 0
pr_data = [[ap,pr_graph],[ap_w,pr_w_graph]]
return tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, pr_data
def compute_stats_ext(self, stats, start_idx=0):
sc = stats[0]
tp = stats[1]
fp = stats[2]
fpw = stats[3]
fpb = stats[4]
nGT = stats[5]
d = stats[6]
if start_idx > d:
return [[] for _ in range(6)]
        sorted_inds = np.argsort(-sc[start_idx:d]) + start_idx  # indices of scores in descending order
tp_part = tp[sorted_inds]
fp_part = fp[sorted_inds]
sc_part = sc[sorted_inds]
fp_wrong_part = fpw[sorted_inds]
fp_bkgnd_part = fpb[sorted_inds]
tp_acc = np.cumsum(tp_part)
fp_acc = np.cumsum(fp_part)
fp_wrong_acc = np.cumsum(fp_wrong_part)
fp_bkgnd_acc = np.cumsum(fp_bkgnd_part)
rec_part = tp_acc / float(nGT)
prec_part = tp_acc / np.maximum(tp_acc + fp_acc, np.finfo(np.float64).eps) # avoid division by zero
use_07_metric = False
ap,pr_graph = voc_ap(rec_part, prec_part, sc_part,use_07_metric)
tot_tp = tp_acc[-1]
tot_fp = fp_acc[-1]
tot_fp_wrong = fp_wrong_acc[-1]
tot_fp_bkgnd = fp_bkgnd_acc[-1]
if nGT > 0:
recall = tot_tp / nGT
else:
recall = 0
        return tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, [[ap, pr_graph]]
def isvalid(self):
return self.valid
def merge_stats_ext(self, stats):
# stats[0] = [sc, tp, fp, fpw, fpb]
d = stats[2]
sc = stats[0][0, :]
tp = stats[0][1, :]
fp = stats[0][2, :]
fpw = stats[0][3, :]
fpb = stats[0][4, :]
# left = nd - stats[0].shape[1]
# if left>0:
# sc = np.concatenate((sc,np.zeros(left)))
# tp = np.concatenate((tp, np.zeros(left)))
# fp = np.concatenate((fp, np.zeros(left)))
# fpw = np.concatenate((fpw, np.zeros(left)))
# fpb = np.concatenate((fpb, np.zeros(left)))
nGT = stats[1]
if self.valid: # concat the provided object
self.sc = np.concatenate((self.get_score(), sc))
self.tp = np.concatenate((self.get_TP(), tp))
self.fp = np.concatenate((self.get_FP(), fp))
self.fpw = np.concatenate((self.get_FPW(), fpw))
self.fpb = np.concatenate((self.get_FPB(), fpb))
self.nGT += nGT
self.d += d + 1
else: # copy the provided object
self.sc = sc
self.tp = tp
self.fp = fp
self.fpw = fpw
self.fpb = fpb
self.nGT = nGT
self.maxFpw = 0
self.d = d
self.valid = True
def merge_stats(self, perf_stats):
if not perf_stats.isvalid:
return
if self.valid: # concat the provided object
self.sc = np.concatenate((self.get_score(), perf_stats.get_score()))
self.tp = np.concatenate((self.get_TP(), perf_stats.get_TP()))
self.fp = np.concatenate((self.get_FP(), perf_stats.get_FP()))
self.fpw = np.concatenate((self.get_FPW(), perf_stats.get_FPW()))
self.fpb = np.concatenate((self.get_FPB(), perf_stats.get_FPB()))
self.nGT += perf_stats.nGT
self.maxFpw = max(self.maxFpw, perf_stats.maxFpw)
self.d += perf_stats.d + 1
else: # copy the provided object
self.sc = perf_stats.get_score()
self.tp = perf_stats.get_TP()
self.fp = perf_stats.get_FP()
self.fpw = perf_stats.get_FPW()
self.fpb = perf_stats.get_FPB()
self.nGT = perf_stats.nGT
self.maxFpw = perf_stats.maxFpw
self.d = perf_stats.d
self.valid = True
def print_stats(self,graph_img_fname, title_prefix=''):
if self.d <= 0:
print('No statistics were gathered.')
return
        if self.nGT == 0:
            print('#Dets: {0}, #GT: {1}'.format(0, self.nGT))
            return
# statistics
tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, pr_data = self.compute_stats()
        ap = pr_data[0][0]
        pr_graph = pr_data[0][1]
        ap_w = pr_data[1][0]        # class-agnostic AP
        pr_w_graph = pr_data[1][1]  # class-agnostic PR curve
# RP curve -----------------------------------------------
fig = plt.figure(1)
plt.cla()
plt.plot(pr_graph['mrec'],pr_graph['mpre'],linewidth=2.0)
plt.xlabel('Recall')
plt.ylabel('Precision')
for i in range(0,len(pr_graph['mscore']),4):
plt.text(pr_graph['mrec'][i], pr_graph['mpre'][i], '{0:.2f}'.format(pr_graph['mscore'][i]))
plt.title(title_prefix+'TP={0} FP_conf={1} FP_bkgnd={2} AP={3:.3f}'.format(tot_tp,tot_fp_wrong,tot_fp_bkgnd,ap))
plt.grid(True)
fig.savefig(graph_img_fname+'PR_graph.jpg')
# confusion matrix -----------------------------------------------
#
fig = plt.figure(2)
plt.cla()
plt.imshow(self.CM[:,:,3])
f_path, f_name = os.path.split(graph_img_fname)
fig.savefig(graph_img_fname+'_confusion_matrix.jpg')
if False: #class agnostic
fig = plt.figure(2)
plt.cla()
            plt.plot(pr_w_graph['mrec'], pr_w_graph['mpre'], linewidth=2.0)
plt.xlabel('Recall - class agn.')
plt.ylabel('Precision - class agn.')
for i in range(0,len(pr_w_graph['mscore']),4):
plt.text(pr_w_graph['mrec'][i], pr_w_graph['mpre'][i], '{0:.2f}'.format(pr_w_graph['mscore'][i]))
plt.title(title_prefix+' class agn. AP={0:.3f}'.format(ap_w))
plt.grid(True)
f_path, f_name = os.path.split(graph_img_fname)
fig.savefig(os.path.join(f_path,'class_agn_'+f_name))
def print_perf(self, prefix, start_idx=0, use_nGT=-1):
# logger.info('== performance stats: ============================================================================================')
if self.d <= 0:
            perf_str = 'No statistics were gathered.'
            return perf_str, None
        if self.nGT == 0 and use_nGT == -1:
            perf_str = '#Dets: {0}, #GT: {1}'.format(0, self.nGT)
            return perf_str, None
tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, pr_data = self.compute_stats(start_idx=start_idx, use_nGT=use_nGT)
        ap = pr_data[0][0]
        pr_graph = pr_data[0][1]
        ap_w = pr_data[1][0]        # class-agnostic AP (not used in the summary string)
        pr_w_graph = pr_data[1][1]
        perf_str = prefix + ' #Dets: {0}, #GT: {1} TP: {2} FP: {3} = {4} wrong + {5} bkgnd Recall: {6:.3f} AP: {7:.3f}' \
            .format(self.d, self.nGT, int(tot_tp), int(tot_fp), int(tot_fp_wrong), int(tot_fp_bkgnd), recall, ap)
        # pick the score threshold at which precision is closest to 0.80 (last such point on the PR curve)
        temp = np.flip(np.abs(pr_graph['mpre'] - 0.80))
        idx = len(temp) - np.argmin(temp) - 1
        score_th = pr_graph['mscore'][idx]
perf_data = [recall,ap,score_th,self.IoG]
return perf_str,perf_data
def print_perf_ext(self, logger, prefix, stats, start_idx=0):
logger.info('== performance stats: ============================================================================================')
nGT = stats[5]
d = stats[6]
if d <= 0:
logger.info('No statistics were gathered.')
return
if nGT == 0:
logger.info('#Dets: {0}, #GT: {1}'.format(0, nGT))
return
tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, pr_data = self.compute_stats_ext(stats, start_idx=start_idx)
        ap = pr_data[0][0]
        pr_graph = pr_data[0][1]  # class-agnostic stats are not computed in compute_stats_ext
        logger.info(prefix + ' #Dets: {0}, #GT: {1} TP: {2} FP: {3} = {4} wrong + {5} bkgnd Recall: {6:.3f} AP: {7:.3f}'.format(d, nGT, \
            int(tot_tp), int(tot_fp), int(tot_fp_wrong), int(tot_fp_bkgnd), recall, ap))
return
def save_stats(self, store_stats_fname):
np.savez(store_stats_fname, stats=[self.sc, self.tp, self.fp, self.fpw, self.fpb, self.nGT, self.d, self.CM])
def get_stats(self):
stat_array = np.expand_dims(self.sc[:self.d], axis=0)
stat_array = np.concatenate((stat_array, np.expand_dims(self.tp[:self.d], axis=0)), axis=0)
stat_array = np.concatenate((stat_array, np.expand_dims(self.fp[:self.d], axis=0)), axis=0)
stat_array = np.concatenate((stat_array, np.expand_dims(self.fpw[:self.d], axis=0)), axis=0)
stat_array = np.concatenate((stat_array, np.expand_dims(self.fpb[:self.d], axis=0)), axis=0)
stat_array = stat_array.astype(np.float16)
return [stat_array, self.nGT, self.d, self.CM] # [, , self.fp[:self.d], self.fpw[:self.d], self.fpb[:self.d], self.nGT, self.d, self.img_recAtK]
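# --- Added usage sketch (illustration only; not part of the original file) ---
# A minimal, self-contained example of the bookkeeping API above. It only uses methods
# defined in this module (set_score / mark_TP / mark_FP / mark_FPB / compute_stats), so no
# detection files or ground-truth boxes are needed; the scores and counts are made up.
if __name__ == '__main__':
    ps = PerfStats(Nclasses=3, Nslots=16)
    ps.nGT = 2  # pretend the query set contains two ground-truth objects
    for score, is_tp in [(0.9, True), (0.75, False), (0.6, True)]:
        ps.set_score(score)
        if is_tp:
            ps.mark_TP()
        else:
            ps.mark_FP()
            ps.mark_FPB()  # count it as a background false positive
        ps.d += 1
    tot_tp, tot_fp, tot_fp_wrong, tot_fp_bkgnd, recall, pr_data = ps.compute_stats()
    print('TP={0} FP={1} recall={2:.2f} AP={3:.3f}'.format(
        int(tot_tp), int(tot_fp), recall, pr_data[0][0]))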
| 1.992188
| 2
|
table2ascii/alignment.py
|
sairamkiran9/table2ascii
| 24
|
12782050
|
from enum import Enum
class Alignment(Enum):
"""
Enum for text alignment types within a table cell
Example::
from table2ascii import Alignment
output = table2ascii(
...
alignments=[Alignment.LEFT, Alignment.RIGHT, Alignment.CENTER, Alignment.CENTER]
)
"""
LEFT = 0
CENTER = 1
RIGHT = 2
| 3.53125
| 4
|